Wed, 19 Dec 2012 15:40:35 -0800
8004835: Improve AES intrinsics on x86
Summary: Enable AES intrinsics on non-AVX CPUs; group AES instructions together in the crypto stubs.
Reviewed-by: roland, twisti
1 /*
2 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "nativeInst_x86.hpp"
30 #include "oops/instanceOop.hpp"
31 #include "oops/method.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/frame.inline.hpp"
36 #include "runtime/handles.inline.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubCodeGenerator.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "runtime/thread.inline.hpp"
41 #include "utilities/top.hpp"
42 #ifdef COMPILER2
43 #include "opto/runtime.hpp"
44 #endif
46 // Declaration and definition of StubGenerator (no .hpp file).
47 // For a more detailed description of the stub routine structure
48 // see the comment in stubRoutines.hpp
50 #define __ _masm->
51 #define a__ ((Assembler*)_masm)->
53 #ifdef PRODUCT
54 #define BLOCK_COMMENT(str) /* nothing */
55 #else
56 #define BLOCK_COMMENT(str) __ block_comment(str)
57 #endif
59 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
61 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
62 const int FPU_CNTRL_WRD_MASK = 0xFFFF;
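// Note: MXCSR bits 5:0 are the sticky exception-status flags; masking with 0xFFC0
// keeps only the control bits (DAZ, exception masks, rounding control, FZ) for the
// comparisons against the VM's standard MXCSR value below.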
64 // -------------------------------------------------------------------------------------------------------------------------
65 // Stub Code definitions
67 static address handle_unsafe_access() {
68 JavaThread* thread = JavaThread::current();
69 address pc = thread->saved_exception_pc();
70 // pc is the instruction which we must emulate
71 // doing a no-op is fine: return garbage from the load
72 // therefore, compute npc
73 address npc = Assembler::locate_next_instruction(pc);
75 // request an async exception
76 thread->set_pending_unsafe_access_error();
78 // return address of next instruction to execute
79 return npc;
80 }
82 class StubGenerator: public StubCodeGenerator {
83 private:
85 #ifdef PRODUCT
86 #define inc_counter_np(counter) (0)
87 #else
88 void inc_counter_np_(int& counter) {
89 __ incrementl(ExternalAddress((address)&counter));
90 }
91 #define inc_counter_np(counter) \
92 BLOCK_COMMENT("inc_counter " #counter); \
93 inc_counter_np_(counter);
94 #endif //PRODUCT
96 void inc_copy_counter_np(BasicType t) {
97 #ifndef PRODUCT
98 switch (t) {
99 case T_BYTE: inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); return;
100 case T_SHORT: inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); return;
101 case T_INT: inc_counter_np(SharedRuntime::_jint_array_copy_ctr); return;
102 case T_LONG: inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); return;
103 case T_OBJECT: inc_counter_np(SharedRuntime::_oop_array_copy_ctr); return;
104 }
105 ShouldNotReachHere();
106 #endif //PRODUCT
107 }
109 //------------------------------------------------------------------------------------------------------------------------
110 // Call stubs are used to call Java from C
111 //
112 // [ return_from_Java ] <--- rsp
113 // [ argument word n ]
114 // ...
115 // -N [ argument word 1 ]
116 // -7 [ Possible padding for stack alignment ]
117 // -6 [ Possible padding for stack alignment ]
118 // -5 [ Possible padding for stack alignment ]
119 // -4 [ mxcsr save ] <--- rsp_after_call
120 // -3 [ saved rbx, ]
121 // -2 [ saved rsi ]
122 // -1 [ saved rdi ]
123 // 0 [ saved rbp, ] <--- rbp,
124 // 1 [ return address ]
125 // 2 [ ptr. to call wrapper ]
126 // 3 [ result ]
127 // 4 [ result_type ]
128 // 5 [ method ]
129 // 6 [ entry_point ]
130 // 7 [ parameters ]
131 // 8 [ parameter_size ]
132 // 9 [ thread ]
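// For reference, the C++ side reaches this stub through StubRoutines::call_stub(),
// whose function-pointer type is roughly the following (see stubRoutines.hpp; sketch
// only, parameter names approximate):
//
//   typedef void (*CallStub)(address   link,              // call wrapper
//                            intptr_t* result,
//                            BasicType result_type,
//                            Method*   method,
//                            address   entry_point,
//                            intptr_t* parameters,
//                            int       size_of_parameters,
//                            TRAPS);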
135 address generate_call_stub(address& return_address) {
136 StubCodeMark mark(this, "StubRoutines", "call_stub");
137 address start = __ pc();
139 // stub code parameters / addresses
140 assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
141 bool sse_save = false;
142 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_catch_exception()!
143 const int locals_count_in_bytes (4*wordSize);
144 const Address mxcsr_save (rbp, -4 * wordSize);
145 const Address saved_rbx (rbp, -3 * wordSize);
146 const Address saved_rsi (rbp, -2 * wordSize);
147 const Address saved_rdi (rbp, -1 * wordSize);
148 const Address result (rbp, 3 * wordSize);
149 const Address result_type (rbp, 4 * wordSize);
150 const Address method (rbp, 5 * wordSize);
151 const Address entry_point (rbp, 6 * wordSize);
152 const Address parameters (rbp, 7 * wordSize);
153 const Address parameter_size(rbp, 8 * wordSize);
154 const Address thread (rbp, 9 * wordSize); // same as in generate_catch_exception()!
155 sse_save = UseSSE > 0;
157 // stub code
158 __ enter();
159 __ movptr(rcx, parameter_size); // parameter counter
160 __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
161 __ addptr(rcx, locals_count_in_bytes); // reserve space for register saves
162 __ subptr(rsp, rcx);
163 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
165 // save rdi, rsi, & rbx, according to C calling conventions
166 __ movptr(saved_rdi, rdi);
167 __ movptr(saved_rsi, rsi);
168 __ movptr(saved_rbx, rbx);
169 // save and initialize %mxcsr
170 if (sse_save) {
171 Label skip_ldmx;
172 __ stmxcsr(mxcsr_save);
173 __ movl(rax, mxcsr_save);
174 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
175 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
176 __ cmp32(rax, mxcsr_std);
177 __ jcc(Assembler::equal, skip_ldmx);
178 __ ldmxcsr(mxcsr_std);
179 __ bind(skip_ldmx);
180 }
182 // make sure the control word is correct.
183 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
185 #ifdef ASSERT
186 // make sure we have no pending exceptions
187 { Label L;
188 __ movptr(rcx, thread);
189 __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
190 __ jcc(Assembler::equal, L);
191 __ stop("StubRoutines::call_stub: entered with pending exception");
192 __ bind(L);
193 }
194 #endif
196 // pass parameters if any
197 BLOCK_COMMENT("pass parameters if any");
198 Label parameters_done;
199 __ movl(rcx, parameter_size); // parameter counter
200 __ testl(rcx, rcx);
201 __ jcc(Assembler::zero, parameters_done);
203 // parameter passing loop
205 Label loop;
206 // Copy Java parameters in reverse order (receiver last)
207 // Note that the argument order is inverted in the process
208 // source is rdx[rcx: N-1..0]
209 // dest is rsp[rbx: 0..N-1]
211 __ movptr(rdx, parameters); // parameter pointer
212 __ xorptr(rbx, rbx);
214 __ BIND(loop);
216 // get parameter
217 __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
218 __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
219 Interpreter::expr_offset_in_bytes(0)), rax); // store parameter
220 __ increment(rbx);
221 __ decrement(rcx);
222 __ jcc(Assembler::notZero, loop);
224 // call Java function
225 __ BIND(parameters_done);
226 __ movptr(rbx, method); // get Method*
227 __ movptr(rax, entry_point); // get entry_point
228 __ mov(rsi, rsp); // set sender sp
229 BLOCK_COMMENT("call Java function");
230 __ call(rax);
232 BLOCK_COMMENT("call_stub_return_address:");
233 return_address = __ pc();
235 #ifdef COMPILER2
236 {
237 Label L_skip;
238 if (UseSSE >= 2) {
239 __ verify_FPU(0, "call_stub_return");
240 } else {
241 for (int i = 1; i < 8; i++) {
242 __ ffree(i);
243 }
245 // UseSSE <= 1 so double result should be left on TOS
246 __ movl(rsi, result_type);
247 __ cmpl(rsi, T_DOUBLE);
248 __ jcc(Assembler::equal, L_skip);
249 if (UseSSE == 0) {
250 // UseSSE == 0 so float result should be left on TOS
251 __ cmpl(rsi, T_FLOAT);
252 __ jcc(Assembler::equal, L_skip);
253 }
254 __ ffree(0);
255 }
256 __ BIND(L_skip);
257 }
258 #endif // COMPILER2
260 // store result depending on type
261 // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
262 __ movptr(rdi, result);
263 Label is_long, is_float, is_double, exit;
264 __ movl(rsi, result_type);
265 __ cmpl(rsi, T_LONG);
266 __ jcc(Assembler::equal, is_long);
267 __ cmpl(rsi, T_FLOAT);
268 __ jcc(Assembler::equal, is_float);
269 __ cmpl(rsi, T_DOUBLE);
270 __ jcc(Assembler::equal, is_double);
272 // handle T_INT case
273 __ movl(Address(rdi, 0), rax);
274 __ BIND(exit);
276 // check that FPU stack is empty
277 __ verify_FPU(0, "generate_call_stub");
279 // pop parameters
280 __ lea(rsp, rsp_after_call);
282 // restore %mxcsr
283 if (sse_save) {
284 __ ldmxcsr(mxcsr_save);
285 }
287 // restore rdi, rsi and rbx,
288 __ movptr(rbx, saved_rbx);
289 __ movptr(rsi, saved_rsi);
290 __ movptr(rdi, saved_rdi);
291 __ addptr(rsp, 4*wordSize);
293 // return
294 __ pop(rbp);
295 __ ret(0);
297 // handle return types different from T_INT
298 __ BIND(is_long);
299 __ movl(Address(rdi, 0 * wordSize), rax);
300 __ movl(Address(rdi, 1 * wordSize), rdx);
301 __ jmp(exit);
303 __ BIND(is_float);
304 // interpreter uses xmm0 for return values
305 if (UseSSE >= 1) {
306 __ movflt(Address(rdi, 0), xmm0);
307 } else {
308 __ fstp_s(Address(rdi, 0));
309 }
310 __ jmp(exit);
312 __ BIND(is_double);
313 // interpreter uses xmm0 for return values
314 if (UseSSE >= 2) {
315 __ movdbl(Address(rdi, 0), xmm0);
316 } else {
317 __ fstp_d(Address(rdi, 0));
318 }
319 __ jmp(exit);
321 return start;
322 }
325 //------------------------------------------------------------------------------------------------------------------------
326 // Return point for a Java call if there's an exception thrown in Java code.
327 // The exception is caught and transformed into a pending exception stored in
328 // JavaThread that can be tested from within the VM.
329 //
330 // Note: Usually the parameters are removed by the callee. In case of an exception
331 // crossing an activation frame boundary, that is not the case if the callee
332 // is compiled code => need to set up the rsp.
333 //
334 // rax,: exception oop
336 address generate_catch_exception() {
337 StubCodeMark mark(this, "StubRoutines", "catch_exception");
338 const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()!
339 const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()!
340 address start = __ pc();
342 // get thread directly
343 __ movptr(rcx, thread);
344 #ifdef ASSERT
345 // verify that threads correspond
346 { Label L;
347 __ get_thread(rbx);
348 __ cmpptr(rbx, rcx);
349 __ jcc(Assembler::equal, L);
350 __ stop("StubRoutines::catch_exception: threads must correspond");
351 __ bind(L);
352 }
353 #endif
354 // set pending exception
355 __ verify_oop(rax);
356 __ movptr(Address(rcx, Thread::pending_exception_offset()), rax );
357 __ lea(Address(rcx, Thread::exception_file_offset ()),
358 ExternalAddress((address)__FILE__));
359 __ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ );
360 // complete return to VM
361 assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
362 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
364 return start;
365 }
368 //------------------------------------------------------------------------------------------------------------------------
369 // Continuation point for runtime calls returning with a pending exception.
370 // The pending exception check happened in the runtime or native call stub.
371 // The pending exception in Thread is converted into a Java-level exception.
372 //
373 // Contract with Java-level exception handlers:
374 // rax: exception
375 // rdx: throwing pc
376 //
377 // NOTE: At entry of this stub, exception-pc must be on stack !!
379 address generate_forward_exception() {
380 StubCodeMark mark(this, "StubRoutines", "forward exception");
381 address start = __ pc();
382 const Register thread = rcx;
384 // other registers used in this stub
385 const Register exception_oop = rax;
386 const Register handler_addr = rbx;
387 const Register exception_pc = rdx;
389 // Upon entry, the sp points to the return address returning into Java
390 // (interpreted or compiled) code; i.e., the return address becomes the
391 // throwing pc.
392 //
393 // Arguments pushed before the runtime call are still on the stack but
394 // the exception handler will reset the stack pointer -> ignore them.
395 // A potential result in registers can be ignored as well.
397 #ifdef ASSERT
398 // make sure this code is only executed if there is a pending exception
399 { Label L;
400 __ get_thread(thread);
401 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
402 __ jcc(Assembler::notEqual, L);
403 __ stop("StubRoutines::forward exception: no pending exception (1)");
404 __ bind(L);
405 }
406 #endif
408 // compute exception handler into rbx,
409 __ get_thread(thread);
410 __ movptr(exception_pc, Address(rsp, 0));
411 BLOCK_COMMENT("call exception_handler_for_return_address");
412 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
413 __ mov(handler_addr, rax);
415 // set up rax & rdx, remove return address & clear pending exception
416 __ get_thread(thread);
417 __ pop(exception_pc);
418 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
419 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
421 #ifdef ASSERT
422 // make sure exception is set
423 { Label L;
424 __ testptr(exception_oop, exception_oop);
425 __ jcc(Assembler::notEqual, L);
426 __ stop("StubRoutines::forward exception: no pending exception (2)");
427 __ bind(L);
428 }
429 #endif
431 // Verify that there is really a valid exception in RAX.
432 __ verify_oop(exception_oop);
434 // continue at exception handler (return address removed)
435 // rax: exception
436 // rbx: exception handler
437 // rdx: throwing pc
438 __ jmp(handler_addr);
440 return start;
441 }
444 //----------------------------------------------------------------------------------------------------
445 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
446 //
447 // xchg exists as far back as 8086, lock needed for MP only
448 // Stack layout immediately after call:
449 //
450 // 0 [ret addr ] <--- rsp
451 // 1 [ ex ]
452 // 2 [ dest ]
453 //
454 // Result: *dest <- ex, return (old *dest)
455 //
456 // Note: win32 does not currently use this code
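// Equivalent C semantics (sketch): atomically { jint old = *dest; *dest = ex; return old; }
// xchg with a memory operand asserts LOCK# implicitly, so no explicit lock prefix is emitted.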
458 address generate_atomic_xchg() {
459 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
460 address start = __ pc();
462 __ push(rdx);
463 Address exchange(rsp, 2 * wordSize);
464 Address dest_addr(rsp, 3 * wordSize);
465 __ movl(rax, exchange);
466 __ movptr(rdx, dest_addr);
467 __ xchgl(rax, Address(rdx, 0));
468 __ pop(rdx);
469 __ ret(0);
471 return start;
472 }
474 //----------------------------------------------------------------------------------------------------
475 // Support for void verify_mxcsr()
476 //
477 // This routine is used with -Xcheck:jni to verify that native
478 // JNI code does not return to Java code without restoring the
479 // MXCSR register to our expected state.
482 address generate_verify_mxcsr() {
483 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
484 address start = __ pc();
486 const Address mxcsr_save(rsp, 0);
488 if (CheckJNICalls && UseSSE > 0 ) {
489 Label ok_ret;
490 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
491 __ push(rax);
492 __ subptr(rsp, wordSize); // allocate a temp location
493 __ stmxcsr(mxcsr_save);
494 __ movl(rax, mxcsr_save);
495 __ andl(rax, MXCSR_MASK);
496 __ cmp32(rax, mxcsr_std);
497 __ jcc(Assembler::equal, ok_ret);
499 __ warn("MXCSR changed by native JNI code.");
501 __ ldmxcsr(mxcsr_std);
503 __ bind(ok_ret);
504 __ addptr(rsp, wordSize);
505 __ pop(rax);
506 }
508 __ ret(0);
510 return start;
511 }
514 //---------------------------------------------------------------------------
515 // Support for void verify_fpu_cntrl_wrd()
516 //
517 // This routine is used with -Xcheck:jni to verify that native
518 // JNI code does not return to Java code without restoring the
519 // FP control word to our expected state.
521 address generate_verify_fpu_cntrl_wrd() {
522 StubCodeMark mark(this, "StubRoutines", "verify_spcw");
523 address start = __ pc();
525 const Address fpu_cntrl_wrd_save(rsp, 0);
527 if (CheckJNICalls) {
528 Label ok_ret;
529 __ push(rax);
530 __ subptr(rsp, wordSize); // allocate a temp location
531 __ fnstcw(fpu_cntrl_wrd_save);
532 __ movl(rax, fpu_cntrl_wrd_save);
533 __ andl(rax, FPU_CNTRL_WRD_MASK);
534 ExternalAddress fpu_std(StubRoutines::addr_fpu_cntrl_wrd_std());
535 __ cmp32(rax, fpu_std);
536 __ jcc(Assembler::equal, ok_ret);
538 __ warn("Floating point control word changed by native JNI code.");
540 __ fldcw(fpu_std);
542 __ bind(ok_ret);
543 __ addptr(rsp, wordSize);
544 __ pop(rax);
545 }
547 __ ret(0);
549 return start;
550 }
552 //---------------------------------------------------------------------------
553 // Wrapper for slow-case handling of double-to-integer conversion
554 // d2i or f2i fast case failed either because it is nan or because
555 // of under/overflow.
556 // Input: FPU TOS: float value
557 // Output: rax, (rdx): integer (long) result
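// The C helpers produce the Java-mandated result (JLS 5.1.3), roughly:
//   NaN                    -> 0
//   too large / too small  -> MAX_VALUE / MIN_VALUE of the target type
// which the inlined fast path cannot produce, hence this slow-path wrapper.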
559 address generate_d2i_wrapper(BasicType t, address fcn) {
560 StubCodeMark mark(this, "StubRoutines", "d2i_wrapper");
561 address start = __ pc();
563 // Capture info about frame layout
564 enum layout { FPUState_off = 0,
565 rbp_off = FPUStateSizeInWords,
566 rdi_off,
567 rsi_off,
568 rcx_off,
569 rbx_off,
570 saved_argument_off,
571 saved_argument_off2, // 2nd half of double
572 framesize
573 };
575 assert(FPUStateSizeInWords == 27, "update stack layout");
577 // Save outgoing argument to stack across push_FPU_state()
578 __ subptr(rsp, wordSize * 2);
579 __ fstp_d(Address(rsp, 0));
581 // Save CPU & FPU state
582 __ push(rbx);
583 __ push(rcx);
584 __ push(rsi);
585 __ push(rdi);
586 __ push(rbp);
587 __ push_FPU_state();
589 // push_FPU_state() resets the FP top of stack
590 // Load original double into FP top of stack
591 __ fld_d(Address(rsp, saved_argument_off * wordSize));
592 // Store double into stack as outgoing argument
593 __ subptr(rsp, wordSize*2);
594 __ fst_d(Address(rsp, 0));
596 // Prepare FPU for doing math in C-land
597 __ empty_FPU_stack();
598 // Call the C code to massage the double. Result in EAX
599 if (t == T_INT)
600 { BLOCK_COMMENT("SharedRuntime::d2i"); }
601 else if (t == T_LONG)
602 { BLOCK_COMMENT("SharedRuntime::d2l"); }
603 __ call_VM_leaf( fcn, 2 );
605 // Restore CPU & FPU state
606 __ pop_FPU_state();
607 __ pop(rbp);
608 __ pop(rdi);
609 __ pop(rsi);
610 __ pop(rcx);
611 __ pop(rbx);
612 __ addptr(rsp, wordSize * 2);
614 __ ret(0);
616 return start;
617 }
620 //---------------------------------------------------------------------------
621 // The following routine generates a subroutine to throw an asynchronous
622 // UnknownError when an unsafe access gets a fault that could not be
623 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
624 address generate_handler_for_unsafe_access() {
625 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
626 address start = __ pc();
628 __ push(0); // hole for return address-to-be
629 __ pusha(); // push registers
630 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
631 BLOCK_COMMENT("call handle_unsafe_access");
632 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
633 __ movptr(next_pc, rax); // stuff next address
634 __ popa();
635 __ ret(0); // jump to next address
637 return start;
638 }
641 //----------------------------------------------------------------------------------------------------
642 // Non-destructive plausibility checks for oops
644 address generate_verify_oop() {
645 StubCodeMark mark(this, "StubRoutines", "verify_oop");
646 address start = __ pc();
648 // Incoming arguments on stack after saving rax,:
649 //
650 // [tos ]: saved rdx
651 // [tos + 1]: saved EFLAGS
652 // [tos + 2]: return address
653 // [tos + 3]: char* error message
654 // [tos + 4]: oop object to verify
655 // [tos + 5]: saved rax, - saved by caller and bashed
657 Label exit, error;
658 __ pushf();
659 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
660 __ push(rdx); // save rdx
661 // make sure object is 'reasonable'
662 __ movptr(rax, Address(rsp, 4 * wordSize)); // get object
663 __ testptr(rax, rax);
664 __ jcc(Assembler::zero, exit); // if obj is NULL it is ok
666 // Check if the oop is in the right area of memory
667 const int oop_mask = Universe::verify_oop_mask();
668 const int oop_bits = Universe::verify_oop_bits();
669 __ mov(rdx, rax);
670 __ andptr(rdx, oop_mask);
671 __ cmpptr(rdx, oop_bits);
672 __ jcc(Assembler::notZero, error);
674 // make sure klass is 'reasonable', which is not zero.
675 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
676 __ testptr(rax, rax);
677 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
678 // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
680 // return if everything seems ok
681 __ bind(exit);
682 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
683 __ pop(rdx); // restore rdx
684 __ popf(); // restore EFLAGS
685 __ ret(3 * wordSize); // pop arguments
687 // handle errors
688 __ bind(error);
689 __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
690 __ pop(rdx); // get saved rdx back
691 __ popf(); // get saved EFLAGS off stack -- will be ignored
692 __ pusha(); // push registers (eip = return address & msg are already pushed)
693 BLOCK_COMMENT("call MacroAssembler::debug");
694 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
695 __ popa();
696 __ ret(3 * wordSize); // pop arguments
697 return start;
698 }
700 //
701 // Generate pre-barrier for array stores
702 //
703 // Input:
704 // start - starting address
705 // count - element count
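// Only G1 needs a pre-barrier here: it records the oops about to be overwritten so the
// SATB marking invariant holds. The card-table and plain ModRef kinds emit no code.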
706 void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) {
707 assert_different_registers(start, count);
708 BarrierSet* bs = Universe::heap()->barrier_set();
709 switch (bs->kind()) {
710 case BarrierSet::G1SATBCT:
711 case BarrierSet::G1SATBCTLogging:
712 // With G1, don't generate the call if we statically know that the target is uninitialized
713 if (!uninitialized_target) {
714 __ pusha(); // push registers
715 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
716 start, count);
717 __ popa();
718 }
719 break;
720 case BarrierSet::CardTableModRef:
721 case BarrierSet::CardTableExtension:
722 case BarrierSet::ModRef:
723 break;
724 default :
725 ShouldNotReachHere();
727 }
728 }
731 //
732 // Generate a post-barrier for an array store
733 //
734 // start - starting address
735 // count - element count
736 //
737 // The two input registers are overwritten.
738 //
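// For the card-table kinds below the net effect is roughly (sketch):
//   for (card = (uintptr_t)start >> card_shift;
//        card <= ((uintptr_t)start + (count - 1) * wordSize) >> card_shift; card++)
//     ct->byte_map_base[card] = 0;   // mark card dirty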
739 void gen_write_ref_array_post_barrier(Register start, Register count) {
740 BarrierSet* bs = Universe::heap()->barrier_set();
741 assert_different_registers(start, count);
742 switch (bs->kind()) {
743 case BarrierSet::G1SATBCT:
744 case BarrierSet::G1SATBCTLogging:
745 {
746 __ pusha(); // push registers
747 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
748 start, count);
749 __ popa();
750 }
751 break;
753 case BarrierSet::CardTableModRef:
754 case BarrierSet::CardTableExtension:
755 {
756 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
757 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
759 Label L_loop;
760 const Register end = count; // elements count; end == start+count-1
761 assert_different_registers(start, end);
763 __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
764 __ shrptr(start, CardTableModRefBS::card_shift);
765 __ shrptr(end, CardTableModRefBS::card_shift);
766 __ subptr(end, start); // end --> count
767 __ BIND(L_loop);
768 intptr_t disp = (intptr_t) ct->byte_map_base;
769 Address cardtable(start, count, Address::times_1, disp);
770 __ movb(cardtable, 0);
771 __ decrement(count);
772 __ jcc(Assembler::greaterEqual, L_loop);
773 }
774 break;
775 case BarrierSet::ModRef:
776 break;
777 default :
778 ShouldNotReachHere();
780 }
781 }
784 // Copy 64-byte chunks
785 //
786 // Inputs:
787 // from - source array address
788 // to_from - destination array address - from
789 // qword_count - 8-bytes element count, negative
790 //
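// Addressing note: to_from holds (to - from), so each store below of the form
// Address(from, to_from, times_1, off) resolves to (to + off); bumping 'from' thus
// advances both the source and destination streams with a single induction register.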
791 void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
792 assert( UseSSE >= 2, "supported cpu only" );
793 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
794 // Copy 64-byte chunks
795 __ jmpb(L_copy_64_bytes);
796 __ align(OptoLoopAlignment);
797 __ BIND(L_copy_64_bytes_loop);
799 if (UseUnalignedLoadStores) {
800 __ movdqu(xmm0, Address(from, 0));
801 __ movdqu(Address(from, to_from, Address::times_1, 0), xmm0);
802 __ movdqu(xmm1, Address(from, 16));
803 __ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
804 __ movdqu(xmm2, Address(from, 32));
805 __ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
806 __ movdqu(xmm3, Address(from, 48));
807 __ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
809 } else {
810 __ movq(xmm0, Address(from, 0));
811 __ movq(Address(from, to_from, Address::times_1, 0), xmm0);
812 __ movq(xmm1, Address(from, 8));
813 __ movq(Address(from, to_from, Address::times_1, 8), xmm1);
814 __ movq(xmm2, Address(from, 16));
815 __ movq(Address(from, to_from, Address::times_1, 16), xmm2);
816 __ movq(xmm3, Address(from, 24));
817 __ movq(Address(from, to_from, Address::times_1, 24), xmm3);
818 __ movq(xmm4, Address(from, 32));
819 __ movq(Address(from, to_from, Address::times_1, 32), xmm4);
820 __ movq(xmm5, Address(from, 40));
821 __ movq(Address(from, to_from, Address::times_1, 40), xmm5);
822 __ movq(xmm6, Address(from, 48));
823 __ movq(Address(from, to_from, Address::times_1, 48), xmm6);
824 __ movq(xmm7, Address(from, 56));
825 __ movq(Address(from, to_from, Address::times_1, 56), xmm7);
826 }
828 __ addl(from, 64);
829 __ BIND(L_copy_64_bytes);
830 __ subl(qword_count, 8);
831 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
832 __ addl(qword_count, 8);
833 __ jccb(Assembler::zero, L_exit);
834 //
835 // length is too short, just copy qwords
836 //
837 __ BIND(L_copy_8_bytes);
838 __ movq(xmm0, Address(from, 0));
839 __ movq(Address(from, to_from, Address::times_1), xmm0);
840 __ addl(from, 8);
841 __ decrement(qword_count);
842 __ jcc(Assembler::greater, L_copy_8_bytes);
843 __ BIND(L_exit);
844 }
846 // Copy 64-byte chunks
847 //
848 // Inputs:
849 // from - source array address
850 // to_from - destination array address - from
851 // qword_count - 8-bytes element count, negative
852 //
853 void mmx_copy_forward(Register from, Register to_from, Register qword_count) {
854 assert( VM_Version::supports_mmx(), "supported cpu only" );
855 Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
856 // Copy 64-byte chunks
857 __ jmpb(L_copy_64_bytes);
858 __ align(OptoLoopAlignment);
859 __ BIND(L_copy_64_bytes_loop);
860 __ movq(mmx0, Address(from, 0));
861 __ movq(mmx1, Address(from, 8));
862 __ movq(mmx2, Address(from, 16));
863 __ movq(Address(from, to_from, Address::times_1, 0), mmx0);
864 __ movq(mmx3, Address(from, 24));
865 __ movq(Address(from, to_from, Address::times_1, 8), mmx1);
866 __ movq(mmx4, Address(from, 32));
867 __ movq(Address(from, to_from, Address::times_1, 16), mmx2);
868 __ movq(mmx5, Address(from, 40));
869 __ movq(Address(from, to_from, Address::times_1, 24), mmx3);
870 __ movq(mmx6, Address(from, 48));
871 __ movq(Address(from, to_from, Address::times_1, 32), mmx4);
872 __ movq(mmx7, Address(from, 56));
873 __ movq(Address(from, to_from, Address::times_1, 40), mmx5);
874 __ movq(Address(from, to_from, Address::times_1, 48), mmx6);
875 __ movq(Address(from, to_from, Address::times_1, 56), mmx7);
876 __ addptr(from, 64);
877 __ BIND(L_copy_64_bytes);
878 __ subl(qword_count, 8);
879 __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
880 __ addl(qword_count, 8);
881 __ jccb(Assembler::zero, L_exit);
882 //
883 // length is too short, just copy qwords
884 //
885 __ BIND(L_copy_8_bytes);
886 __ movq(mmx0, Address(from, 0));
887 __ movq(Address(from, to_from, Address::times_1), mmx0);
888 __ addptr(from, 8);
889 __ decrement(qword_count);
890 __ jcc(Assembler::greater, L_copy_8_bytes);
891 __ BIND(L_exit);
892 __ emms();
893 }
895 address generate_disjoint_copy(BasicType t, bool aligned,
896 Address::ScaleFactor sf,
897 address* entry, const char *name,
898 bool dest_uninitialized = false) {
899 __ align(CodeEntryAlignment);
900 StubCodeMark mark(this, "StubRoutines", name);
901 address start = __ pc();
903 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
904 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes;
906 int shift = Address::times_ptr - sf;
908 const Register from = rsi; // source array address
909 const Register to = rdi; // destination array address
910 const Register count = rcx; // elements count
911 const Register to_from = to; // (to - from)
912 const Register saved_to = rdx; // saved destination array address
914 __ enter(); // required for proper stackwalking of RuntimeStub frame
915 __ push(rsi);
916 __ push(rdi);
917 __ movptr(from , Address(rsp, 12+ 4));
918 __ movptr(to , Address(rsp, 12+ 8));
919 __ movl(count, Address(rsp, 12+ 12));
921 if (entry != NULL) {
922 *entry = __ pc(); // Entry point from conjoint arraycopy stub.
923 BLOCK_COMMENT("Entry:");
924 }
926 if (t == T_OBJECT) {
927 __ testl(count, count);
928 __ jcc(Assembler::zero, L_0_count);
929 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
930 __ mov(saved_to, to); // save 'to'
931 }
933 __ subptr(to, from); // to --> to_from
934 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
935 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
936 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
937 // align source address at 4 bytes address boundary
938 if (t == T_BYTE) {
939 // One byte misalignment happens only for byte arrays
940 __ testl(from, 1);
941 __ jccb(Assembler::zero, L_skip_align1);
942 __ movb(rax, Address(from, 0));
943 __ movb(Address(from, to_from, Address::times_1, 0), rax);
944 __ increment(from);
945 __ decrement(count);
946 __ BIND(L_skip_align1);
947 }
948 // Two bytes misalignment happens only for byte and short (char) arrays
949 __ testl(from, 2);
950 __ jccb(Assembler::zero, L_skip_align2);
951 __ movw(rax, Address(from, 0));
952 __ movw(Address(from, to_from, Address::times_1, 0), rax);
953 __ addptr(from, 2);
954 __ subl(count, 1<<(shift-1));
955 __ BIND(L_skip_align2);
956 }
957 if (!VM_Version::supports_mmx()) {
958 __ mov(rax, count); // save 'count'
959 __ shrl(count, shift); // dword count
960 __ addptr(to_from, from);// restore 'to'
961 __ rep_mov();
962 __ subptr(to_from, from);// restore 'to_from'
963 __ mov(count, rax); // restore 'count'
964 __ jmpb(L_copy_2_bytes); // all dwords were copied
965 } else {
966 if (!UseUnalignedLoadStores) {
967 // align to 8 bytes, we know we are 4 byte aligned to start
968 __ testptr(from, 4);
969 __ jccb(Assembler::zero, L_copy_64_bytes);
970 __ movl(rax, Address(from, 0));
971 __ movl(Address(from, to_from, Address::times_1, 0), rax);
972 __ addptr(from, 4);
973 __ subl(count, 1<<shift);
974 }
975 __ BIND(L_copy_64_bytes);
976 __ mov(rax, count);
977 __ shrl(rax, shift+1); // 8 bytes chunk count
978 //
979 // Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
980 //
981 if (UseXMMForArrayCopy) {
982 xmm_copy_forward(from, to_from, rax);
983 } else {
984 mmx_copy_forward(from, to_from, rax);
985 }
986 }
987 // copy trailing dword
988 __ BIND(L_copy_4_bytes);
989 __ testl(count, 1<<shift);
990 __ jccb(Assembler::zero, L_copy_2_bytes);
991 __ movl(rax, Address(from, 0));
992 __ movl(Address(from, to_from, Address::times_1, 0), rax);
993 if (t == T_BYTE || t == T_SHORT) {
994 __ addptr(from, 4);
995 __ BIND(L_copy_2_bytes);
996 // copy trailing word
997 __ testl(count, 1<<(shift-1));
998 __ jccb(Assembler::zero, L_copy_byte);
999 __ movw(rax, Address(from, 0));
1000 __ movw(Address(from, to_from, Address::times_1, 0), rax);
1001 if (t == T_BYTE) {
1002 __ addptr(from, 2);
1003 __ BIND(L_copy_byte);
1004 // copy trailing byte
1005 __ testl(count, 1);
1006 __ jccb(Assembler::zero, L_exit);
1007 __ movb(rax, Address(from, 0));
1008 __ movb(Address(from, to_from, Address::times_1, 0), rax);
1009 __ BIND(L_exit);
1010 } else {
1011 __ BIND(L_copy_byte);
1012 }
1013 } else {
1014 __ BIND(L_copy_2_bytes);
1015 }
1017 if (t == T_OBJECT) {
1018 __ movl(count, Address(rsp, 12+12)); // reread 'count'
1019 __ mov(to, saved_to); // restore 'to'
1020 gen_write_ref_array_post_barrier(to, count);
1021 __ BIND(L_0_count);
1022 }
1023 inc_copy_counter_np(t);
1024 __ pop(rdi);
1025 __ pop(rsi);
1026 __ leave(); // required for proper stackwalking of RuntimeStub frame
1027 __ xorptr(rax, rax); // return 0
1028 __ ret(0);
1029 return start;
1030 }
1033 address generate_fill(BasicType t, bool aligned, const char *name) {
1034 __ align(CodeEntryAlignment);
1035 StubCodeMark mark(this, "StubRoutines", name);
1036 address start = __ pc();
1038 BLOCK_COMMENT("Entry:");
1040 const Register to = rdi; // destination array address
1041 const Register value = rdx; // value
1042 const Register count = rsi; // elements count
1044 __ enter(); // required for proper stackwalking of RuntimeStub frame
1045 __ push(rsi);
1046 __ push(rdi);
1047 __ movptr(to , Address(rsp, 12+ 4));
1048 __ movl(value, Address(rsp, 12+ 8));
1049 __ movl(count, Address(rsp, 12+ 12));
1051 __ generate_fill(t, aligned, to, value, count, rax, xmm0);
1053 __ pop(rdi);
1054 __ pop(rsi);
1055 __ leave(); // required for proper stackwalking of RuntimeStub frame
1056 __ ret(0);
1057 return start;
1058 }
1060 address generate_conjoint_copy(BasicType t, bool aligned,
1061 Address::ScaleFactor sf,
1062 address nooverlap_target,
1063 address* entry, const char *name,
1064 bool dest_uninitialized = false) {
1065 __ align(CodeEntryAlignment);
1066 StubCodeMark mark(this, "StubRoutines", name);
1067 address start = __ pc();
1069 Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
1070 Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;
1072 int shift = Address::times_ptr - sf;
1074 const Register src = rax; // source array address
1075 const Register dst = rdx; // destination array address
1076 const Register from = rsi; // source array address
1077 const Register to = rdi; // destination array address
1078 const Register count = rcx; // elements count
1079 const Register end = rax; // array end address
1081 __ enter(); // required for proper stackwalking of RuntimeStub frame
1082 __ push(rsi);
1083 __ push(rdi);
1084 __ movptr(src , Address(rsp, 12+ 4)); // from
1085 __ movptr(dst , Address(rsp, 12+ 8)); // to
1086 __ movl2ptr(count, Address(rsp, 12+12)); // count
1088 if (entry != NULL) {
1089 *entry = __ pc(); // Entry point from generic arraycopy stub.
1090 BLOCK_COMMENT("Entry:");
1091 }
1093 // nooverlap_target expects arguments in rsi and rdi.
1094 __ mov(from, src);
1095 __ mov(to , dst);
1097 // arrays overlap test: dispatch to disjoint stub if necessary.
1098 RuntimeAddress nooverlap(nooverlap_target);
1099 __ cmpptr(dst, src);
1100 __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
1101 __ jump_cc(Assembler::belowEqual, nooverlap);
1102 __ cmpptr(dst, end);
1103 __ jump_cc(Assembler::aboveEqual, nooverlap);
1105 if (t == T_OBJECT) {
1106 __ testl(count, count);
1107 __ jcc(Assembler::zero, L_0_count);
1108 gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized);
1109 }
1111 // copy from high to low
1112 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1113 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
1114 if (t == T_BYTE || t == T_SHORT) {
1115 // Align the end of destination array at 4 bytes address boundary
1116 __ lea(end, Address(dst, count, sf, 0));
1117 if (t == T_BYTE) {
1118 // One byte misalignment happens only for byte arrays
1119 __ testl(end, 1);
1120 __ jccb(Assembler::zero, L_skip_align1);
1121 __ decrement(count);
1122 __ movb(rdx, Address(from, count, sf, 0));
1123 __ movb(Address(to, count, sf, 0), rdx);
1124 __ BIND(L_skip_align1);
1125 }
1126 // Two bytes misalignment happens only for byte and short (char) arrays
1127 __ testl(end, 2);
1128 __ jccb(Assembler::zero, L_skip_align2);
1129 __ subptr(count, 1<<(shift-1));
1130 __ movw(rdx, Address(from, count, sf, 0));
1131 __ movw(Address(to, count, sf, 0), rdx);
1132 __ BIND(L_skip_align2);
1133 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1134 __ jcc(Assembler::below, L_copy_4_bytes);
1135 }
1137 if (!VM_Version::supports_mmx()) {
1138 __ std();
1139 __ mov(rax, count); // Save 'count'
1140 __ mov(rdx, to); // Save 'to'
1141 __ lea(rsi, Address(from, count, sf, -4));
1142 __ lea(rdi, Address(to , count, sf, -4));
1143 __ shrptr(count, shift); // dword count
1144 __ rep_mov();
1145 __ cld();
1146 __ mov(count, rax); // restore 'count'
1147 __ andl(count, (1<<shift)-1); // mask the number of rest elements
1148 __ movptr(from, Address(rsp, 12+4)); // reread 'from'
1149 __ mov(to, rdx); // restore 'to'
1150 __ jmpb(L_copy_2_bytes); // all dwords were copied
1151 } else {
1152 // Align to 8 bytes the end of array. It is aligned to 4 bytes already.
1153 __ testptr(end, 4);
1154 __ jccb(Assembler::zero, L_copy_8_bytes);
1155 __ subl(count, 1<<shift);
1156 __ movl(rdx, Address(from, count, sf, 0));
1157 __ movl(Address(to, count, sf, 0), rdx);
1158 __ jmpb(L_copy_8_bytes);
1160 __ align(OptoLoopAlignment);
1161 // Move 8 bytes
1162 __ BIND(L_copy_8_bytes_loop);
1163 if (UseXMMForArrayCopy) {
1164 __ movq(xmm0, Address(from, count, sf, 0));
1165 __ movq(Address(to, count, sf, 0), xmm0);
1166 } else {
1167 __ movq(mmx0, Address(from, count, sf, 0));
1168 __ movq(Address(to, count, sf, 0), mmx0);
1169 }
1170 __ BIND(L_copy_8_bytes);
1171 __ subl(count, 2<<shift);
1172 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1173 __ addl(count, 2<<shift);
1174 if (!UseXMMForArrayCopy) {
1175 __ emms();
1176 }
1177 }
1178 __ BIND(L_copy_4_bytes);
1179 // copy prefix dword
1180 __ testl(count, 1<<shift);
1181 __ jccb(Assembler::zero, L_copy_2_bytes);
1182 __ movl(rdx, Address(from, count, sf, -4));
1183 __ movl(Address(to, count, sf, -4), rdx);
1185 if (t == T_BYTE || t == T_SHORT) {
1186 __ subl(count, (1<<shift));
1187 __ BIND(L_copy_2_bytes);
1188 // copy prefix word
1189 __ testl(count, 1<<(shift-1));
1190 __ jccb(Assembler::zero, L_copy_byte);
1191 __ movw(rdx, Address(from, count, sf, -2));
1192 __ movw(Address(to, count, sf, -2), rdx);
1193 if (t == T_BYTE) {
1194 __ subl(count, 1<<(shift-1));
1195 __ BIND(L_copy_byte);
1196 // copy prefix byte
1197 __ testl(count, 1);
1198 __ jccb(Assembler::zero, L_exit);
1199 __ movb(rdx, Address(from, 0));
1200 __ movb(Address(to, 0), rdx);
1201 __ BIND(L_exit);
1202 } else {
1203 __ BIND(L_copy_byte);
1204 }
1205 } else {
1206 __ BIND(L_copy_2_bytes);
1207 }
1208 if (t == T_OBJECT) {
1209 __ movl2ptr(count, Address(rsp, 12+12)); // reread count
1210 gen_write_ref_array_post_barrier(to, count);
1211 __ BIND(L_0_count);
1212 }
1213 inc_copy_counter_np(t);
1214 __ pop(rdi);
1215 __ pop(rsi);
1216 __ leave(); // required for proper stackwalking of RuntimeStub frame
1217 __ xorptr(rax, rax); // return 0
1218 __ ret(0);
1219 return start;
1220 }
1223 address generate_disjoint_long_copy(address* entry, const char *name) {
1224 __ align(CodeEntryAlignment);
1225 StubCodeMark mark(this, "StubRoutines", name);
1226 address start = __ pc();
1228 Label L_copy_8_bytes, L_copy_8_bytes_loop;
1229 const Register from = rax; // source array address
1230 const Register to = rdx; // destination array address
1231 const Register count = rcx; // elements count
1232 const Register to_from = rdx; // (to - from)
1234 __ enter(); // required for proper stackwalking of RuntimeStub frame
1235 __ movptr(from , Address(rsp, 8+0)); // from
1236 __ movptr(to , Address(rsp, 8+4)); // to
1237 __ movl2ptr(count, Address(rsp, 8+8)); // count
1239 *entry = __ pc(); // Entry point from conjoint arraycopy stub.
1240 BLOCK_COMMENT("Entry:");
1242 __ subptr(to, from); // to --> to_from
1243 if (VM_Version::supports_mmx()) {
1244 if (UseXMMForArrayCopy) {
1245 xmm_copy_forward(from, to_from, count);
1246 } else {
1247 mmx_copy_forward(from, to_from, count);
1248 }
1249 } else {
1250 __ jmpb(L_copy_8_bytes);
1251 __ align(OptoLoopAlignment);
1252 __ BIND(L_copy_8_bytes_loop);
1253 __ fild_d(Address(from, 0));
1254 __ fistp_d(Address(from, to_from, Address::times_1));
1255 __ addptr(from, 8);
1256 __ BIND(L_copy_8_bytes);
1257 __ decrement(count);
1258 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1259 }
1260 inc_copy_counter_np(T_LONG);
1261 __ leave(); // required for proper stackwalking of RuntimeStub frame
1262 __ xorptr(rax, rax); // return 0
1263 __ ret(0);
1264 return start;
1265 }
1267 address generate_conjoint_long_copy(address nooverlap_target,
1268 address* entry, const char *name) {
1269 __ align(CodeEntryAlignment);
1270 StubCodeMark mark(this, "StubRoutines", name);
1271 address start = __ pc();
1273 Label L_copy_8_bytes, L_copy_8_bytes_loop;
1274 const Register from = rax; // source array address
1275 const Register to = rdx; // destination array address
1276 const Register count = rcx; // elements count
1277 const Register end_from = rax; // source array end address
1279 __ enter(); // required for proper stackwalking of RuntimeStub frame
1280 __ movptr(from , Address(rsp, 8+0)); // from
1281 __ movptr(to , Address(rsp, 8+4)); // to
1282 __ movl2ptr(count, Address(rsp, 8+8)); // count
1284 *entry = __ pc(); // Entry point from generic arraycopy stub.
1285 BLOCK_COMMENT("Entry:");
1287 // arrays overlap test
1288 __ cmpptr(to, from);
1289 RuntimeAddress nooverlap(nooverlap_target);
1290 __ jump_cc(Assembler::belowEqual, nooverlap);
1291 __ lea(end_from, Address(from, count, Address::times_8, 0));
1292 __ cmpptr(to, end_from);
1293 __ movptr(from, Address(rsp, 8)); // from
1294 __ jump_cc(Assembler::aboveEqual, nooverlap);
1296 __ jmpb(L_copy_8_bytes);
1298 __ align(OptoLoopAlignment);
1299 __ BIND(L_copy_8_bytes_loop);
1300 if (VM_Version::supports_mmx()) {
1301 if (UseXMMForArrayCopy) {
1302 __ movq(xmm0, Address(from, count, Address::times_8));
1303 __ movq(Address(to, count, Address::times_8), xmm0);
1304 } else {
1305 __ movq(mmx0, Address(from, count, Address::times_8));
1306 __ movq(Address(to, count, Address::times_8), mmx0);
1307 }
1308 } else {
1309 __ fild_d(Address(from, count, Address::times_8));
1310 __ fistp_d(Address(to, count, Address::times_8));
1311 }
1312 __ BIND(L_copy_8_bytes);
1313 __ decrement(count);
1314 __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1316 if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
1317 __ emms();
1318 }
1319 inc_copy_counter_np(T_LONG);
1320 __ leave(); // required for proper stackwalking of RuntimeStub frame
1321 __ xorptr(rax, rax); // return 0
1322 __ ret(0);
1323 return start;
1324 }
1327 // Helper for generating a dynamic type check.
1328 // The sub_klass must be one of {rbx, rdx, rsi}.
1329 // The temp is killed.
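// Semantically this is the usual subtype check, roughly (sketch):
//   sub == super
//   || *(Klass**)((address)sub + super->super_check_offset()) == super   // display/cache hit
//   || (super_check_offset == secondary_super_cache_offset()
//       && sub->secondary_supers() contains super)                       // slow-path scan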
1330 void generate_type_check(Register sub_klass,
1331 Address& super_check_offset_addr,
1332 Address& super_klass_addr,
1333 Register temp,
1334 Label* L_success, Label* L_failure) {
1335 BLOCK_COMMENT("type_check:");
1337 Label L_fallthrough;
1338 #define LOCAL_JCC(assembler_con, label_ptr) \
1339 if (label_ptr != NULL) __ jcc(assembler_con, *(label_ptr)); \
1340 else __ jcc(assembler_con, L_fallthrough) /*omit semi*/
1342 // The following is a strange variation of the fast path which requires
1343 // one less register, because needed values are on the argument stack.
1344 // __ check_klass_subtype_fast_path(sub_klass, *super_klass*, temp,
1345 // L_success, L_failure, NULL);
1346 assert_different_registers(sub_klass, temp);
1348 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1350 // if the pointers are equal, we are done (e.g., String[] elements)
1351 __ cmpptr(sub_klass, super_klass_addr);
1352 LOCAL_JCC(Assembler::equal, L_success);
1354 // check the supertype display:
1355 __ movl2ptr(temp, super_check_offset_addr);
1356 Address super_check_addr(sub_klass, temp, Address::times_1, 0);
1357 __ movptr(temp, super_check_addr); // load displayed supertype
1358 __ cmpptr(temp, super_klass_addr); // test the super type
1359 LOCAL_JCC(Assembler::equal, L_success);
1361 // if it was a primary super, we can just fail immediately
1362 __ cmpl(super_check_offset_addr, sc_offset);
1363 LOCAL_JCC(Assembler::notEqual, L_failure);
1365 // The repne_scan instruction uses fixed registers, which will get spilled.
1366 // We happen to know this works best when super_klass is in rax.
1367 Register super_klass = temp;
1368 __ movptr(super_klass, super_klass_addr);
1369 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg,
1370 L_success, L_failure);
1372 __ bind(L_fallthrough);
1374 if (L_success == NULL) { BLOCK_COMMENT("L_success:"); }
1375 if (L_failure == NULL) { BLOCK_COMMENT("L_failure:"); }
1377 #undef LOCAL_JCC
1378 }
1380 //
1381 // Generate checkcasting array copy stub
1382 //
1383 // Input:
1384 // 4(rsp) - source array address
1385 // 8(rsp) - destination array address
1386 // 12(rsp) - element count, can be zero
1387 // 16(rsp) - size_t ckoff (super_check_offset)
1388 // 20(rsp) - oop ckval (super_klass)
1389 //
1390 // Output:
1391 // rax, == 0 - success
1392 // rax, == -1^K - failure, where K is partial transfer count
1393 //
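// That is, on a partial transfer the stub returns ~K (== -1 - K) for the K elements
// copied before the failing element; the caller recovers K and completes the copy via
// the slow path.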
1394 address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) {
1395 __ align(CodeEntryAlignment);
1396 StubCodeMark mark(this, "StubRoutines", name);
1397 address start = __ pc();
1399 Label L_load_element, L_store_element, L_do_card_marks, L_done;
1401 // register use:
1402 // rax, rdx, rcx -- loop control (end_from, end_to, count)
1403 // rdi, rsi -- element access (oop, klass)
1404 // rbx, -- temp
1405 const Register from = rax; // source array address
1406 const Register to = rdx; // destination array address
1407 const Register length = rcx; // elements count
1408 const Register elem = rdi; // each oop copied
1409 const Register elem_klass = rsi; // each elem._klass (sub_klass)
1410 const Register temp = rbx; // lone remaining temp
1412 __ enter(); // required for proper stackwalking of RuntimeStub frame
1414 __ push(rsi);
1415 __ push(rdi);
1416 __ push(rbx);
1418 Address from_arg(rsp, 16+ 4); // from
1419 Address to_arg(rsp, 16+ 8); // to
1420 Address length_arg(rsp, 16+12); // elements count
1421 Address ckoff_arg(rsp, 16+16); // super_check_offset
1422 Address ckval_arg(rsp, 16+20); // super_klass
1424 // Load up:
1425 __ movptr(from, from_arg);
1426 __ movptr(to, to_arg);
1427 __ movl2ptr(length, length_arg);
1429 if (entry != NULL) {
1430 *entry = __ pc(); // Entry point from generic arraycopy stub.
1431 BLOCK_COMMENT("Entry:");
1432 }
1434 //---------------------------------------------------------------
1435 // Assembler stub will be used for this call to arraycopy
1436 // if the two arrays are subtypes of Object[] but the
1437 // destination array type is not equal to or a supertype
1438 // of the source type. Each element must be separately
1439 // checked.
1441 // Loop-invariant addresses. They are exclusive end pointers.
1442 Address end_from_addr(from, length, Address::times_ptr, 0);
1443 Address end_to_addr(to, length, Address::times_ptr, 0);
1445 Register end_from = from; // re-use
1446 Register end_to = to; // re-use
1447 Register count = length; // re-use
1449 // Loop-variant addresses. They assume post-incremented count < 0.
1450 Address from_element_addr(end_from, count, Address::times_ptr, 0);
1451 Address to_element_addr(end_to, count, Address::times_ptr, 0);
1452 Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
1454 // Copy from low to high addresses, indexed from the end of each array.
1455 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1456 __ lea(end_from, end_from_addr);
1457 __ lea(end_to, end_to_addr);
1458 assert(length == count, ""); // else fix next line:
1459 __ negptr(count); // negate and test the length
1460 __ jccb(Assembler::notZero, L_load_element);
1462 // Empty array: Nothing to do.
1463 __ xorptr(rax, rax); // return 0 on (trivial) success
1464 __ jmp(L_done);
1466 // ======== begin loop ========
1467 // (Loop is rotated; its entry is L_load_element.)
1468 // Loop control:
1469 // for (count = -count; count != 0; count++)
1470 // Base pointers src, dst are biased by wordSize*count, to last element.
1471 __ align(OptoLoopAlignment);
1473 __ BIND(L_store_element);
1474 __ movptr(to_element_addr, elem); // store the oop
1475 __ increment(count); // increment the count toward zero
1476 __ jccb(Assembler::zero, L_do_card_marks);
1478 // ======== loop entry is here ========
1479 __ BIND(L_load_element);
1480 __ movptr(elem, from_element_addr); // load the oop
1481 __ testptr(elem, elem);
1482 __ jccb(Assembler::zero, L_store_element);
1484 // (Could do a trick here: Remember last successful non-null
1485 // element stored and make a quick oop equality check on it.)
1487 __ movptr(elem_klass, elem_klass_addr); // query the object klass
1488 generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
1489 &L_store_element, NULL);
1490 // (On fall-through, we have failed the element type check.)
1491 // ======== end loop ========
1493 // It was a real error; we must depend on the caller to finish the job.
1494 // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
1495 // Emit GC store barriers for the oops we have copied (length_arg + count),
1496 // and report their number to the caller.
1497 __ addl(count, length_arg); // transfers = (length - remaining)
1498 __ movl2ptr(rax, count); // save the value
1499 __ notptr(rax); // report (-1^K) to caller
1500 __ movptr(to, to_arg); // reload
1501 assert_different_registers(to, count, rax);
1502 gen_write_ref_array_post_barrier(to, count);
1503 __ jmpb(L_done);
1505 // Come here on success only.
1506 __ BIND(L_do_card_marks);
1507 __ movl2ptr(count, length_arg);
1508 __ movptr(to, to_arg); // reload
1509 gen_write_ref_array_post_barrier(to, count);
1510 __ xorptr(rax, rax); // return 0 on success
1512 // Common exit point (success or failure).
1513 __ BIND(L_done);
1514 __ pop(rbx);
1515 __ pop(rdi);
1516 __ pop(rsi);
1517 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
1518 __ leave(); // required for proper stackwalking of RuntimeStub frame
1519 __ ret(0);
1521 return start;
1522 }
1524 //
1525 // Generate 'unsafe' array copy stub
1526 // Though just as safe as the other stubs, it takes an unscaled
1527 // size_t argument instead of an element count.
1528 //
1529 // Input:
1530 // 4(rsp) - source array address
1531 // 8(rsp) - destination array address
1532 // 12(rsp) - byte count, can be zero
1533 //
1534 // Output:
1535 // rax, == 0 - success
1536 // rax, == -1 - need to call System.arraycopy
1537 //
1538 // Examines the alignment of the operands and dispatches
1539 // to a long, int, short, or byte copy loop.
1540 //
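// Dispatch sketch: with bits = (uintptr_t)from | (uintptr_t)to | byte_count,
//   (bits & 7) == 0  -> jlong copy,  count = byte_count >> 3
//   (bits & 3) == 0  -> jint copy,   count = byte_count >> 2
//   (bits & 1) == 0  -> jshort copy, count = byte_count >> 1
//   otherwise        -> jbyte copy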
1541 address generate_unsafe_copy(const char *name,
1542 address byte_copy_entry,
1543 address short_copy_entry,
1544 address int_copy_entry,
1545 address long_copy_entry) {
1547 Label L_long_aligned, L_int_aligned, L_short_aligned;
1549 __ align(CodeEntryAlignment);
1550 StubCodeMark mark(this, "StubRoutines", name);
1551 address start = __ pc();
1553 const Register from = rax; // source array address
1554 const Register to = rdx; // destination array address
1555 const Register count = rcx; // elements count
1557 __ enter(); // required for proper stackwalking of RuntimeStub frame
1558 __ push(rsi);
1559 __ push(rdi);
1560 Address from_arg(rsp, 12+ 4); // from
1561 Address to_arg(rsp, 12+ 8); // to
1562 Address count_arg(rsp, 12+12); // byte count
1564 // Load up:
1565 __ movptr(from , from_arg);
1566 __ movptr(to , to_arg);
1567 __ movl2ptr(count, count_arg);
1569 // bump this on entry, not on exit:
1570 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
1572 const Register bits = rsi;
1573 __ mov(bits, from);
1574 __ orptr(bits, to);
1575 __ orptr(bits, count);
1577 __ testl(bits, BytesPerLong-1);
1578 __ jccb(Assembler::zero, L_long_aligned);
1580 __ testl(bits, BytesPerInt-1);
1581 __ jccb(Assembler::zero, L_int_aligned);
1583 __ testl(bits, BytesPerShort-1);
1584 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
1586 __ BIND(L_short_aligned);
1587 __ shrptr(count, LogBytesPerShort); // size => short_count
1588 __ movl(count_arg, count); // update 'count'
1589 __ jump(RuntimeAddress(short_copy_entry));
1591 __ BIND(L_int_aligned);
1592 __ shrptr(count, LogBytesPerInt); // size => int_count
1593 __ movl(count_arg, count); // update 'count'
1594 __ jump(RuntimeAddress(int_copy_entry));
1596 __ BIND(L_long_aligned);
1597 __ shrptr(count, LogBytesPerLong); // size => qword_count
1598 __ movl(count_arg, count); // update 'count'
1599 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
1600 __ pop(rsi);
1601 __ jump(RuntimeAddress(long_copy_entry));
1603 return start;
1604 }
1607 // Perform range checks on the proposed arraycopy.
1608 // Smashes src_pos and dst_pos. (Uses them up for temps.)
1609 void arraycopy_range_checks(Register src,
1610 Register src_pos,
1611 Register dst,
1612 Register dst_pos,
1613 Address& length,
1614 Label& L_failed) {
1615 BLOCK_COMMENT("arraycopy_range_checks:");
1616 const Register src_end = src_pos; // source array end position
1617 const Register dst_end = dst_pos; // destination array end position
1618 __ addl(src_end, length); // src_pos + length
1619 __ addl(dst_end, length); // dst_pos + length
1621 // if (src_pos + length > arrayOop(src)->length() ) FAIL;
1622 __ cmpl(src_end, Address(src, arrayOopDesc::length_offset_in_bytes()));
1623 __ jcc(Assembler::above, L_failed);
1625 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
1626 __ cmpl(dst_end, Address(dst, arrayOopDesc::length_offset_in_bytes()));
1627 __ jcc(Assembler::above, L_failed);
1629 BLOCK_COMMENT("arraycopy_range_checks done");
1630 }
1633 //
1634 // Generate generic array copy stubs
1635 //
1636 // Input:
1637 // 4(rsp) - src oop
1638 // 8(rsp) - src_pos
1639 // 12(rsp) - dst oop
1640 // 16(rsp) - dst_pos
1641 // 20(rsp) - element count
1642 //
1643 // Output:
1644 // rax, == 0 - success
1645 // rax, == -1^K - failure, where K is partial transfer count
1646 //
1647 address generate_generic_copy(const char *name,
1648 address entry_jbyte_arraycopy,
1649 address entry_jshort_arraycopy,
1650 address entry_jint_arraycopy,
1651 address entry_oop_arraycopy,
1652 address entry_jlong_arraycopy,
1653 address entry_checkcast_arraycopy) {
1654 Label L_failed, L_failed_0, L_objArray;
1656 { int modulus = CodeEntryAlignment;
1657 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
1658 int advance = target - (__ offset() % modulus);
1659 if (advance < 0) advance += modulus;
1660 if (advance > 0) __ nop(advance);
1661 }
1662 StubCodeMark mark(this, "StubRoutines", name);
1664 // Short-hop target to L_failed. Makes for denser prologue code.
1665 __ BIND(L_failed_0);
1666 __ jmp(L_failed);
1667 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
1669 __ align(CodeEntryAlignment);
1670 address start = __ pc();
1672 __ enter(); // required for proper stackwalking of RuntimeStub frame
1673 __ push(rsi);
1674 __ push(rdi);
1676 // bump this on entry, not on exit:
1677 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
1679 // Input values
1680 Address SRC (rsp, 12+ 4);
1681 Address SRC_POS (rsp, 12+ 8);
1682 Address DST (rsp, 12+12);
1683 Address DST_POS (rsp, 12+16);
1684 Address LENGTH (rsp, 12+20);
1686 //-----------------------------------------------------------------------
1687 // Assembler stub will be used for this call to arraycopy
1688 // if the following conditions are met:
1689 //
1690 // (1) src and dst must not be null.
1691 // (2) src_pos must not be negative.
1692 // (3) dst_pos must not be negative.
1693 // (4) length must not be negative.
1694 // (5) src klass and dst klass should be the same and not NULL.
1695 // (6) src and dst should be arrays.
1696 // (7) src_pos + length must not exceed length of src.
1697 // (8) dst_pos + length must not exceed length of dst.
1698 //
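// Roughly equivalent guard, for illustration only (the objArray path below also
// admits assignable, not just identical, element klasses):
//   if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0 ||
//       src->klass() != dst->klass() || !src->is_array() ||
//       src_pos + length > src->length() || dst_pos + length > dst->length())
//     return -1;  // caller falls back to the slow path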
1700 const Register src = rax; // source array oop
1701 const Register src_pos = rsi;
1702 const Register dst = rdx; // destination array oop
1703 const Register dst_pos = rdi;
1704 const Register length = rcx; // transfer count
1706 // if (src == NULL) return -1;
1707 __ movptr(src, SRC); // src oop
1708 __ testptr(src, src);
1709 __ jccb(Assembler::zero, L_failed_0);
1711 // if (src_pos < 0) return -1;
1712 __ movl2ptr(src_pos, SRC_POS); // src_pos
1713 __ testl(src_pos, src_pos);
1714 __ jccb(Assembler::negative, L_failed_0);
1716 // if (dst == NULL) return -1;
1717 __ movptr(dst, DST); // dst oop
1718 __ testptr(dst, dst);
1719 __ jccb(Assembler::zero, L_failed_0);
1721 // if (dst_pos < 0) return -1;
1722 __ movl2ptr(dst_pos, DST_POS); // dst_pos
1723 __ testl(dst_pos, dst_pos);
1724 __ jccb(Assembler::negative, L_failed_0);
1726 // if (length < 0) return -1;
1727 __ movl2ptr(length, LENGTH); // length
1728 __ testl(length, length);
1729 __ jccb(Assembler::negative, L_failed_0);
1731 // if (src->klass() == NULL) return -1;
1732 Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
1733 Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
1734 const Register rcx_src_klass = rcx; // array klass
1735 __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
1737 #ifdef ASSERT
1738 // assert(src->klass() != NULL);
1739 BLOCK_COMMENT("assert klasses not null");
1740 { Label L1, L2;
1741 __ testptr(rcx_src_klass, rcx_src_klass);
1742 __ jccb(Assembler::notZero, L2); // it is broken if klass is NULL
1743 __ bind(L1);
1744 __ stop("broken null klass");
1745 __ bind(L2);
1746 __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD);
1747 __ jccb(Assembler::equal, L1); // this would be broken also
1748 BLOCK_COMMENT("assert done");
1749 }
1750 #endif //ASSERT
1752 // Load layout helper (32-bits)
1753 //
1754 // |array_tag| | header_size | element_type | |log2_element_size|
1755 // 32 30 24 16 8 2 0
1756 //
1757 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
1758 //
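// Illustrative example (assuming the usual 12-byte array header on this 32-bit VM):
// an int[] has layout helper roughly (0x3 << 30) | (12 << 16) | (T_INT << 8) | 2,
// i.e. typeArray tag, 12-byte header, T_INT elements, log2_element_size == 2.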
1760 int lh_offset = in_bytes(Klass::layout_helper_offset());
1761 Address src_klass_lh_addr(rcx_src_klass, lh_offset);
1763 // Handle objArrays completely differently...
1764 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
1765 __ cmpl(src_klass_lh_addr, objArray_lh);
1766 __ jcc(Assembler::equal, L_objArray);
1768 // if (src->klass() != dst->klass()) return -1;
1769 __ cmpptr(rcx_src_klass, dst_klass_addr);
1770 __ jccb(Assembler::notEqual, L_failed_0);
1772 const Register rcx_lh = rcx; // layout helper
1773 assert(rcx_lh == rcx_src_klass, "known alias");
1774 __ movl(rcx_lh, src_klass_lh_addr);
1776 // if (!src->is_Array()) return -1;
1777 __ cmpl(rcx_lh, Klass::_lh_neutral_value);
1778 __ jcc(Assembler::greaterEqual, L_failed_0); // signed cmp
1780 // At this point, it is known to be a typeArray (array_tag 0x3).
1781 #ifdef ASSERT
1782 { Label L;
1783 __ cmpl(rcx_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
1784 __ jcc(Assembler::greaterEqual, L); // signed cmp
1785 __ stop("must be a primitive array");
1786 __ bind(L);
1787 }
1788 #endif
1790 assert_different_registers(src, src_pos, dst, dst_pos, rcx_lh);
1791 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1793 // TypeArrayKlass
1794 //
1795 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
1796 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
1797 //
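// For example, copying int[] elements starting at src_pos == 3 yields
// src_addr = src + array_header_in_bytes() + (3 << 2), where both the header size
// and the log2 element size are extracted from the layout helper loaded above.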
1798 const Register rsi_offset = rsi; // array offset
1799 const Register src_array = src; // src array offset
1800 const Register dst_array = dst; // dst array offset
1801 const Register rdi_elsize = rdi; // log2 element size
1803 __ mov(rsi_offset, rcx_lh);
1804 __ shrptr(rsi_offset, Klass::_lh_header_size_shift);
1805 __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset
1806 __ addptr(src_array, rsi_offset); // src array offset
1807 __ addptr(dst_array, rsi_offset); // dst array offset
1808 __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize
1810 // next registers should be set before the jump to corresponding stub
1811 const Register from = src; // source array address
1812 const Register to = dst; // destination array address
1813 const Register count = rcx; // elements count
1814 // some of them should be duplicated on stack
1815 #define FROM Address(rsp, 12+ 4)
1816 #define TO Address(rsp, 12+ 8) // Not used now
1817 #define COUNT Address(rsp, 12+12) // Only for oop arraycopy
1819 BLOCK_COMMENT("scale indexes to element size");
1820 __ movl2ptr(rsi, SRC_POS); // src_pos
1821 __ shlptr(rsi); // src_pos << rcx (log2 elsize)
1822 assert(src_array == from, "");
1823 __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize
1824 __ movl2ptr(rdi, DST_POS); // dst_pos
1825 __ shlptr(rdi); // dst_pos << rcx (log2 elsize)
1826 assert(dst_array == to, "");
1827 __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize
1828 __ movptr(FROM, from); // src_addr
1829 __ mov(rdi_elsize, rcx_lh); // log2 elsize
1830 __ movl2ptr(count, LENGTH); // elements count
1832 BLOCK_COMMENT("choose copy loop based on element size");
1833 __ cmpl(rdi_elsize, 0);
1835 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jbyte_arraycopy));
1836 __ cmpl(rdi_elsize, LogBytesPerShort);
1837 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jshort_arraycopy));
1838 __ cmpl(rdi_elsize, LogBytesPerInt);
1839 __ jump_cc(Assembler::equal, RuntimeAddress(entry_jint_arraycopy));
1840 #ifdef ASSERT
1841 __ cmpl(rdi_elsize, LogBytesPerLong);
1842 __ jccb(Assembler::notEqual, L_failed);
1843 #endif
1844 __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
1845 __ pop(rsi);
1846 __ jump(RuntimeAddress(entry_jlong_arraycopy));
1848 __ BIND(L_failed);
1849 __ xorptr(rax, rax);
1850 __ notptr(rax); // return -1
1851 __ pop(rdi);
1852 __ pop(rsi);
1853 __ leave(); // required for proper stackwalking of RuntimeStub frame
1854 __ ret(0);
1856 // ObjArrayKlass
1857 __ BIND(L_objArray);
1858 // live at this point: rcx_src_klass, src[_pos], dst[_pos]
1860 Label L_plain_copy, L_checkcast_copy;
1861 // test array classes for subtyping
1862 __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality
1863 __ jccb(Assembler::notEqual, L_checkcast_copy);
1865 // Identically typed arrays can be copied without element-wise checks.
1866 assert_different_registers(src, src_pos, dst, dst_pos, rcx_src_klass);
1867 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1869 __ BIND(L_plain_copy);
1870 __ movl2ptr(count, LENGTH); // elements count
1871 __ movl2ptr(src_pos, SRC_POS); // reload src_pos
1872 __ lea(from, Address(src, src_pos, Address::times_ptr,
1873 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
1874 __ movl2ptr(dst_pos, DST_POS); // reload dst_pos
1875 __ lea(to, Address(dst, dst_pos, Address::times_ptr,
1876 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
1877 __ movptr(FROM, from); // src_addr
1878 __ movptr(TO, to); // dst_addr
1879 __ movl(COUNT, count); // count
1880 __ jump(RuntimeAddress(entry_oop_arraycopy));
1882 __ BIND(L_checkcast_copy);
1883 // live at this point: rcx_src_klass, dst[_pos], src[_pos]
1884 {
1885 // Handy offsets:
1886 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
1887 int sco_offset = in_bytes(Klass::super_check_offset_offset());
1889 Register rsi_dst_klass = rsi;
1890 Register rdi_temp = rdi;
1891 assert(rsi_dst_klass == src_pos, "expected alias w/ src_pos");
1892 assert(rdi_temp == dst_pos, "expected alias w/ dst_pos");
1893 Address dst_klass_lh_addr(rsi_dst_klass, lh_offset);
1895 // Before looking at dst.length, make sure dst is also an objArray.
1896 __ movptr(rsi_dst_klass, dst_klass_addr);
1897 __ cmpl(dst_klass_lh_addr, objArray_lh);
1898 __ jccb(Assembler::notEqual, L_failed);
1900 // It is safe to examine both src.length and dst.length.
1901 __ movl2ptr(src_pos, SRC_POS); // reload rsi
1902 arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
1903 // (Now src_pos and dst_pos are killed, but not src and dst.)
1905 // We'll need this temp (don't forget to pop it after the type check).
1906 __ push(rbx);
1907 Register rbx_src_klass = rbx;
1909 __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx
1910 __ movptr(rsi_dst_klass, dst_klass_addr);
1911 Address super_check_offset_addr(rsi_dst_klass, sco_offset);
1912 Label L_fail_array_check;
1913 generate_type_check(rbx_src_klass,
1914 super_check_offset_addr, dst_klass_addr,
1915 rdi_temp, NULL, &L_fail_array_check);
1916 // (On fall-through, we have passed the array type check.)
1917 __ pop(rbx);
1918 __ jmp(L_plain_copy);
1920 __ BIND(L_fail_array_check);
1921 // Reshuffle arguments so we can call checkcast_arraycopy:
1923 // match initial saves for checkcast_arraycopy
1924 // push(rsi); // already done; see above
1925 // push(rdi); // already done; see above
1926 // push(rbx); // already done; see above
1928 // Marshal outgoing arguments now, freeing registers.
1929 Address from_arg(rsp, 16+ 4); // from
1930 Address to_arg(rsp, 16+ 8); // to
1931 Address length_arg(rsp, 16+12); // elements count
1932 Address ckoff_arg(rsp, 16+16); // super_check_offset
1933 Address ckval_arg(rsp, 16+20); // super_klass
1935 Address SRC_POS_arg(rsp, 16+ 8);
1936 Address DST_POS_arg(rsp, 16+16);
1937 Address LENGTH_arg(rsp, 16+20);
1938 // the push of rbx above changed the incoming argument offsets (why not just use rbp instead??)
1939 // assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, "");
1941 __ movptr(rbx, Address(rsi_dst_klass, ek_offset));
1942 __ movl2ptr(length, LENGTH_arg); // reload elements count
1943 __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos
1944 __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos
1946 __ movptr(ckval_arg, rbx); // destination element type
1947 __ movl(rbx, Address(rbx, sco_offset));
1948 __ movl(ckoff_arg, rbx); // corresponding class check offset
1950 __ movl(length_arg, length); // outgoing length argument
1952 __ lea(from, Address(src, src_pos, Address::times_ptr,
1953 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1954 __ movptr(from_arg, from);
1956 __ lea(to, Address(dst, dst_pos, Address::times_ptr,
1957 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1958 __ movptr(to_arg, to);
1959 __ jump(RuntimeAddress(entry_checkcast_arraycopy));
1960 }
1962 return start;
1963 }
1965 void generate_arraycopy_stubs() {
1966 address entry;
1967 address entry_jbyte_arraycopy;
1968 address entry_jshort_arraycopy;
1969 address entry_jint_arraycopy;
1970 address entry_oop_arraycopy;
1971 address entry_jlong_arraycopy;
1972 address entry_checkcast_arraycopy;
1974 StubRoutines::_arrayof_jbyte_disjoint_arraycopy =
1975 generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry,
1976 "arrayof_jbyte_disjoint_arraycopy");
1977 StubRoutines::_arrayof_jbyte_arraycopy =
1978 generate_conjoint_copy(T_BYTE, true, Address::times_1, entry,
1979 NULL, "arrayof_jbyte_arraycopy");
1980 StubRoutines::_jbyte_disjoint_arraycopy =
1981 generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry,
1982 "jbyte_disjoint_arraycopy");
1983 StubRoutines::_jbyte_arraycopy =
1984 generate_conjoint_copy(T_BYTE, false, Address::times_1, entry,
1985 &entry_jbyte_arraycopy, "jbyte_arraycopy");
1987 StubRoutines::_arrayof_jshort_disjoint_arraycopy =
1988 generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry,
1989 "arrayof_jshort_disjoint_arraycopy");
1990 StubRoutines::_arrayof_jshort_arraycopy =
1991 generate_conjoint_copy(T_SHORT, true, Address::times_2, entry,
1992 NULL, "arrayof_jshort_arraycopy");
1993 StubRoutines::_jshort_disjoint_arraycopy =
1994 generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry,
1995 "jshort_disjoint_arraycopy");
1996 StubRoutines::_jshort_arraycopy =
1997 generate_conjoint_copy(T_SHORT, false, Address::times_2, entry,
1998 &entry_jshort_arraycopy, "jshort_arraycopy");
2000 // Next arrays are always aligned on 4 bytes at least.
2001 StubRoutines::_jint_disjoint_arraycopy =
2002 generate_disjoint_copy(T_INT, true, Address::times_4, &entry,
2003 "jint_disjoint_arraycopy");
2004 StubRoutines::_jint_arraycopy =
2005 generate_conjoint_copy(T_INT, true, Address::times_4, entry,
2006 &entry_jint_arraycopy, "jint_arraycopy");
2008 StubRoutines::_oop_disjoint_arraycopy =
2009 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
2010 "oop_disjoint_arraycopy");
2011 StubRoutines::_oop_arraycopy =
2012 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
2013 &entry_oop_arraycopy, "oop_arraycopy");
2015 StubRoutines::_oop_disjoint_arraycopy_uninit =
2016 generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
2017 "oop_disjoint_arraycopy_uninit",
2018 /*dest_uninitialized*/true);
2019 StubRoutines::_oop_arraycopy_uninit =
2020 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
2021 NULL, "oop_arraycopy_uninit",
2022 /*dest_uninitialized*/true);
2024 StubRoutines::_jlong_disjoint_arraycopy =
2025 generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy");
2026 StubRoutines::_jlong_arraycopy =
2027 generate_conjoint_long_copy(entry, &entry_jlong_arraycopy,
2028 "jlong_arraycopy");
2030 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2031 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2032 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2033 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2034 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2035 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2037 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2038 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2039 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
2040 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2042 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2043 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2044 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
2045 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2047 StubRoutines::_checkcast_arraycopy =
2048 generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
2049 StubRoutines::_checkcast_arraycopy_uninit =
2050 generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true);
2052 StubRoutines::_unsafe_arraycopy =
2053 generate_unsafe_copy("unsafe_arraycopy",
2054 entry_jbyte_arraycopy,
2055 entry_jshort_arraycopy,
2056 entry_jint_arraycopy,
2057 entry_jlong_arraycopy);
2059 StubRoutines::_generic_arraycopy =
2060 generate_generic_copy("generic_arraycopy",
2061 entry_jbyte_arraycopy,
2062 entry_jshort_arraycopy,
2063 entry_jint_arraycopy,
2064 entry_oop_arraycopy,
2065 entry_jlong_arraycopy,
2066 entry_checkcast_arraycopy);
2067 }
2069 void generate_math_stubs() {
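// Each stub below uses the raw 32-bit C calling convention: the double argument(s)
// start at [rsp + 4], just above the return address, and the result is returned
// in ST(0) on the x87 stack.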
2070 {
2071 StubCodeMark mark(this, "StubRoutines", "log");
2072 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2074 __ fld_d(Address(rsp, 4));
2075 __ flog();
2076 __ ret(0);
2077 }
2078 {
2079 StubCodeMark mark(this, "StubRoutines", "log10");
2080 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2082 __ fld_d(Address(rsp, 4));
2083 __ flog10();
2084 __ ret(0);
2085 }
2086 {
2087 StubCodeMark mark(this, "StubRoutines", "sin");
2088 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2090 __ fld_d(Address(rsp, 4));
2091 __ trigfunc('s');
2092 __ ret(0);
2093 }
2094 {
2095 StubCodeMark mark(this, "StubRoutines", "cos");
2096 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2098 __ fld_d(Address(rsp, 4));
2099 __ trigfunc('c');
2100 __ ret(0);
2101 }
2102 {
2103 StubCodeMark mark(this, "StubRoutines", "tan");
2104 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2106 __ fld_d(Address(rsp, 4));
2107 __ trigfunc('t');
2108 __ ret(0);
2109 }
2110 {
2111 StubCodeMark mark(this, "StubRoutines", "exp");
2112 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc();
2114 __ fld_d(Address(rsp, 4));
2115 __ exp_with_fallback(0);
2116 __ ret(0);
2117 }
2118 {
2119 StubCodeMark mark(this, "StubRoutines", "pow");
2120 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc();
2122 __ fld_d(Address(rsp, 12));
2123 __ fld_d(Address(rsp, 4));
2124 __ pow_with_fallback(0);
2125 __ ret(0);
2126 }
2127 }
2129 // AES intrinsic stubs
2130 enum {AESBlockSize = 16};
2132 address generate_key_shuffle_mask() {
2133 __ align(16);
2134 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
2135 address start = __ pc();
2136 __ emit_data(0x00010203, relocInfo::none, 0 );
2137 __ emit_data(0x04050607, relocInfo::none, 0 );
2138 __ emit_data(0x08090a0b, relocInfo::none, 0 );
2139 __ emit_data(0x0c0d0e0f, relocInfo::none, 0 );
2140 return start;
2141 }
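// The mask above (bytes 03 02 01 00 | 07 06 05 04 | ...) is used with pshufb to
// reverse the bytes within each 32-bit word, turning the round-key words as stored
// by the Java key schedule into the byte order the AES instructions expect.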
2143 // Utility routine for loading a 128-bit key word in little-endian format;
2144 // can optionally specify that the shuffle mask is already in an XMM register
2145 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
2146 __ movdqu(xmmdst, Address(key, offset));
2147 if (xmm_shuf_mask != NULL) {
2148 __ pshufb(xmmdst, xmm_shuf_mask);
2149 } else {
2150 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
2151 }
2152 }
2154 // aesenc using specified key+offset
2155 // can optionally specify that the shuffle mask is already in an XMM register
2156 void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
2157 load_key(xmmtmp, key, offset, xmm_shuf_mask);
2158 __ aesenc(xmmdst, xmmtmp);
2159 }
2161 // aesdec using specified key+offset
2162 // can optionally specify that the shuffle mask is already in an XMM register
2163 void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
2164 load_key(xmmtmp, key, offset, xmm_shuf_mask);
2165 __ aesdec(xmmdst, xmmtmp);
2166 }
2169 // Arguments:
2170 //
2171 // Inputs:
2172 // c_rarg0 - source byte array address
2173 // c_rarg1 - destination byte array address
2174 // c_rarg2 - K (key) in little endian int array
2175 //
2176 address generate_aescrypt_encryptBlock() {
2177 assert(UseAES, "need AES instructions and misaligned SSE support");
2178 __ align(CodeEntryAlignment);
2179 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
2180 Label L_doLast;
2181 address start = __ pc();
2183 const Register from = rdx; // source array address
2184 const Register to = rdx; // destination array address
2185 const Register key = rcx; // key array address
2186 const Register keylen = rax;
2187 const Address from_param(rbp, 8+0);
2188 const Address to_param (rbp, 8+4);
2189 const Address key_param (rbp, 8+8);
2191 const XMMRegister xmm_result = xmm0;
2192 const XMMRegister xmm_key_shuf_mask = xmm1;
2193 const XMMRegister xmm_temp1 = xmm2;
2194 const XMMRegister xmm_temp2 = xmm3;
2195 const XMMRegister xmm_temp3 = xmm4;
2196 const XMMRegister xmm_temp4 = xmm5;
2198 __ enter(); // required for proper stackwalking of RuntimeStub frame
2199 __ movptr(from, from_param);
2200 __ movptr(key, key_param);
2202 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
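// (AES-128 expands to 11 round keys, AES-192 to 13, AES-256 to 15; each round key
// is four ints, hence 44, 52 or 60.)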
2203 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
2205 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
2206 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input
2207 __ movptr(to, to_param);
2209 // For encryption, the java expanded key ordering is just what we need
2211 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
2212 __ pxor(xmm_result, xmm_temp1);
2214 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
2215 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
2216 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
2217 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
2219 __ aesenc(xmm_result, xmm_temp1);
2220 __ aesenc(xmm_result, xmm_temp2);
2221 __ aesenc(xmm_result, xmm_temp3);
2222 __ aesenc(xmm_result, xmm_temp4);
2224 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
2225 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
2226 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
2227 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
2229 __ aesenc(xmm_result, xmm_temp1);
2230 __ aesenc(xmm_result, xmm_temp2);
2231 __ aesenc(xmm_result, xmm_temp3);
2232 __ aesenc(xmm_result, xmm_temp4);
2234 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
2235 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
2237 __ cmpl(keylen, 44);
2238 __ jccb(Assembler::equal, L_doLast);
2240 __ aesenc(xmm_result, xmm_temp1);
2241 __ aesenc(xmm_result, xmm_temp2);
2243 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
2244 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
2246 __ cmpl(keylen, 52);
2247 __ jccb(Assembler::equal, L_doLast);
2249 __ aesenc(xmm_result, xmm_temp1);
2250 __ aesenc(xmm_result, xmm_temp2);
2252 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
2253 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
2255 __ BIND(L_doLast);
2256 __ aesenc(xmm_result, xmm_temp1);
2257 __ aesenclast(xmm_result, xmm_temp2);
2258 __ movdqu(Address(to, 0), xmm_result); // store the result
2259 __ xorptr(rax, rax); // return 0
2260 __ leave(); // required for proper stackwalking of RuntimeStub frame
2261 __ ret(0);
2263 return start;
2264 }
2267 // Arguments:
2268 //
2269 // Inputs:
2270 // c_rarg0 - source byte array address
2271 // c_rarg1 - destination byte array address
2272 // c_rarg2 - K (key) in little endian int array
2273 //
2274 address generate_aescrypt_decryptBlock() {
2275 assert(UseAES, "need AES instructions and misaligned SSE support");
2276 __ align(CodeEntryAlignment);
2277 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
2278 Label L_doLast;
2279 address start = __ pc();
2281 const Register from = rdx; // source array address
2282 const Register to = rdx; // destination array address
2283 const Register key = rcx; // key array address
2284 const Register keylen = rax;
2285 const Address from_param(rbp, 8+0);
2286 const Address to_param (rbp, 8+4);
2287 const Address key_param (rbp, 8+8);
2289 const XMMRegister xmm_result = xmm0;
2290 const XMMRegister xmm_key_shuf_mask = xmm1;
2291 const XMMRegister xmm_temp1 = xmm2;
2292 const XMMRegister xmm_temp2 = xmm3;
2293 const XMMRegister xmm_temp3 = xmm4;
2294 const XMMRegister xmm_temp4 = xmm5;
2296 __ enter(); // required for proper stackwalking of RuntimeStub frame
2297 __ movptr(from, from_param);
2298 __ movptr(key, key_param);
2300 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
2301 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
2303 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
2304 __ movdqu(xmm_result, Address(from, 0));
2305 __ movptr(to, to_param);
2307 // for decryption java expanded key ordering is rotated one position from what we want
2308 // so we start from 0x10 here and hit 0x00 last
2309 // we don't know if the key is aligned, hence not using load-execute form
2310 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
2311 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
2312 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
2313 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
2315 __ pxor (xmm_result, xmm_temp1);
2316 __ aesdec(xmm_result, xmm_temp2);
2317 __ aesdec(xmm_result, xmm_temp3);
2318 __ aesdec(xmm_result, xmm_temp4);
2320 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
2321 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
2322 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
2323 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
2325 __ aesdec(xmm_result, xmm_temp1);
2326 __ aesdec(xmm_result, xmm_temp2);
2327 __ aesdec(xmm_result, xmm_temp3);
2328 __ aesdec(xmm_result, xmm_temp4);
2330 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
2331 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
2332 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);
2334 __ cmpl(keylen, 44);
2335 __ jccb(Assembler::equal, L_doLast);
2337 __ aesdec(xmm_result, xmm_temp1);
2338 __ aesdec(xmm_result, xmm_temp2);
2340 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
2341 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
2343 __ cmpl(keylen, 52);
2344 __ jccb(Assembler::equal, L_doLast);
2346 __ aesdec(xmm_result, xmm_temp1);
2347 __ aesdec(xmm_result, xmm_temp2);
2349 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
2350 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
2352 __ BIND(L_doLast);
2353 __ aesdec(xmm_result, xmm_temp1);
2354 __ aesdec(xmm_result, xmm_temp2);
2356 // for decryption the aesdeclast operation is always on key+0x00
2357 __ aesdeclast(xmm_result, xmm_temp3);
2358 __ movdqu(Address(to, 0), xmm_result); // store the result
2359 __ xorptr(rax, rax); // return 0
2360 __ leave(); // required for proper stackwalking of RuntimeStub frame
2361 __ ret(0);
2363 return start;
2364 }
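// Save or restore the save-on-entry (callee-saved) registers rsi, rdi and rbx
// in the slots just below the saved rbp of the current RuntimeStub frame.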
2366 void handleSOERegisters(bool saving) {
2367 const int saveFrameSizeInBytes = 4 * wordSize;
2368 const Address saved_rbx (rbp, -3 * wordSize);
2369 const Address saved_rsi (rbp, -2 * wordSize);
2370 const Address saved_rdi (rbp, -1 * wordSize);
2372 if (saving) {
2373 __ subptr(rsp, saveFrameSizeInBytes);
2374 __ movptr(saved_rsi, rsi);
2375 __ movptr(saved_rdi, rdi);
2376 __ movptr(saved_rbx, rbx);
2377 } else {
2378 // restoring
2379 __ movptr(rsi, saved_rsi);
2380 __ movptr(rdi, saved_rdi);
2381 __ movptr(rbx, saved_rbx);
2382 }
2383 }
2385 // Arguments:
2386 //
2387 // Inputs:
2388 // c_rarg0 - source byte array address
2389 // c_rarg1 - destination byte array address
2390 // c_rarg2 - K (key) in little endian int array
2391 // c_rarg3 - r vector byte array address
2392 // c_rarg4 - input length
2393 //
2394 address generate_cipherBlockChaining_encryptAESCrypt() {
2395 assert(UseAES, "need AES instructions and misaligned SSE support");
2396 __ align(CodeEntryAlignment);
2397 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
2398 address start = __ pc();
2400 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
2401 const Register from = rsi; // source array address
2402 const Register to = rdx; // destination array address
2403 const Register key = rcx; // key array address
2404 const Register rvec = rdi; // r byte array initialized from initvector array address
2405 // and left with the results of the last encryption block
2406 const Register len_reg = rbx; // src len (must be multiple of blocksize 16)
2407 const Register pos = rax;
2409 // xmm register assignments for the loops below
2410 const XMMRegister xmm_result = xmm0;
2411 const XMMRegister xmm_temp = xmm1;
2412 // first 6 keys preloaded into xmm2-xmm7
2413 const int XMM_REG_NUM_KEY_FIRST = 2;
2414 const int XMM_REG_NUM_KEY_LAST = 7;
2415 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
2417 __ enter(); // required for proper stackwalking of RuntimeStub frame
2418 handleSOERegisters(true /*saving*/);
2420 // load registers from incoming parameters
2421 const Address from_param(rbp, 8+0);
2422 const Address to_param (rbp, 8+4);
2423 const Address key_param (rbp, 8+8);
2424 const Address rvec_param (rbp, 8+12);
2425 const Address len_param (rbp, 8+16);
2426 __ movptr(from , from_param);
2427 __ movptr(to , to_param);
2428 __ movptr(key , key_param);
2429 __ movptr(rvec , rvec_param);
2430 __ movptr(len_reg , len_param);
2432 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front
2433 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
2434 // load up xmm regs 2 thru 7 with keys 0-5
2435 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2436 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
2437 offset += 0x10;
2438 }
2440 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec
2442 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
2443 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
2444 __ cmpl(rax, 44);
2445 __ jcc(Assembler::notEqual, L_key_192_256);
2447 // 128 bit code follows here
2448 __ movl(pos, 0);
2449 __ align(OptoLoopAlignment);
2450 __ BIND(L_loopTop_128);
2451 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
2452 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
2454 __ pxor (xmm_result, xmm_key0); // do the aes rounds
2455 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2456 __ aesenc(xmm_result, as_XMMRegister(rnum));
2457 }
2458 for (int key_offset = 0x60; key_offset <= 0x90; key_offset += 0x10) {
2459 aes_enc_key(xmm_result, xmm_temp, key, key_offset);
2460 }
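// Round key 0 was applied by the pxor above, rounds 1-5 by the preloaded
// xmm3-xmm7, rounds 6-9 by the loop just above; the final AES-128 round key
// sits at offset 0xa0 == 10 * 16 and feeds aesenclast below.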
2461 load_key(xmm_temp, key, 0xa0);
2462 __ aesenclast(xmm_result, xmm_temp);
2464 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
2465 // no need to store r to memory until we exit
2466 __ addptr(pos, AESBlockSize);
2467 __ subptr(len_reg, AESBlockSize);
2468 __ jcc(Assembler::notEqual, L_loopTop_128);
2470 __ BIND(L_exit);
2471 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object
2473 handleSOERegisters(false /*restoring*/);
2474 __ movl(rax, 0); // return 0 (why?)
2475 __ leave(); // required for proper stackwalking of RuntimeStub frame
2476 __ ret(0);
2478 __ BIND(L_key_192_256);
2479 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
2480 __ cmpl(rax, 52);
2481 __ jcc(Assembler::notEqual, L_key_256);
2483 // 192-bit code follows here (could be changed to use more xmm registers)
2484 __ movl(pos, 0);
2485 __ align(OptoLoopAlignment);
2486 __ BIND(L_loopTop_192);
2487 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
2488 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
2490 __ pxor (xmm_result, xmm_key0); // do the aes rounds
2491 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2492 __ aesenc(xmm_result, as_XMMRegister(rnum));
2493 }
2494 for (int key_offset = 0x60; key_offset <= 0xb0; key_offset += 0x10) {
2495 aes_enc_key(xmm_result, xmm_temp, key, key_offset);
2496 }
2497 load_key(xmm_temp, key, 0xc0);
2498 __ aesenclast(xmm_result, xmm_temp);
2500 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
2501 // no need to store r to memory until we exit
2502 __ addptr(pos, AESBlockSize);
2503 __ subptr(len_reg, AESBlockSize);
2504 __ jcc(Assembler::notEqual, L_loopTop_192);
2505 __ jmp(L_exit);
2507 __ BIND(L_key_256);
2508 // 256-bit code follows here (could be changed to use more xmm registers)
2509 __ movl(pos, 0);
2510 __ align(OptoLoopAlignment);
2511 __ BIND(L_loopTop_256);
2512 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
2513 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
2515 __ pxor (xmm_result, xmm_key0); // do the aes rounds
2516 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2517 __ aesenc(xmm_result, as_XMMRegister(rnum));
2518 }
2519 for (int key_offset = 0x60; key_offset <= 0xd0; key_offset += 0x10) {
2520 aes_enc_key(xmm_result, xmm_temp, key, key_offset);
2521 }
2522 load_key(xmm_temp, key, 0xe0);
2523 __ aesenclast(xmm_result, xmm_temp);
2525 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
2526 // no need to store r to memory until we exit
2527 __ addptr(pos, AESBlockSize);
2528 __ subptr(len_reg, AESBlockSize);
2529 __ jcc(Assembler::notEqual, L_loopTop_256);
2530 __ jmp(L_exit);
2532 return start;
2533 }
2536 // CBC AES Decryption.
2537 // In the 32-bit stub, because of the shortage of registers, we do not try to parallelize 4 blocks at a time.
2538 //
2539 // Arguments:
2540 //
2541 // Inputs:
2542 // c_rarg0 - source byte array address
2543 // c_rarg1 - destination byte array address
2544 // c_rarg2 - K (key) in little endian int array
2545 // c_rarg3 - r vector byte array address
2546 // c_rarg4 - input length
2547 //
2549 address generate_cipherBlockChaining_decryptAESCrypt() {
2550 assert(UseAES, "need AES instructions and misaligned SSE support");
2551 __ align(CodeEntryAlignment);
2552 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
2553 address start = __ pc();
2555 Label L_exit, L_key_192_256, L_key_256;
2556 Label L_singleBlock_loopTop_128;
2557 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
2558 const Register from = rsi; // source array address
2559 const Register to = rdx; // destination array address
2560 const Register key = rcx; // key array address
2561 const Register rvec = rdi; // r byte array initialized from initvector array address
2562 // and left with the results of the last encryption block
2563 const Register len_reg = rbx; // src len (must be multiple of blocksize 16)
2564 const Register pos = rax;
2566 // xmm register assignments for the loops below
2567 const XMMRegister xmm_result = xmm0;
2568 const XMMRegister xmm_temp = xmm1;
2569 // first 6 keys preloaded into xmm2-xmm7
2570 const int XMM_REG_NUM_KEY_FIRST = 2;
2571 const int XMM_REG_NUM_KEY_LAST = 7;
2572 const int FIRST_NON_REG_KEY_offset = 0x70;
2573 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
2575 __ enter(); // required for proper stackwalking of RuntimeStub frame
2576 handleSOERegisters(true /*saving*/);
2578 // load registers from incoming parameters
2579 const Address from_param(rbp, 8+0);
2580 const Address to_param (rbp, 8+4);
2581 const Address key_param (rbp, 8+8);
2582 const Address rvec_param (rbp, 8+12);
2583 const Address len_param (rbp, 8+16);
2584 __ movptr(from , from_param);
2585 __ movptr(to , to_param);
2586 __ movptr(key , key_param);
2587 __ movptr(rvec , rvec_param);
2588 __ movptr(len_reg , len_param);
2590 // the java expanded key ordering is rotated one position from what we want
2591 // so we start from 0x10 here and hit 0x00 last
2592 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
2593 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
2594 // load up xmm regs 2 thru 7 with the first 6 round keys (offsets 0x10 - 0x60)
2595 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2596 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
2597 offset += 0x10;
2598 }
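// Keys at offsets 0x10 - 0x60 are now cached in xmm2-xmm7, so the first round
// key that must be fetched from memory is at FIRST_NON_REG_KEY_offset (0x70).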
2600 // inside here, use the rvec register to point to previous block cipher
2601 // with which we xor at the end of each newly decrypted block
2602 const Register prev_block_cipher_ptr = rvec;
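// CBC decryption computes plaintext[i] = AES_decrypt(cipher[i]) ^ cipher[i-1]
// (with cipher[-1] being the initial r vector), so each loop below xors the
// decrypted block with *prev_block_cipher_ptr and then advances that pointer
// to the input block it just consumed.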
2604 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
2605 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
2606 __ cmpl(rax, 44);
2607 __ jcc(Assembler::notEqual, L_key_192_256);
2610 // 128-bit code follows here (one block at a time; see the note above about not parallelizing in the 32-bit stub)
2611 __ movl(pos, 0);
2612 __ align(OptoLoopAlignment);
2613 __ BIND(L_singleBlock_loopTop_128);
2614 __ cmpptr(len_reg, 0); // any blocks left??
2615 __ jcc(Assembler::equal, L_exit);
2616 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
2617 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
2618 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2619 __ aesdec(xmm_result, as_XMMRegister(rnum));
2620 }
2621 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) { // 128-bit runs up to key offset a0
2622 aes_dec_key(xmm_result, xmm_temp, key, key_offset);
2623 }
2624 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0
2625 __ aesdeclast(xmm_result, xmm_temp);
2626 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
2627 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
2628 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
2629 // no need to store r to memory until we exit
2630 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr
2631 __ addptr(pos, AESBlockSize);
2632 __ subptr(len_reg, AESBlockSize);
2633 __ jmp(L_singleBlock_loopTop_128);
2636 __ BIND(L_exit);
2637 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
2638 __ movptr(rvec , rvec_param); // restore this since used in loop
2639 __ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object
2640 handleSOERegisters(false /*restoring*/);
2641 __ movl(rax, 0); // return 0 (why?)
2642 __ leave(); // required for proper stackwalking of RuntimeStub frame
2643 __ ret(0);
2646 __ BIND(L_key_192_256);
2647 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
2648 __ cmpl(rax, 52);
2649 __ jcc(Assembler::notEqual, L_key_256);
2651 // 192-bit code follows here (could be optimized to use parallelism)
2652 __ movl(pos, 0);
2653 __ align(OptoLoopAlignment);
2654 __ BIND(L_singleBlock_loopTop_192);
2655 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
2656 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
2657 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2658 __ aesdec(xmm_result, as_XMMRegister(rnum));
2659 }
2660 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) { // 192-bit runs up to key offset c0
2661 aes_dec_key(xmm_result, xmm_temp, key, key_offset);
2662 }
2663 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0
2664 __ aesdeclast(xmm_result, xmm_temp);
2665 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
2666 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
2667 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
2668 // no need to store r to memory until we exit
2669 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr
2670 __ addptr(pos, AESBlockSize);
2671 __ subptr(len_reg, AESBlockSize);
2672 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
2673 __ jmp(L_exit);
2675 __ BIND(L_key_256);
2676 // 256-bit code follows here (could be optimized to use parallelism)
2677 __ movl(pos, 0);
2678 __ align(OptoLoopAlignment);
2679 __ BIND(L_singleBlock_loopTop_256);
2680 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
2681 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
2682 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
2683 __ aesdec(xmm_result, as_XMMRegister(rnum));
2684 }
2685 for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) { // 256-bit runs up to key offset e0
2686 aes_dec_key(xmm_result, xmm_temp, key, key_offset);
2687 }
2688 load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0
2689 __ aesdeclast(xmm_result, xmm_temp);
2690 __ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
2691 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
2692 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
2693 // no need to store r to memory until we exit
2694 __ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr
2695 __ addptr(pos, AESBlockSize);
2696 __ subptr(len_reg, AESBlockSize);
2697 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
2698 __ jmp(L_exit);
2700 return start;
2701 }
2704 public:
2705 // Information about frame layout at time of blocking runtime call.
2706 // Note that we only have to preserve callee-saved registers since
2707 // the compilers are responsible for supplying a continuation point
2708 // if they expect all registers to be preserved.
2709 enum layout {
2710 thread_off, // last_java_sp
2711 arg1_off,
2712 arg2_off,
2713 rbp_off, // callee saved register
2714 ret_pc,
2715 framesize
2716 };
2718 private:
2720 #undef __
2721 #define __ masm->
2723 //------------------------------------------------------------------------------------------------------------------------
2724 // Continuation point for throwing of implicit exceptions that are not handled in
2725 // the current activation. Fabricates an exception oop and initiates normal
2726 // exception dispatching in this frame.
2727 //
2728 // Previously the compiler (c2) allowed for callee save registers on Java calls.
2729 // This is no longer true after adapter frames were removed but could possibly
2730 // be brought back in the future if the interpreter code was reworked and it
2731 // was deemed worthwhile. The comment below was left to describe what must
2732 // happen here if callee saves were resurrected. As it stands now this stub
2733 // could actually be a vanilla BufferBlob and have no oopMap at all.
2734 // Since it doesn't make much difference we've chosen to leave it the
2735 // way it was in the callee save days and keep the comment.
2737 // If we need to preserve callee-saved values we need a callee-saved oop map and
2738 // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
2739 // If the compiler needs all registers to be preserved between the fault
2740 // point and the exception handler then it must assume responsibility for that in
2741 // AbstractCompiler::continuation_for_implicit_null_exception or
2742 // continuation_for_implicit_division_by_zero_exception. All other implicit
2743 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
2744 // either at call sites or otherwise assume that stack unwinding will be initiated,
2745 // so caller saved registers were assumed volatile in the compiler.
2746 address generate_throw_exception(const char* name, address runtime_entry,
2747 Register arg1 = noreg, Register arg2 = noreg) {
2749 int insts_size = 256;
2750 int locs_size = 32;
2752 CodeBuffer code(name, insts_size, locs_size);
2753 OopMapSet* oop_maps = new OopMapSet();
2754 MacroAssembler* masm = new MacroAssembler(&code);
2756 address start = __ pc();
2758 // This is an inlined and slightly modified version of call_VM
2759 // which has the ability to fetch the return PC out of
2760 // thread-local storage and also sets up last_Java_sp slightly
2761 // differently than the real call_VM
2762 Register java_thread = rbx;
2763 __ get_thread(java_thread);
2765 __ enter(); // required for proper stackwalking of RuntimeStub frame
2767 // pc and rbp already pushed
2768 __ subptr(rsp, (framesize-2) * wordSize); // prolog
2770 // Frame is now completed as far as size and linkage.
2772 int frame_complete = __ pc() - start;
2774 // push java thread (becomes first argument of C function)
2775 __ movptr(Address(rsp, thread_off * wordSize), java_thread);
2776 if (arg1 != noreg) {
2777 __ movptr(Address(rsp, arg1_off * wordSize), arg1);
2778 }
2779 if (arg2 != noreg) {
2780 assert(arg1 != noreg, "missing reg arg");
2781 __ movptr(Address(rsp, arg2_off * wordSize), arg2);
2782 }
2784 // Set up last_Java_sp and last_Java_fp
2785 __ set_last_Java_frame(java_thread, rsp, rbp, NULL);
2787 // Call runtime
2788 BLOCK_COMMENT("call runtime_entry");
2789 __ call(RuntimeAddress(runtime_entry));
2790 // Generate oop map
2791 OopMap* map = new OopMap(framesize, 0);
2792 oop_maps->add_gc_map(__ pc() - start, map);
2794 // restore the thread (cannot use the pushed argument since arguments
2795 // may be overwritten by C code generated by an optimizing compiler);
2796 // however can use the register value directly if it is callee saved.
2797 __ get_thread(java_thread);
2799 __ reset_last_Java_frame(java_thread, true, false);
2801 __ leave(); // required for proper stackwalking of RuntimeStub frame
2803 // check for pending exceptions
2804 #ifdef ASSERT
2805 Label L;
2806 __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
2807 __ jcc(Assembler::notEqual, L);
2808 __ should_not_reach_here();
2809 __ bind(L);
2810 #endif /* ASSERT */
2811 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2814 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
2815 return stub->entry_point();
2816 }
2819 void create_control_words() {
2820 // Round to nearest, 53-bit mode, exceptions masked
2821 StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
2822 // Round to zero, 53-bit mode, exceptions masked
2823 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
2824 // Round to nearest, 24-bit mode, exceptions masked
2825 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
2826 // Round to nearest, 64-bit mode, exceptions masked
2827 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F;
2828 // Round to nearest, all SSE exceptions masked
2829 StubRoutines::_mxcsr_std = 0x1F80;
2830 // Note: the following two constants are 80-bit values
2831 // layout is critical for correct loading by FPU.
2832 // Bias for strict fp multiply/divide
2833 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
2834 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
2835 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
2836 // Un-Bias for strict fp multiply/divide
2837 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
2838 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
2839 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
2840 }
2842 //---------------------------------------------------------------------------
2843 // Initialization
2845 void generate_initial() {
2846 // Generates all stubs and initializes the entry points
2848 //------------------------------------------------------------------------------------------------------------------------
2849 // entry points that exist in all platforms
2850 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
2851 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
2852 StubRoutines::_forward_exception_entry = generate_forward_exception();
2854 StubRoutines::_call_stub_entry =
2855 generate_call_stub(StubRoutines::_call_stub_return_address);
2856 // is referenced by megamorphic call
2857 StubRoutines::_catch_exception_entry = generate_catch_exception();
2859 // These are currently used by Solaris/Intel
2860 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2862 StubRoutines::_handler_for_unsafe_access_entry =
2863 generate_handler_for_unsafe_access();
2865 // platform dependent
2866 create_control_words();
2868 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
2869 StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
2870 StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT,
2871 CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
2872 StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
2873 CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
2875 // Build this early so it's available for the interpreter
2876 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
2877 }
2880 void generate_all() {
2881 // Generates all stubs and initializes the entry points
2883 // These entry points require SharedInfo::stack0 to be set up in non-core builds
2884 // and need to be relocatable, so they each fabricate a RuntimeStub internally.
2885 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
2886 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
2887 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
2889 //------------------------------------------------------------------------------------------------------------------------
2890 // entry points that are platform specific
2892 // support for verify_oop (must happen after universe_init)
2893 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
2895 // arraycopy stubs used by compilers
2896 generate_arraycopy_stubs();
2898 generate_math_stubs();
2900 // don't bother generating these AES intrinsic stubs unless global flag is set
2901 if (UseAESIntrinsics) {
2902 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // might be needed by the others
2904 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
2905 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
2906 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
2907 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
2908 }
2909 }
2912 public:
2913 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2914 if (all) {
2915 generate_all();
2916 } else {
2917 generate_initial();
2918 }
2919 }
2920 }; // end class declaration
2923 void StubGenerator_generate(CodeBuffer* code, bool all) {
2924 StubGenerator g(code, all);
2925 }