Wed, 21 Jan 2015 12:38:11 +0100
8068013: [TESTBUG] Aix support in hotspot jtreg tests
Reviewed-by: ctornqvi, fzhinkin, farvidsson
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions
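
// Added note (not in the original): MXCSR bits 0-5 are the sticky exception
// status flags (IE, DE, ZE, OE, UE, PE); 0xFFC0 clears exactly those six bits
// and keeps bits 6-15 (DAZ, the exception mask bits, rounding control, FZ).
// Assuming StubRoutines::addr_mxcsr_std() holds the x86 power-on default
// (0x1F80: all exceptions masked, round-to-nearest), the checks below reduce
// to:
//
//   (mxcsr & MXCSR_MASK) == 0x1F80   // control state unchanged by callee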
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}
class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif
  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -28 [ argument word 1      ]
  // -27 [ saved xmm15          ] <--- rsp_after_call
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.
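
  // For orientation (added note, not in the original): the generated stub is
  // invoked through the CallStub function pointer declared in stubRoutines.hpp,
  // roughly:
  //
  //   typedef void (*CallStub)(address   link,            // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);                    // current thread
  //
  // which is why the first four arguments arrive in registers on both ABIs
  // while the tail differs between Linux (c_rarg4/c_rarg5 plus stack) and
  // Windows (all remaining arguments on the stack).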
  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 15, // to xmm15
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };
#ifdef _WIN64
  Address xmm_save(int reg)  {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
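
  // Worked example (added note, not in the original): each xmm register
  // occupies two word slots, so for reg == 8:
  //
  //   xmm_save(8) == Address(rbp, (-9 - (8 - 6) * 2) * wordSize)
  //               == Address(rbp, -13 * wordSize)
  //
  // and for reg == 15 the offset is (-9 - 18) * wordSize == -27 * wordSize,
  // matching rsp_after_call_off above.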
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
#ifdef _WIN64
    for (int i = 6; i <= 15; i++) {
      __ movdqu(xmm_save(i), as_XMMRegister(i));
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));  // get parameter
    __ addptr(c_rarg2, wordSize);         // advance to next parameter
    __ decrementl(c_rarg1);               // decrement counter
    __ push(rax);                         // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);               // get Method*
    __ movptr(c_rarg1, entry_point);      // get entry_point
    __ mov(r13, rsp);                     // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    for (int i = 15; i >= 6; i--) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)__LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }
  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- exchange_value, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
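
  // Added note (not in the original): xchg with a memory operand asserts LOCK
  // implicitly on x86, so no lock prefix is needed; the stub implements,
  // atomically, the following C-level semantics:
  //
  //   jint atomic_xchg(jint exchange_value, volatile jint* dest) {
  //     jint old = *dest;        // one atomic read-modify-write
  //     *dest = exchange_value;
  //     return old;
  //   }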
  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- exchange_value, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
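
  // Added note (not in the original): cmpxchgl compares rax (loaded with
  // compare_value above) against *dest; on success it stores c_rarg0, on
  // failure it loads the current *dest into rax. Either way rax ends up
  // holding the contract's return value:
  //
  //   jint atomic_cmpxchg(jint x, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;                  // single atomic step with store
  //     if (old == compare_value) *dest = x;
  //     return old;                        // == compare_value on success
  //   }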
  // Support for jlong atomic::atomic_cmpxchg_long(jlong exchange_value,
  //                                               volatile jlong* dest,
  //                                               jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }
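
  // Added note (not in the original): lock xadd leaves the *old* value of
  // *dest in c_rarg0, so adding the saved add_value (rax) reconstructs the
  // *new* value that the contract requires:
  //
  //   jint atomic_add(jint add_value, volatile jint* dest) {
  //     jint old = *dest;           // xadd: fetch-and-add returns old value
  //     *dest = old + add_value;
  //     return old + add_value;     // rax = add_value + old
  //   }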
  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }
  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }
  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }
  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }
  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
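
  // Added note (not in the original): this fixup is reached after a cvttss2si
  // has produced the x86 "integer indefinite" value (0x80000000), which the
  // hardware returns for NaN and for out-of-range inputs. Java's f2i semantics
  // are recovered by inspecting the original float bits left at 'inout':
  //
  //   jint f2i_fixup(jfloat x) {
  //     if (x != x) return 0;            // NaN -> 0
  //     if (x < 0)  return 0x80000000;   // min_jint for negative overflow
  //     return 0x7fffffff;               // max_jint for positive overflow
  //   }
  //
  // The d2i/f2l/d2l fixups below follow the same pattern for their types.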
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                       // hole for return address-to-be
    __ pusha();                       // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    // FIXME: this probably needs alignment logic

    __ subptr(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addptr(rsp, frame::arg_reg_save_area_bytes);

    __ movptr(next_pc, rax);          // stuff next address
    __ popa();
    __ ret(0);                        // jump to next address

    return start;
  }
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable' (i.e., not zero)
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }
  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
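
  // Added note (not in the original): the two unsigned compares implement the
  // standard memmove-style disjointness test. A forward (low-to-high) copy is
  // safe exactly when the destination does not start strictly inside the
  // source:
  //
  //   bool no_overlap(char* from, char* to, size_t count, size_t size) {
  //     char* end_from = from + count * size;  // 'lea' above, scaled by sf
  //     return to <= from || to >= end_from;   // belowEqual / aboveEqual
  //   }
  //
  // Otherwise control falls through to the conjoint (backward) copy code.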
  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where they
  // are non-volatile.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }
  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
  // Generate code for an array write pre barrier
  //
  //     addr    - starting address
  //     count   - element count
  //     tmp     - scratch register
  //
  //     Destroy no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }
  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);                   // end - 1 to make inclusive
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start);                             // end --> cards count

          int64_t disp = (int64_t) ct->byte_map_base;
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
        __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
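
  // Added note (not in the original): for the card-table case the loop above
  // dirties every card spanned by [start, start + count*oopSize), roughly:
  //
  //   void post_barrier(oop* start, size_t count, jbyte* byte_map_base) {
  //     uintptr_t first = (uintptr_t)start >> card_shift;
  //     uintptr_t last  = ((uintptr_t)(start + count) - 1) >> card_shift;
  //     for (uintptr_t c = first; c <= last; c++)
  //       byte_map_base[c] = 0;   // 0 == dirty card value
  //   }
  //
  // (card_shift and the dirty value are those of CardTableModRefBS.)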
  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array end address
  //   end_to       - destination array end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vzeroupper();
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
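
  // Added note (not in the original): the callers bias end_from/end_to to the
  // last qword and negate qword_count, so the loop indexes with a negative
  // count that climbs toward zero. In C terms the 32-byte (non-AVX) variant
  // is roughly:
  //
  //   // q is negative; end_from/end_to point at the last qword
  //   for (q += 4; q <= 0; q += 4) {
  //     end_to[q-3] = end_from[q-3];  // times_8 offsets -24, -16, -8, 0
  //     end_to[q-2] = end_from[q-2];
  //     end_to[q-1] = end_from[q-1];
  //     end_to[q]   = end_from[q];
  //   }
  //
  // This lets one register serve as both loop index and termination test.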
  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array address
  //   dest         - destination array address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vzeroupper();
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
  __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
  __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

  __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
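
  // Added note (not in the original): after the bulk qword copy, the low three
  // bits of the original byte count select the tail moves, so the remainder is
  // finished in at most one dword, one word and one byte:
  //
  //   if (byte_count & 4) copy 4 bytes;   // L_copy_4_bytes
  //   if (byte_count & 2) copy 2 bytes;   // L_copy_2_bytes
  //   if (byte_count & 1) copy 1 byte;    // L_copy_byte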
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3);   // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
  __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
  __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

  __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
1764 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1765 address *entry, const char *name) {
1766 __ align(CodeEntryAlignment);
1767 StubCodeMark mark(this, "StubRoutines", name);
1768 address start = __ pc();
1770 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
1771 const Register from = rdi; // source array address
1772 const Register to = rsi; // destination array address
1773 const Register count = rdx; // elements count
1774 const Register word_count = rcx;
1775 const Register qword_count = count;
1777 __ enter(); // required for proper stackwalking of RuntimeStub frame
1778 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1780 if (entry != NULL) {
1781 *entry = __ pc();
1782 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1783 BLOCK_COMMENT("Entry:");
1784 }
1786 array_overlap_test(nooverlap_target, Address::times_2);
1787 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1788 // r9 and r10 may be used to save non-volatile registers
1790 // 'from', 'to' and 'count' are now valid
1791 __ movptr(word_count, count);
1792 __ shrptr(count, 2); // count => qword_count
1794 // Copy from high to low addresses. Use 'to' as scratch.
1796 // Check for and copy trailing word
1797 __ testl(word_count, 1);
1798 __ jccb(Assembler::zero, L_copy_4_bytes);
1799 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1800 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1802 // Check for and copy trailing dword
1803 __ BIND(L_copy_4_bytes);
1804 __ testl(word_count, 2);
1805 __ jcc(Assembler::zero, L_copy_bytes);
1806 __ movl(rax, Address(from, qword_count, Address::times_8));
1807 __ movl(Address(to, qword_count, Address::times_8), rax);
1808 __ jmp(L_copy_bytes);
1810 // Copy trailing qwords
1811 __ BIND(L_copy_8_bytes);
1812 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1813 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1814 __ decrement(qword_count);
1815 __ jcc(Assembler::notZero, L_copy_8_bytes);
1817 restore_arg_regs();
1818 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1819 __ xorptr(rax, rax); // return 0
1820 __ leave(); // required for proper stackwalking of RuntimeStub frame
1821 __ ret(0);
1823 // Copy in multi-byte chunks
1824 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1826 restore_arg_regs();
1827 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1828 __ xorptr(rax, rax); // return 0
1829 __ leave(); // required for proper stackwalking of RuntimeStub frame
1830 __ ret(0);
1832 return start;
1833 }
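// For reference, the overlap-safe ordering used above, schematically
// (illustrative only; the bulk loop lives in copy_bytes_backward):
//
//   if (word_count & 1) copy the highest 2-byte word first;
//   if (word_count & 2) copy the trailing 4-byte dword next;
//   while (qword_count--) copy qwords from high to low addresses;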
1835 // Arguments:
1836 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1837 // ignored
1838 // is_oop - true => oop array, so generate store check code
1839 // name - stub name string
1840 //
1841 // Inputs:
1842 // c_rarg0 - source array address
1843 // c_rarg1 - destination array address
1844 // c_rarg2 - element count, treated as ssize_t, can be zero
1845 //
1846 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1847 // the hardware handle it. The two dwords within qwords that span
1848 // cache line boundaries will still be loaded and stored atomically.
1849 //
1850 // Side Effects:
1851 // disjoint_int_copy_entry is set to the no-overlap entry point
1852 // used by generate_conjoint_int_oop_copy().
1853 //
1854 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
1855 const char *name, bool dest_uninitialized = false) {
1856 __ align(CodeEntryAlignment);
1857 StubCodeMark mark(this, "StubRoutines", name);
1858 address start = __ pc();
1860 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1861 const Register from = rdi; // source array address
1862 const Register to = rsi; // destination array address
1863 const Register count = rdx; // elements count
1864 const Register dword_count = rcx;
1865 const Register qword_count = count;
1866 const Register end_from = from; // source array end address
1867 const Register end_to = to; // destination array end address
1868 const Register saved_to = r11; // saved destination array address
1869 // End pointers are inclusive, and if count is not zero they point
1870 // to the last unit copied: end_to[0] := end_from[0]
1872 __ enter(); // required for proper stackwalking of RuntimeStub frame
1873 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1875 if (entry != NULL) {
1876 *entry = __ pc();
1877 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1878 BLOCK_COMMENT("Entry:");
1879 }
1881 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1882 // r9 and r10 may be used to save non-volatile registers
1883 if (is_oop) {
1884 __ movq(saved_to, to);
1885 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1886 }
1888 // 'from', 'to' and 'count' are now valid
1889 __ movptr(dword_count, count);
1890 __ shrptr(count, 1); // count => qword_count
1892 // Copy from low to high addresses. Use 'to' as scratch.
1893 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1894 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1895 __ negptr(qword_count);
1896 __ jmp(L_copy_bytes);
1898 // Copy trailing qwords
1899 __ BIND(L_copy_8_bytes);
1900 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1901 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1902 __ increment(qword_count);
1903 __ jcc(Assembler::notZero, L_copy_8_bytes);
1905 // Check for and copy trailing dword
1906 __ BIND(L_copy_4_bytes);
1907 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1908 __ jccb(Assembler::zero, L_exit);
1909 __ movl(rax, Address(end_from, 8));
1910 __ movl(Address(end_to, 8), rax);
1912 __ BIND(L_exit);
1913 if (is_oop) {
1914 gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
1915 }
1916 restore_arg_regs();
1917 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
1918 __ xorptr(rax, rax); // return 0
1919 __ leave(); // required for proper stackwalking of RuntimeStub frame
1920 __ ret(0);
1922 // Copy in multi-bytes chunks
1923 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1924 __ jmp(L_copy_4_bytes);
1926 return start;
1927 }
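// For the is_oop case the copy is bracketed by GC barriers; schematically
// (illustrative only, the real hooks are the barrier calls above):
//
//   gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
//   /* ... dword/qword copy loop ... */
//   gen_write_ref_array_post_barrier(saved_to, dword_count, tmp);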
1929 // Arguments:
1930 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1931 // ignored
1932 // is_oop - true => oop array, so generate store check code
1933 // name - stub name string
1934 //
1935 // Inputs:
1936 // c_rarg0 - source array address
1937 // c_rarg1 - destination array address
1938 // c_rarg2 - element count, treated as ssize_t, can be zero
1939 //
1940 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1941 // the hardware handle it. The two dwords within qwords that span
1942 // cache line boundaries will still be loaded and stored atomically.
1943 //
1944 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
1945 address *entry, const char *name,
1946 bool dest_uninitialized = false) {
1947 __ align(CodeEntryAlignment);
1948 StubCodeMark mark(this, "StubRoutines", name);
1949 address start = __ pc();
1951 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
1952 const Register from = rdi; // source array address
1953 const Register to = rsi; // destination array address
1954 const Register count = rdx; // elements count
1955 const Register dword_count = rcx;
1956 const Register qword_count = count;
1958 __ enter(); // required for proper stackwalking of RuntimeStub frame
1959 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1961 if (entry != NULL) {
1962 *entry = __ pc();
1963 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1964 BLOCK_COMMENT("Entry:");
1965 }
1967 array_overlap_test(nooverlap_target, Address::times_4);
1968 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1969 // r9 and r10 may be used to save non-volatile registers
1971 if (is_oop) {
1972 // no registers are destroyed by this call
1973 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1974 }
1976 assert_clean_int(count, rax); // Make sure 'count' is clean int.
1977 // 'from', 'to' and 'count' are now valid
1978 __ movptr(dword_count, count);
1979 __ shrptr(count, 1); // count => qword_count
1981 // Copy from high to low addresses. Use 'to' as scratch.
1983 // Check for and copy trailing dword
1984 __ testl(dword_count, 1);
1985 __ jcc(Assembler::zero, L_copy_bytes);
1986 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1987 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1988 __ jmp(L_copy_bytes);
1990 // Copy trailing qwords
1991 __ BIND(L_copy_8_bytes);
1992 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1993 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1994 __ decrement(qword_count);
1995 __ jcc(Assembler::notZero, L_copy_8_bytes);
1997 if (is_oop) {
1998 __ jmp(L_exit);
1999 }
2000 restore_arg_regs();
2001 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
2002 __ xorptr(rax, rax); // return 0
2003 __ leave(); // required for proper stackwalking of RuntimeStub frame
2004 __ ret(0);
2006 // Copy in multi-byte chunks
2007 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2009 __ BIND(L_exit);
2010 if (is_oop) {
2011 gen_write_ref_array_post_barrier(to, dword_count, rax);
2012 }
2013 restore_arg_regs();
2014 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
2015 __ xorptr(rax, rax); // return 0
2016 __ leave(); // required for proper stackwalking of RuntimeStub frame
2017 __ ret(0);
2019 return start;
2020 }
2022 // Arguments:
2023 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2024 // ignored
2025 // is_oop - true => oop array, so generate store check code
2026 // name - stub name string
2027 //
2028 // Inputs:
2029 // c_rarg0 - source array address
2030 // c_rarg1 - destination array address
2031 // c_rarg2 - element count, treated as ssize_t, can be zero
2032 //
2033 // Side Effects:
2034 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
2035 // no-overlap entry point used by generate_conjoint_long_oop_copy().
2036 //
2037 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
2038 const char *name, bool dest_uninitialized = false) {
2039 __ align(CodeEntryAlignment);
2040 StubCodeMark mark(this, "StubRoutines", name);
2041 address start = __ pc();
2043 Label L_copy_bytes, L_copy_8_bytes, L_exit;
2044 const Register from = rdi; // source array address
2045 const Register to = rsi; // destination array address
2046 const Register qword_count = rdx; // elements count
2047 const Register end_from = from; // source array end address
2048 const Register end_to = rcx; // destination array end address
2049 const Register saved_to = to;
2050 const Register saved_count = r11;
2051 // End pointers are inclusive, and if count is not zero they point
2052 // to the last unit copied: end_to[0] := end_from[0]
2054 __ enter(); // required for proper stackwalking of RuntimeStub frame
2055 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
2056 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2058 if (entry != NULL) {
2059 *entry = __ pc();
2060 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2061 BLOCK_COMMENT("Entry:");
2062 }
2064 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2065 // r9 and r10 may be used to save non-volatile registers
2066 // 'from', 'to' and 'qword_count' are now valid
2067 if (is_oop) {
2068 // Save to and count for store barrier
2069 __ movptr(saved_count, qword_count);
2070 // no registers are destroyed by this call
2071 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
2072 }
2074 // Copy from low to high addresses. Use 'to' as scratch.
2075 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
2076 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
2077 __ negptr(qword_count);
2078 __ jmp(L_copy_bytes);
2080 // Copy trailing qwords
2081 __ BIND(L_copy_8_bytes);
2082 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
2083 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
2084 __ increment(qword_count);
2085 __ jcc(Assembler::notZero, L_copy_8_bytes);
2087 if (is_oop) {
2088 __ jmp(L_exit);
2089 } else {
2090 restore_arg_regs();
2091 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2092 __ xorptr(rax, rax); // return 0
2093 __ leave(); // required for proper stackwalking of RuntimeStub frame
2094 __ ret(0);
2095 }
2097 // Copy in multi-byte chunks
2098 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2100 if (is_oop) {
2101 __ BIND(L_exit);
2102 gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
2103 }
2104 restore_arg_regs();
2105 if (is_oop) {
2106 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
2107 } else {
2108 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2109 }
2110 __ xorptr(rax, rax); // return 0
2111 __ leave(); // required for proper stackwalking of RuntimeStub frame
2112 __ ret(0);
2114 return start;
2115 }
2117 // Arguments:
2118 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2119 // ignored
2120 // is_oop - true => oop array, so generate store check code
2121 // name - stub name string
2122 //
2123 // Inputs:
2124 // c_rarg0 - source array address
2125 // c_rarg1 - destination array address
2126 // c_rarg2 - element count, treated as ssize_t, can be zero
2127 //
2128 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
2129 address nooverlap_target, address *entry,
2130 const char *name, bool dest_uninitialized = false) {
2131 __ align(CodeEntryAlignment);
2132 StubCodeMark mark(this, "StubRoutines", name);
2133 address start = __ pc();
2135 Label L_copy_bytes, L_copy_8_bytes, L_exit;
2136 const Register from = rdi; // source array address
2137 const Register to = rsi; // destination array address
2138 const Register qword_count = rdx; // elements count
2139 const Register saved_count = rcx;
2141 __ enter(); // required for proper stackwalking of RuntimeStub frame
2142 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2144 if (entry != NULL) {
2145 *entry = __ pc();
2146 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2147 BLOCK_COMMENT("Entry:");
2148 }
2150 array_overlap_test(nooverlap_target, Address::times_8);
2151 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2152 // r9 and r10 may be used to save non-volatile registers
2153 // 'from', 'to' and 'qword_count' are now valid
2154 if (is_oop) {
2155 // Save to and count for store barrier
2156 __ movptr(saved_count, qword_count);
2157 // No registers are destroyed by this call
2158 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
2159 }
2161 __ jmp(L_copy_bytes);
2163 // Copy trailing qwords
2164 __ BIND(L_copy_8_bytes);
2165 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2166 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2167 __ decrement(qword_count);
2168 __ jcc(Assembler::notZero, L_copy_8_bytes);
2170 if (is_oop) {
2171 __ jmp(L_exit);
2172 } else {
2173 restore_arg_regs();
2174 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2175 __ xorptr(rax, rax); // return 0
2176 __ leave(); // required for proper stackwalking of RuntimeStub frame
2177 __ ret(0);
2178 }
2180 // Copy in multi-byte chunks
2181 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2183 if (is_oop) {
2184 __ BIND(L_exit);
2185 gen_write_ref_array_post_barrier(to, saved_count, rax);
2186 }
2187 restore_arg_regs();
2188 if (is_oop) {
2189 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
2190 } else {
2191 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2192 }
2193 __ xorptr(rax, rax); // return 0
2194 __ leave(); // required for proper stackwalking of RuntimeStub frame
2195 __ ret(0);
2197 return start;
2198 }
2201 // Helper for generating a dynamic type check.
2202 // Smashes no registers.
2203 void generate_type_check(Register sub_klass,
2204 Register super_check_offset,
2205 Register super_klass,
2206 Label& L_success) {
2207 assert_different_registers(sub_klass, super_check_offset, super_klass);
2209 BLOCK_COMMENT("type_check:");
2211 Label L_miss;
2213 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
2214 super_check_offset);
2215 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
2217 // Fall through on failure!
2218 __ BIND(L_miss);
2219 }
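// The emitted type check is conceptually (illustrative only):
//
//   if (fast_subtype_check(sub_klass, super_klass)) goto L_success;
//   if (slow_subtype_check(sub_klass, super_klass)) goto L_success;
//   /* fall through: sub_klass is not a subtype of super_klass */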
2221 //
2222 // Generate checkcasting array copy stub
2223 //
2224 // Input:
2225 // c_rarg0 - source array address
2226 // c_rarg1 - destination array address
2227 // c_rarg2 - element count, treated as ssize_t, can be zero
2228 // c_rarg3 - size_t ckoff (super_check_offset)
2229 // not Win64
2230 // c_rarg4 - oop ckval (super_klass)
2231 // Win64
2232 // rsp+40 - oop ckval (super_klass)
2233 //
2234 // Output:
2235 // rax == 0 - success
2236 // rax == -1^K - failure, where K is partial transfer count
2237 //
2238 address generate_checkcast_copy(const char *name, address *entry,
2239 bool dest_uninitialized = false) {
2241 Label L_load_element, L_store_element, L_do_card_marks, L_done;
2243 // Input registers (after setup_arg_regs)
2244 const Register from = rdi; // source array address
2245 const Register to = rsi; // destination array address
2246 const Register length = rdx; // elements count
2247 const Register ckoff = rcx; // super_check_offset
2248 const Register ckval = r8; // super_klass
2250 // Registers used as temps (r13, r14 are save-on-entry)
2251 const Register end_from = from; // source array end address
2252 const Register end_to = r13; // destination array end address
2253 const Register count = rdx; // -(count_remaining)
2254 const Register r14_length = r14; // saved copy of length
2255 // End pointers are inclusive, and if length is not zero they point
2256 // to the last unit copied: end_to[0] := end_from[0]
2258 const Register rax_oop = rax; // actual oop copied
2259 const Register r11_klass = r11; // oop._klass
2261 //---------------------------------------------------------------
2262 // Assembler stub will be used for this call to arraycopy
2263 // if the two arrays are subtypes of Object[] but the
2264 // destination array type is not equal to or a supertype
2265 // of the source type. Each element must be separately
2266 // checked.
2268 __ align(CodeEntryAlignment);
2269 StubCodeMark mark(this, "StubRoutines", name);
2270 address start = __ pc();
2272 __ enter(); // required for proper stackwalking of RuntimeStub frame
2274 #ifdef ASSERT
2275 // caller guarantees that the arrays really are different
2276 // otherwise, we would have to make conjoint checks
2277 { Label L;
2278 array_overlap_test(L, TIMES_OOP);
2279 __ stop("checkcast_copy within a single array");
2280 __ bind(L);
2281 }
2282 #endif //ASSERT
2284 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2285 // ckoff => rcx, ckval => r8
2286 // r9 and r10 may be used to save non-volatile registers
2287 #ifdef _WIN64
2288 // last argument (#4) is on stack on Win64
2289 __ movptr(ckval, Address(rsp, 6 * wordSize));
2290 #endif
2292 // Caller of this entry point must set up the argument registers.
2293 if (entry != NULL) {
2294 *entry = __ pc();
2295 BLOCK_COMMENT("Entry:");
2296 }
2298 // allocate spill slots for r13, r14
2299 enum {
2300 saved_r13_offset,
2301 saved_r14_offset,
2302 saved_rbp_offset
2303 };
2304 __ subptr(rsp, saved_rbp_offset * wordSize);
2305 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2306 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2308 // check that int operands are properly extended to size_t
2309 assert_clean_int(length, rax);
2310 assert_clean_int(ckoff, rax);
2312 #ifdef ASSERT
2313 BLOCK_COMMENT("assert consistent ckoff/ckval");
2314 // The ckoff and ckval must be mutually consistent,
2315 // even though caller generates both.
2316 { Label L;
2317 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2318 __ cmpl(ckoff, Address(ckval, sco_offset));
2319 __ jcc(Assembler::equal, L);
2320 __ stop("super_check_offset inconsistent");
2321 __ bind(L);
2322 }
2323 #endif //ASSERT
2325 // Loop-invariant addresses. They are exclusive end pointers.
2326 Address end_from_addr(from, length, TIMES_OOP, 0);
2327 Address end_to_addr(to, length, TIMES_OOP, 0);
2328 // Loop-variant addresses. They assume post-incremented count < 0.
2329 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2330 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2332 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
2334 // Copy from low to high addresses, indexed from the end of each array.
2335 __ lea(end_from, end_from_addr);
2336 __ lea(end_to, end_to_addr);
2337 __ movptr(r14_length, length); // save a copy of the length
2338 assert(length == count, ""); // else fix next line:
2339 __ negptr(count); // negate and test the length
2340 __ jcc(Assembler::notZero, L_load_element);
2342 // Empty array: Nothing to do.
2343 __ xorptr(rax, rax); // return 0 on (trivial) success
2344 __ jmp(L_done);
2346 // ======== begin loop ========
2347 // (Loop is rotated; its entry is L_load_element.)
2348 // Loop control:
2349 // for (count = -count; count != 0; count++)
2350 // Base pointers src, dst are biased by 8*(count-1), to last element.
2351 __ align(OptoLoopAlignment);
2353 __ BIND(L_store_element);
2354 __ store_heap_oop(to_element_addr, rax_oop); // store the oop
2355 __ increment(count); // increment the count toward zero
2356 __ jcc(Assembler::zero, L_do_card_marks);
2358 // ======== loop entry is here ========
2359 __ BIND(L_load_element);
2360 __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2361 __ testptr(rax_oop, rax_oop);
2362 __ jcc(Assembler::zero, L_store_element);
2364 __ load_klass(r11_klass, rax_oop);// query the object klass
2365 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2366 // ======== end loop ========
2368 // It was a real error; we must depend on the caller to finish the job.
2369 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2370 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2371 // and report their number to the caller.
2372 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
2373 Label L_post_barrier;
2374 __ addptr(r14_length, count); // K = (original - remaining) oops
2375 __ movptr(rax, r14_length); // save the value
2376 __ notptr(rax); // report (-1^K) to caller (does not affect flags)
2377 __ jccb(Assembler::notZero, L_post_barrier);
2378 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier
2380 // Come here on success only.
2381 __ BIND(L_do_card_marks);
2382 __ xorptr(rax, rax); // return 0 on success
2384 __ BIND(L_post_barrier);
2385 gen_write_ref_array_post_barrier(to, r14_length, rscratch1);
2387 // Common exit point (success or failure).
2388 __ BIND(L_done);
2389 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2390 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2391 restore_arg_regs();
2392 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
2393 __ leave(); // required for proper stackwalking of RuntimeStub frame
2394 __ ret(0);
2396 return start;
2397 }
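// Caller-side decoding of the return convention above, as an illustrative
// sketch (names are hypothetical, not code from this file):
//
//   intptr_t rc = checkcast_copy_stub(from, to, count, ckoff, ckval);
//   if (rc == 0) { /* all elements were copied */ }
//   else         { size_t K = ~(size_t)rc; /* only the first K elements were copied */ }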
2399 //
2400 // Generate 'unsafe' array copy stub
2401 // Though just as safe as the other stubs, it takes an unscaled
2402 // size_t argument instead of an element count.
2403 //
2404 // Input:
2405 // c_rarg0 - source array address
2406 // c_rarg1 - destination array address
2407 // c_rarg2 - byte count, treated as ssize_t, can be zero
2408 //
2409 // Examines the alignment of the operands and dispatches
2410 // to a long, int, short, or byte copy loop.
2411 //
2412 address generate_unsafe_copy(const char *name,
2413 address byte_copy_entry, address short_copy_entry,
2414 address int_copy_entry, address long_copy_entry) {
2416 Label L_long_aligned, L_int_aligned, L_short_aligned;
2418 // Input registers (before setup_arg_regs)
2419 const Register from = c_rarg0; // source array address
2420 const Register to = c_rarg1; // destination array address
2421 const Register size = c_rarg2; // byte count (size_t)
2423 // Register used as a temp
2424 const Register bits = rax; // test copy of low bits
2426 __ align(CodeEntryAlignment);
2427 StubCodeMark mark(this, "StubRoutines", name);
2428 address start = __ pc();
2430 __ enter(); // required for proper stackwalking of RuntimeStub frame
2432 // bump this on entry, not on exit:
2433 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2435 __ mov(bits, from);
2436 __ orptr(bits, to);
2437 __ orptr(bits, size);
2439 __ testb(bits, BytesPerLong-1);
2440 __ jccb(Assembler::zero, L_long_aligned);
2442 __ testb(bits, BytesPerInt-1);
2443 __ jccb(Assembler::zero, L_int_aligned);
2445 __ testb(bits, BytesPerShort-1);
2446 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2448 __ BIND(L_short_aligned);
2449 __ shrptr(size, LogBytesPerShort); // size => short_count
2450 __ jump(RuntimeAddress(short_copy_entry));
2452 __ BIND(L_int_aligned);
2453 __ shrptr(size, LogBytesPerInt); // size => int_count
2454 __ jump(RuntimeAddress(int_copy_entry));
2456 __ BIND(L_long_aligned);
2457 __ shrptr(size, LogBytesPerLong); // size => qword_count
2458 __ jump(RuntimeAddress(long_copy_entry));
2460 return start;
2461 }
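// The alignment dispatch above is equivalent to the following illustrative C
// (a sketch for exposition, not generated code):
//
//   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
//   if      ((bits & (BytesPerLong  - 1)) == 0) long_copy (from, to, size >> LogBytesPerLong);
//   else if ((bits & (BytesPerInt   - 1)) == 0) int_copy  (from, to, size >> LogBytesPerInt);
//   else if ((bits & (BytesPerShort - 1)) == 0) short_copy(from, to, size >> LogBytesPerShort);
//   else                                        byte_copy (from, to, size);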
2463 // Perform range checks on the proposed arraycopy.
2464 // Kills temp, but nothing else.
2465 // Also, clean the sign bits of src_pos and dst_pos.
2466 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2467 Register src_pos, // source position (c_rarg1)
2468 Register dst, // destination array oop (c_rarg2)
2469 Register dst_pos, // destination position (c_rarg3)
2470 Register length,
2471 Register temp,
2472 Label& L_failed) {
2473 BLOCK_COMMENT("arraycopy_range_checks:");
2475 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2476 __ movl(temp, length);
2477 __ addl(temp, src_pos); // src_pos + length
2478 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2479 __ jcc(Assembler::above, L_failed);
2481 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2482 __ movl(temp, length);
2483 __ addl(temp, dst_pos); // dst_pos + length
2484 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2485 __ jcc(Assembler::above, L_failed);
2487 // We have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2488 // A move with sign extension can be used since they are known to be positive.
2489 __ movslq(src_pos, src_pos);
2490 __ movslq(dst_pos, dst_pos);
2492 BLOCK_COMMENT("arraycopy_range_checks done");
2493 }
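// At the Java level the two checks above correspond to (illustrative only):
//
//   if (src_pos + length > src.length) goto L_failed;
//   if (dst_pos + length > dst.length) goto L_failed;
//
// The unsigned 'above' comparison also catches a 32-bit sum that wraps.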
2495 //
2496 // Generate generic array copy stubs
2497 //
2498 // Input:
2499 // c_rarg0 - src oop
2500 // c_rarg1 - src_pos (32-bits)
2501 // c_rarg2 - dst oop
2502 // c_rarg3 - dst_pos (32-bits)
2503 // not Win64
2504 // c_rarg4 - element count (32-bits)
2505 // Win64
2506 // rsp+40 - element count (32-bits)
2507 //
2508 // Output:
2509 // rax == 0 - success
2510 // rax == -1^K - failure, where K is partial transfer count
2511 //
2512 address generate_generic_copy(const char *name,
2513 address byte_copy_entry, address short_copy_entry,
2514 address int_copy_entry, address oop_copy_entry,
2515 address long_copy_entry, address checkcast_copy_entry) {
2517 Label L_failed, L_failed_0, L_objArray;
2518 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2520 // Input registers
2521 const Register src = c_rarg0; // source array oop
2522 const Register src_pos = c_rarg1; // source position
2523 const Register dst = c_rarg2; // destination array oop
2524 const Register dst_pos = c_rarg3; // destination position
2525 #ifndef _WIN64
2526 const Register length = c_rarg4;
2527 #else
2528 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64
2529 #endif
2531 { int modulus = CodeEntryAlignment;
2532 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2533 int advance = target - (__ offset() % modulus);
2534 if (advance < 0) advance += modulus;
2535 if (advance > 0) __ nop(advance);
2536 }
2537 StubCodeMark mark(this, "StubRoutines", name);
2539 // Short-hop target to L_failed. Makes for denser prologue code.
2540 __ BIND(L_failed_0);
2541 __ jmp(L_failed);
2542 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2544 __ align(CodeEntryAlignment);
2545 address start = __ pc();
2547 __ enter(); // required for proper stackwalking of RuntimeStub frame
2549 // bump this on entry, not on exit:
2550 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2552 //-----------------------------------------------------------------------
2553 // Assembler stub will be used for this call to arraycopy
2554 // if the following conditions are met:
2555 //
2556 // (1) src and dst must not be null.
2557 // (2) src_pos must not be negative.
2558 // (3) dst_pos must not be negative.
2559 // (4) length must not be negative.
2560 // (5) src klass and dst klass should be the same and not NULL.
2561 // (6) src and dst should be arrays.
2562 // (7) src_pos + length must not exceed length of src.
2563 // (8) dst_pos + length must not exceed length of dst.
2564 //
2566 // if (src == NULL) return -1;
2567 __ testptr(src, src); // src oop
2568 size_t j1off = __ offset();
2569 __ jccb(Assembler::zero, L_failed_0);
2571 // if (src_pos < 0) return -1;
2572 __ testl(src_pos, src_pos); // src_pos (32-bits)
2573 __ jccb(Assembler::negative, L_failed_0);
2575 // if (dst == NULL) return -1;
2576 __ testptr(dst, dst); // dst oop
2577 __ jccb(Assembler::zero, L_failed_0);
2579 // if (dst_pos < 0) return -1;
2580 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2581 size_t j4off = __ offset();
2582 __ jccb(Assembler::negative, L_failed_0);
2584 // The first four tests are very dense code,
2585 // but not quite dense enough to put four
2586 // jumps in a 16-byte instruction fetch buffer.
2587 // That's good, because some branch predictors
2588 // do not like jumps so close together.
2589 // Make sure of this.
2590 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2592 // registers used as temp
2593 const Register r11_length = r11; // elements count to copy
2594 const Register r10_src_klass = r10; // array klass
2596 // if (length < 0) return -1;
2597 __ movl(r11_length, length); // length (elements count, 32-bits value)
2598 __ testl(r11_length, r11_length);
2599 __ jccb(Assembler::negative, L_failed_0);
2601 __ load_klass(r10_src_klass, src);
2602 #ifdef ASSERT
2603 // assert(src->klass() != NULL);
2604 {
2605 BLOCK_COMMENT("assert klasses not null {");
2606 Label L1, L2;
2607 __ testptr(r10_src_klass, r10_src_klass);
2608 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2609 __ bind(L1);
2610 __ stop("broken null klass");
2611 __ bind(L2);
2612 __ load_klass(rax, dst);
2613 __ cmpq(rax, 0);
2614 __ jcc(Assembler::equal, L1); // this would be broken also
2615 BLOCK_COMMENT("} assert klasses not null done");
2616 }
2617 #endif
2619 // Load layout helper (32-bits)
2620 //
2621 // |array_tag|     | header_size | element_type |     |log2_element_size|
2622 // 32        30    24            16              8     2                 0
2623 //
2624 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2625 //
2627 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2629 // Handle objArrays completely differently...
2630 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2631 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
2632 __ jcc(Assembler::equal, L_objArray);
2634 // if (src->klass() != dst->klass()) return -1;
2635 __ load_klass(rax, dst);
2636 __ cmpq(r10_src_klass, rax);
2637 __ jcc(Assembler::notEqual, L_failed);
2639 const Register rax_lh = rax; // layout helper
2640 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2642 // if (!src->is_Array()) return -1;
2643 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2644 __ jcc(Assembler::greaterEqual, L_failed);
2646 // At this point, it is known to be a typeArray (array_tag 0x3).
2647 #ifdef ASSERT
2648 {
2649 BLOCK_COMMENT("assert primitive array {");
2650 Label L;
2651 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2652 __ jcc(Assembler::greaterEqual, L);
2653 __ stop("must be a primitive array");
2654 __ bind(L);
2655 BLOCK_COMMENT("} assert primitive array done");
2656 }
2657 #endif
2659 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2660 r10, L_failed);
2662 // TypeArrayKlass
2663 //
2664 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2665 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2666 //
2668 const Register r10_offset = r10; // array offset
2669 const Register rax_elsize = rax_lh; // element size
2671 __ movl(r10_offset, rax_lh);
2672 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2673 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
2674 __ addptr(src, r10_offset); // src array offset
2675 __ addptr(dst, r10_offset); // dst array offset
2676 BLOCK_COMMENT("choose copy loop based on element size");
2677 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2679 // the following registers must be set before the jump to the corresponding stub
2680 const Register from = c_rarg0; // source array address
2681 const Register to = c_rarg1; // destination array address
2682 const Register count = c_rarg2; // elements count
2684 // 'from', 'to' and 'count' must be written in this order,
2685 // since they occupy the same registers as 'src', 'src_pos' and 'dst'.
2687 __ BIND(L_copy_bytes);
2688 __ cmpl(rax_elsize, 0);
2689 __ jccb(Assembler::notEqual, L_copy_shorts);
2690 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2691 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2692 __ movl2ptr(count, r11_length); // length
2693 __ jump(RuntimeAddress(byte_copy_entry));
2695 __ BIND(L_copy_shorts);
2696 __ cmpl(rax_elsize, LogBytesPerShort);
2697 __ jccb(Assembler::notEqual, L_copy_ints);
2698 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2699 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2700 __ movl2ptr(count, r11_length); // length
2701 __ jump(RuntimeAddress(short_copy_entry));
2703 __ BIND(L_copy_ints);
2704 __ cmpl(rax_elsize, LogBytesPerInt);
2705 __ jccb(Assembler::notEqual, L_copy_longs);
2706 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2707 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2708 __ movl2ptr(count, r11_length); // length
2709 __ jump(RuntimeAddress(int_copy_entry));
2711 __ BIND(L_copy_longs);
2712 #ifdef ASSERT
2713 {
2714 BLOCK_COMMENT("assert long copy {");
2715 Label L;
2716 __ cmpl(rax_elsize, LogBytesPerLong);
2717 __ jcc(Assembler::equal, L);
2718 __ stop("must be long copy, but elsize is wrong");
2719 __ bind(L);
2720 BLOCK_COMMENT("} assert long copy done");
2721 }
2722 #endif
2723 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2724 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2725 __ movl2ptr(count, r11_length); // length
2726 __ jump(RuntimeAddress(long_copy_entry));
2728 // ObjArrayKlass
2729 __ BIND(L_objArray);
2730 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos]
2732 Label L_plain_copy, L_checkcast_copy;
2733 // test array classes for subtyping
2734 __ load_klass(rax, dst);
2735 __ cmpq(r10_src_klass, rax); // usual case is exact equality
2736 __ jcc(Assembler::notEqual, L_checkcast_copy);
2738 // Identically typed arrays can be copied without element-wise checks.
2739 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2740 r10, L_failed);
2742 __ lea(from, Address(src, src_pos, TIMES_OOP,
2743 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2744 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2745 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2746 __ movl2ptr(count, r11_length); // length
2747 __ BIND(L_plain_copy);
2748 __ jump(RuntimeAddress(oop_copy_entry));
2750 __ BIND(L_checkcast_copy);
2751 // live at this point: r10_src_klass, r11_length, rax (dst_klass)
2752 {
2753 // Before looking at dst.length, make sure dst is also an objArray.
2754 __ cmpl(Address(rax, lh_offset), objArray_lh);
2755 __ jcc(Assembler::notEqual, L_failed);
2757 // It is safe to examine both src.length and dst.length.
2758 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2759 rax, L_failed);
2761 const Register r11_dst_klass = r11;
2762 __ load_klass(r11_dst_klass, dst); // reload
2764 // Marshal the base address arguments now, freeing registers.
2765 __ lea(from, Address(src, src_pos, TIMES_OOP,
2766 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2767 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2768 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2769 __ movl(count, length); // length (reloaded)
2770 Register sco_temp = c_rarg3; // this register is free now
2771 assert_different_registers(from, to, count, sco_temp,
2772 r11_dst_klass, r10_src_klass);
2773 assert_clean_int(count, sco_temp);
2775 // Generate the type check.
2776 const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2777 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2778 assert_clean_int(sco_temp, rax);
2779 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2781 // Fetch destination element klass from the ObjArrayKlass header.
2782 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2783 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2784 __ movl( sco_temp, Address(r11_dst_klass, sco_offset));
2785 assert_clean_int(sco_temp, rax);
2787 // the checkcast_copy loop needs two extra arguments:
2788 assert(c_rarg3 == sco_temp, "#3 already in place");
2789 // Set up arguments for checkcast_copy_entry.
2790 setup_arg_regs(4);
2791 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
2792 __ jump(RuntimeAddress(checkcast_copy_entry));
2793 }
2795 __ BIND(L_failed);
2796 __ xorptr(rax, rax);
2797 __ notptr(rax); // return -1
2798 __ leave(); // required for proper stackwalking of RuntimeStub frame
2799 __ ret(0);
2801 return start;
2802 }
2804 void generate_arraycopy_stubs() {
2805 address entry;
2806 address entry_jbyte_arraycopy;
2807 address entry_jshort_arraycopy;
2808 address entry_jint_arraycopy;
2809 address entry_oop_arraycopy;
2810 address entry_jlong_arraycopy;
2811 address entry_checkcast_arraycopy;
2813 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
2814 "jbyte_disjoint_arraycopy");
2815 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
2816 "jbyte_arraycopy");
2818 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
2819 "jshort_disjoint_arraycopy");
2820 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
2821 "jshort_arraycopy");
2823 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry,
2824 "jint_disjoint_arraycopy");
2825 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry,
2826 &entry_jint_arraycopy, "jint_arraycopy");
2828 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry,
2829 "jlong_disjoint_arraycopy");
2830 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry,
2831 &entry_jlong_arraycopy, "jlong_arraycopy");
2834 if (UseCompressedOops) {
2835 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry,
2836 "oop_disjoint_arraycopy");
2837 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry,
2838 &entry_oop_arraycopy, "oop_arraycopy");
2839 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
2840 "oop_disjoint_arraycopy_uninit",
2841 /*dest_uninitialized*/true);
2842 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
2843 NULL, "oop_arraycopy_uninit",
2844 /*dest_uninitialized*/true);
2845 } else {
2846 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
2847 "oop_disjoint_arraycopy");
2848 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry,
2849 &entry_oop_arraycopy, "oop_arraycopy");
2850 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
2851 "oop_disjoint_arraycopy_uninit",
2852 /*dest_uninitialized*/true);
2853 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
2854 NULL, "oop_arraycopy_uninit",
2855 /*dest_uninitialized*/true);
2856 }
2858 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
2859 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
2860 /*dest_uninitialized*/true);
2862 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
2863 entry_jbyte_arraycopy,
2864 entry_jshort_arraycopy,
2865 entry_jint_arraycopy,
2866 entry_jlong_arraycopy);
2867 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
2868 entry_jbyte_arraycopy,
2869 entry_jshort_arraycopy,
2870 entry_jint_arraycopy,
2871 entry_oop_arraycopy,
2872 entry_jlong_arraycopy,
2873 entry_checkcast_arraycopy);
2875 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2876 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2877 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2878 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2879 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2880 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2882 // We don't generate specialized code for HeapWord-aligned source
2883 // arrays, so just use the code we've already generated
2884 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2885 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2887 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2888 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2890 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2891 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2893 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2894 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2896 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2897 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2899 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
2900 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
2901 }
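// Note on the UseCompressedOops split above: with compressed oops an oop
// element is 4 bytes, so the int copy loops are reused for oop arrays;
// otherwise the 8-byte long copy loops are used. Illustratively:
//
//   size_t oop_element_size = UseCompressedOops ? sizeof(jint) : sizeof(jlong);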
2903 void generate_math_stubs() {
2904 {
2905 StubCodeMark mark(this, "StubRoutines", "log");
2906 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2908 __ subq(rsp, 8);
2909 __ movdbl(Address(rsp, 0), xmm0);
2910 __ fld_d(Address(rsp, 0));
2911 __ flog();
2912 __ fstp_d(Address(rsp, 0));
2913 __ movdbl(xmm0, Address(rsp, 0));
2914 __ addq(rsp, 8);
2915 __ ret(0);
2916 }
2917 {
2918 StubCodeMark mark(this, "StubRoutines", "log10");
2919 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2921 __ subq(rsp, 8);
2922 __ movdbl(Address(rsp, 0), xmm0);
2923 __ fld_d(Address(rsp, 0));
2924 __ flog10();
2925 __ fstp_d(Address(rsp, 0));
2926 __ movdbl(xmm0, Address(rsp, 0));
2927 __ addq(rsp, 8);
2928 __ ret(0);
2929 }
2930 {
2931 StubCodeMark mark(this, "StubRoutines", "sin");
2932 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2934 __ subq(rsp, 8);
2935 __ movdbl(Address(rsp, 0), xmm0);
2936 __ fld_d(Address(rsp, 0));
2937 __ trigfunc('s');
2938 __ fstp_d(Address(rsp, 0));
2939 __ movdbl(xmm0, Address(rsp, 0));
2940 __ addq(rsp, 8);
2941 __ ret(0);
2942 }
2943 {
2944 StubCodeMark mark(this, "StubRoutines", "cos");
2945 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2947 __ subq(rsp, 8);
2948 __ movdbl(Address(rsp, 0), xmm0);
2949 __ fld_d(Address(rsp, 0));
2950 __ trigfunc('c');
2951 __ fstp_d(Address(rsp, 0));
2952 __ movdbl(xmm0, Address(rsp, 0));
2953 __ addq(rsp, 8);
2954 __ ret(0);
2955 }
2956 {
2957 StubCodeMark mark(this, "StubRoutines", "tan");
2958 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2960 __ subq(rsp, 8);
2961 __ movdbl(Address(rsp, 0), xmm0);
2962 __ fld_d(Address(rsp, 0));
2963 __ trigfunc('t');
2964 __ fstp_d(Address(rsp, 0));
2965 __ movdbl(xmm0, Address(rsp, 0));
2966 __ addq(rsp, 8);
2967 __ ret(0);
2968 }
2969 {
2970 StubCodeMark mark(this, "StubRoutines", "exp");
2971 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc();
2973 __ subq(rsp, 8);
2974 __ movdbl(Address(rsp, 0), xmm0);
2975 __ fld_d(Address(rsp, 0));
2976 __ exp_with_fallback(0);
2977 __ fstp_d(Address(rsp, 0));
2978 __ movdbl(xmm0, Address(rsp, 0));
2979 __ addq(rsp, 8);
2980 __ ret(0);
2981 }
2982 {
2983 StubCodeMark mark(this, "StubRoutines", "pow");
2984 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc();
2986 __ subq(rsp, 8);
2987 __ movdbl(Address(rsp, 0), xmm1);
2988 __ fld_d(Address(rsp, 0));
2989 __ movdbl(Address(rsp, 0), xmm0);
2990 __ fld_d(Address(rsp, 0));
2991 __ pow_with_fallback(0);
2992 __ fstp_d(Address(rsp, 0));
2993 __ movdbl(xmm0, Address(rsp, 0));
2994 __ addq(rsp, 8);
2995 __ ret(0);
2996 }
2997 }
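// Each math stub above has the same shape: spill the SSE argument(s) to the
// stack, compute on the x87 stack, reload the result into xmm0. Schematically
// (illustrative pseudo-assembly, not the emitted code):
//
//   subq rsp, 8; movdbl [rsp], xmm0; fld_d [rsp]   // xmm0 -> st(0)
//   <x87 operation: flog, trigfunc, ...>
//   fstp_d [rsp]; movdbl xmm0, [rsp]; addq rsp, 8  // st(0) -> xmm0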
2999 // AES intrinsic stubs
3000 enum {AESBlockSize = 16};
3002 address generate_key_shuffle_mask() {
3003 __ align(16);
3004 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
3005 address start = __ pc();
3006 __ emit_data64( 0x0405060700010203, relocInfo::none );
3007 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
3008 return start;
3009 }
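// In memory the two qwords above spell the byte indices
// 03 02 01 00 07 06 05 04 0b 0a 09 08 0f 0e 0d 0c, so pshufb with this mask
// reverses the byte order within each 32-bit word. Illustrative effect:
//
//   for (int i = 0; i < 4; i++) dst_word[i] = bswap32(src_word[i]);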
3011 // Utility routine for loading a 128-bit key word in little-endian format;
3012 // the caller can optionally indicate that the shuffle mask is already in an XMM register
3013 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
3014 __ movdqu(xmmdst, Address(key, offset));
3015 if (xmm_shuf_mask != NULL) {
3016 __ pshufb(xmmdst, xmm_shuf_mask);
3017 } else {
3018 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3019 }
3020 }
3022 // Arguments:
3023 //
3024 // Inputs:
3025 // c_rarg0 - source byte array address
3026 // c_rarg1 - destination byte array address
3027 // c_rarg2 - K (key) in little endian int array
3028 //
3029 address generate_aescrypt_encryptBlock() {
3030 assert(UseAES, "need AES instructions and misaligned SSE support");
3031 __ align(CodeEntryAlignment);
3032 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
3033 Label L_doLast;
3034 address start = __ pc();
3036 const Register from = c_rarg0; // source array address
3037 const Register to = c_rarg1; // destination array address
3038 const Register key = c_rarg2; // key array address
3039 const Register keylen = rax;
3041 const XMMRegister xmm_result = xmm0;
3042 const XMMRegister xmm_key_shuf_mask = xmm1;
3043 // On win64 xmm6-xmm15 must be preserved so don't use them.
3044 const XMMRegister xmm_temp1 = xmm2;
3045 const XMMRegister xmm_temp2 = xmm3;
3046 const XMMRegister xmm_temp3 = xmm4;
3047 const XMMRegister xmm_temp4 = xmm5;
3049 __ enter(); // required for proper stackwalking of RuntimeStub frame
3051 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
3052 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3054 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3055 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input
3057 // For encryption, the Java expanded key ordering is just what we need;
3058 // we don't know if the key is aligned, hence we do not use the load-execute form
3060 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
3061 __ pxor(xmm_result, xmm_temp1);
3063 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
3064 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
3065 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
3066 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
3068 __ aesenc(xmm_result, xmm_temp1);
3069 __ aesenc(xmm_result, xmm_temp2);
3070 __ aesenc(xmm_result, xmm_temp3);
3071 __ aesenc(xmm_result, xmm_temp4);
3073 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
3074 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
3075 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
3076 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
3078 __ aesenc(xmm_result, xmm_temp1);
3079 __ aesenc(xmm_result, xmm_temp2);
3080 __ aesenc(xmm_result, xmm_temp3);
3081 __ aesenc(xmm_result, xmm_temp4);
3083 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
3084 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
3086 __ cmpl(keylen, 44);
3087 __ jccb(Assembler::equal, L_doLast);
3089 __ aesenc(xmm_result, xmm_temp1);
3090 __ aesenc(xmm_result, xmm_temp2);
3092 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
3093 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
3095 __ cmpl(keylen, 52);
3096 __ jccb(Assembler::equal, L_doLast);
3098 __ aesenc(xmm_result, xmm_temp1);
3099 __ aesenc(xmm_result, xmm_temp2);
3101 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
3102 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
3104 __ BIND(L_doLast);
3105 __ aesenc(xmm_result, xmm_temp1);
3106 __ aesenclast(xmm_result, xmm_temp2);
3107 __ movdqu(Address(to, 0), xmm_result); // store the result
3108 __ xorptr(rax, rax); // return 0
3109 __ leave(); // required for proper stackwalking of RuntimeStub frame
3110 __ ret(0);
3112 return start;
3113 }
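// keylen is the expanded-key length in ints (44, 52 or 60 for AES-128/192/256),
// so the stub runs 10, 12 or 14 rounds. Schematically (illustrative only,
// 'rk[i]' being the 16-byte key word at offset i*0x10):
//
//   int rounds = keylen / 4 - 1;                         // 10, 12 or 14
//   state ^= rk[0];
//   for (int r = 1; r < rounds; r++) state = aesenc(state, rk[r]);
//   state = aesenclast(state, rk[rounds]);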
3116 // Arguments:
3117 //
3118 // Inputs:
3119 // c_rarg0 - source byte array address
3120 // c_rarg1 - destination byte array address
3121 // c_rarg2 - K (key) in little endian int array
3122 //
3123 address generate_aescrypt_decryptBlock() {
3124 assert(UseAES, "need AES instructions and misaligned SSE support");
3125 __ align(CodeEntryAlignment);
3126 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
3127 Label L_doLast;
3128 address start = __ pc();
3130 const Register from = c_rarg0; // source array address
3131 const Register to = c_rarg1; // destination array address
3132 const Register key = c_rarg2; // key array address
3133 const Register keylen = rax;
3135 const XMMRegister xmm_result = xmm0;
3136 const XMMRegister xmm_key_shuf_mask = xmm1;
3137 // On win64 xmm6-xmm15 must be preserved so don't use them.
3138 const XMMRegister xmm_temp1 = xmm2;
3139 const XMMRegister xmm_temp2 = xmm3;
3140 const XMMRegister xmm_temp3 = xmm4;
3141 const XMMRegister xmm_temp4 = xmm5;
3143 __ enter(); // required for proper stackwalking of RuntimeStub frame
3145 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
3146 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3148 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3149 __ movdqu(xmm_result, Address(from, 0));
3151 // For decryption, the Java expanded key ordering is rotated one position from what we want,
3152 // so we start from 0x10 here and hit 0x00 last.
3153 // We don't know if the key is aligned, hence we do not use the load-execute form.
3154 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
3155 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
3156 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
3157 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
3159 __ pxor (xmm_result, xmm_temp1);
3160 __ aesdec(xmm_result, xmm_temp2);
3161 __ aesdec(xmm_result, xmm_temp3);
3162 __ aesdec(xmm_result, xmm_temp4);
3164 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
3165 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
3166 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
3167 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
3169 __ aesdec(xmm_result, xmm_temp1);
3170 __ aesdec(xmm_result, xmm_temp2);
3171 __ aesdec(xmm_result, xmm_temp3);
3172 __ aesdec(xmm_result, xmm_temp4);
3174 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
3175 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
3176 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);
3178 __ cmpl(keylen, 44);
3179 __ jccb(Assembler::equal, L_doLast);
3181 __ aesdec(xmm_result, xmm_temp1);
3182 __ aesdec(xmm_result, xmm_temp2);
3184 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
3185 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
3187 __ cmpl(keylen, 52);
3188 __ jccb(Assembler::equal, L_doLast);
3190 __ aesdec(xmm_result, xmm_temp1);
3191 __ aesdec(xmm_result, xmm_temp2);
3193 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
3194 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
3196 __ BIND(L_doLast);
3197 __ aesdec(xmm_result, xmm_temp1);
3198 __ aesdec(xmm_result, xmm_temp2);
3200 // for decryption the aesdeclast operation is always on key+0x00
3201 __ aesdeclast(xmm_result, xmm_temp3);
3202 __ movdqu(Address(to, 0), xmm_result); // store the result
3203 __ xorptr(rax, rax); // return 0
3204 __ leave(); // required for proper stackwalking of RuntimeStub frame
3205 __ ret(0);
3207 return start;
3208 }
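// Decryption mirrors encryption, but because of the rotated Java key ordering
// noted above the first xor uses rk[1] and the last round always uses rk[0].
// Schematically (illustrative only):
//
//   state ^= rk[1];
//   for (int r = 2; r <= rounds; r++) state = aesdec(state, rk[r]);
//   state = aesdeclast(state, rk[0]);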
3211 // Arguments:
3212 //
3213 // Inputs:
3214 // c_rarg0 - source byte array address
3215 // c_rarg1 - destination byte array address
3216 // c_rarg2 - K (key) in little endian int array
3217 // c_rarg3 - r vector byte array address
3218 // c_rarg4 - input length
3219 //
3220 // Output:
3221 // rax - input length
3222 //
3223 address generate_cipherBlockChaining_encryptAESCrypt() {
3224 assert(UseAES, "need AES instructions and misaligned SSE support");
3225 __ align(CodeEntryAlignment);
3226 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
3227 address start = __ pc();
3229 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
3230 const Register from = c_rarg0; // source array address
3231 const Register to = c_rarg1; // destination array address
3232 const Register key = c_rarg2; // key array address
3233 const Register rvec = c_rarg3; // r byte array, initialized from the initvector array address,
3234 // and left holding the results of the last encryption block
3235 #ifndef _WIN64
3236 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3237 #else
3238 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3239 const Register len_reg = r10; // pick the first volatile windows register
3240 #endif
3241 const Register pos = rax;
3243 // xmm register assignments for the loops below
3244 const XMMRegister xmm_result = xmm0;
3245 const XMMRegister xmm_temp = xmm1;
3246 // keys 0-10 preloaded into xmm2-xmm12
3247 const int XMM_REG_NUM_KEY_FIRST = 2;
3248 const int XMM_REG_NUM_KEY_LAST = 15;
3249 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3250 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
3251 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
3252 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
3253 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);
3255 __ enter(); // required for proper stackwalking of RuntimeStub frame
3257 #ifdef _WIN64
3258 // on win64, fill len_reg from stack position
3259 __ movl(len_reg, len_mem);
3260 // save the xmm registers which must be preserved 6-15
3261 __ subptr(rsp, -rsp_after_call_off * wordSize);
3262 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3263 __ movdqu(xmm_save(i), as_XMMRegister(i));
3264 }
3265 #else
3266 __ push(len_reg); // Save
3267 #endif
3269 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front
3270 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3271 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0
3272 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) {
3273 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3274 offset += 0x10;
3275 }
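    // load_key() brings each 16-byte round key into the byte order the
    // AES-NI instructions expect; roughly, per round key (as implemented by
    // the helper earlier in this file):
    //
    //   __ movdqu(xmmdst, Address(key, offset));   // raw little-endian ints
    //   __ pshufb(xmmdst, xmm_shuf_mask);          // byte-swap each 32-bit word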
3276 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec
3278     // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, or 60=256)
3279 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3280 __ cmpl(rax, 44);
3281 __ jcc(Assembler::notEqual, L_key_192_256);
3283 // 128 bit code follows here
3284 __ movptr(pos, 0);
3285 __ align(OptoLoopAlignment);
3287 __ BIND(L_loopTop_128);
3288 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3289 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3290 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3291 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
3292 __ aesenc(xmm_result, as_XMMRegister(rnum));
3293 }
3294 __ aesenclast(xmm_result, xmm_key10);
3295 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3296 // no need to store r to memory until we exit
3297 __ addptr(pos, AESBlockSize);
3298 __ subptr(len_reg, AESBlockSize);
3299 __ jcc(Assembler::notEqual, L_loopTop_128);
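    // The loop above is standard CBC encryption, c[i] = E_K(p[i] ^ c[i-1])
    // with c[-1] = IV.  A C-style reference sketch of the dataflow, where
    // aes_encrypt_block(), xor_block() and store_block() are hypothetical
    // helpers used purely for documentation:
    //
    //   for (int pos = 0; pos < len; pos += AESBlockSize) {
    //     xor_block(r, from + pos);       // r ^= next plaintext block
    //     aes_encrypt_block(r, key);      // r = E_K(r)
    //     store_block(to + pos, r);       // emit ciphertext; r chains on
    //   }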
3301 __ BIND(L_exit);
3302 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object
3304 #ifdef _WIN64
3305 // restore xmm regs belonging to calling function
3306 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3307 __ movdqu(as_XMMRegister(i), xmm_save(i));
3308 }
3309 __ movl(rax, len_mem);
3310 #else
3311 __ pop(rax); // return length
3312 #endif
3313 __ leave(); // required for proper stackwalking of RuntimeStub frame
3314 __ ret(0);
3316 __ BIND(L_key_192_256);
3317 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
3318 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
3319 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
3320 __ cmpl(rax, 52);
3321 __ jcc(Assembler::notEqual, L_key_256);
3323 // 192-bit code follows here (could be changed to use more xmm registers)
3324 __ movptr(pos, 0);
3325 __ align(OptoLoopAlignment);
3327 __ BIND(L_loopTop_192);
3328 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3329 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3330 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3331 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
3332 __ aesenc(xmm_result, as_XMMRegister(rnum));
3333 }
3334 __ aesenclast(xmm_result, xmm_key12);
3335 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3336 // no need to store r to memory until we exit
3337 __ addptr(pos, AESBlockSize);
3338 __ subptr(len_reg, AESBlockSize);
3339 __ jcc(Assembler::notEqual, L_loopTop_192);
3340 __ jmp(L_exit);
3342 __ BIND(L_key_256);
3343 // 256-bit code follows here (could be changed to use more xmm registers)
3344 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3345 __ movptr(pos, 0);
3346 __ align(OptoLoopAlignment);
3348 __ BIND(L_loopTop_256);
3349 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3350 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3351 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3352 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3353 __ aesenc(xmm_result, as_XMMRegister(rnum));
3354 }
3355 load_key(xmm_temp, key, 0xe0);
3356 __ aesenclast(xmm_result, xmm_temp);
3357 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3358 // no need to store r to memory until we exit
3359 __ addptr(pos, AESBlockSize);
3360 __ subptr(len_reg, AESBlockSize);
3361 __ jcc(Assembler::notEqual, L_loopTop_256);
3362 __ jmp(L_exit);
3364 return start;
3365 }
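  // CBC encryption is inherently serial: block i cannot be started until
  // c[i-1] is known, so the loops above handle one block per iteration.
  // CBC decryption has no such dependency (every D_K(c[i]) is independent
  // and only the final xor consumes c[i-1]), which the parallelized
  // decrypt stub further below exploits.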
3367 // Safefetch stubs.
3368 void generate_safefetch(const char* name, int size, address* entry,
3369 address* fault_pc, address* continuation_pc) {
3370 // safefetch signatures:
3371 // int SafeFetch32(int* adr, int errValue);
3372 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3373 //
3374 // arguments:
3375 // c_rarg0 = adr
3376 // c_rarg1 = errValue
3377 //
3378 // result:
3379     //   rax = *adr or errValue
3381 StubCodeMark mark(this, "StubRoutines", name);
3383 // Entry point, pc or function descriptor.
3384 *entry = __ pc();
3386 // Load *adr into c_rarg1, may fault.
3387 *fault_pc = __ pc();
3388 switch (size) {
3389 case 4:
3390 // int32_t
3391 __ movl(c_rarg1, Address(c_rarg0, 0));
3392 break;
3393 case 8:
3394 // int64_t
3395 __ movq(c_rarg1, Address(c_rarg0, 0));
3396 break;
3397 default:
3398 ShouldNotReachHere();
3399 }
3401 // return errValue or *adr
3402 *continuation_pc = __ pc();
3403 __ movq(rax, c_rarg1);
3404 __ ret(0);
3405 }
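  // On a faulting access the signal handler recognizes *fault_pc and
  // resumes execution at *continuation_pc, where c_rarg1 still holds
  // errValue.  Callers reach these stubs through the SafeFetch32/SafeFetchN
  // wrappers declared in stubRoutines.hpp, roughly:
  //
  //   int v = SafeFetch32((int*) possibly_unmapped_addr, -1);
  //   // v == *addr if the load succeeded, -1 if it faulted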
3407   // This is a version of CBC/AES Decrypt which processes 4 blocks at a time
3408   // in its inner loop to hide instruction latency
3409 //
3410 // Arguments:
3411 //
3412 // Inputs:
3413 // c_rarg0 - source byte array address
3414 // c_rarg1 - destination byte array address
3415 // c_rarg2 - K (key) in little endian int array
3416 // c_rarg3 - r vector byte array address
3417 // c_rarg4 - input length
3418 //
3419 // Output:
3420 // rax - input length
3421 //
3423 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3424 assert(UseAES, "need AES instructions and misaligned SSE support");
3425 __ align(CodeEntryAlignment);
3426 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3427 address start = __ pc();
3429 Label L_exit, L_key_192_256, L_key_256;
3430 Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128;
3431 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
3432 const Register from = c_rarg0; // source array address
3433 const Register to = c_rarg1; // destination array address
3434 const Register key = c_rarg2; // key array address
3435     const Register rvec        = c_rarg3;  // r byte array, initialized from the init vector (IV) address,
3436     //                                     // and left holding the last encryption block on exit
3437 #ifndef _WIN64
3438 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3439 #else
3440 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3441 const Register len_reg = r10; // pick the first volatile windows register
3442 #endif
3443 const Register pos = rax;
3445     // keys 0-10 preloaded into xmm5-xmm15
3446 const int XMM_REG_NUM_KEY_FIRST = 5;
3447 const int XMM_REG_NUM_KEY_LAST = 15;
3448 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3449 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
3451 __ enter(); // required for proper stackwalking of RuntimeStub frame
3453 #ifdef _WIN64
3454 // on win64, fill len_reg from stack position
3455 __ movl(len_reg, len_mem);
3456 // save the xmm registers which must be preserved 6-15
3457 __ subptr(rsp, -rsp_after_call_off * wordSize);
3458 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3459 __ movdqu(xmm_save(i), as_XMMRegister(i));
3460 }
3461 #else
3462 __ push(len_reg); // Save
3463 #endif
3465 // the java expanded key ordering is rotated one position from what we want
3466 // so we start from 0x10 here and hit 0x00 last
3467 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
3468 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3469     // load up xmm regs 5 thru 15 with keys 0x10 - 0xa0, then 0x00 last
3470 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
3471 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3472 offset += 0x10;
3473 }
3474 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);
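    // resulting assignment: xmm5 = key[0x10], ..., xmm14 = key[0xa0] and
    // xmm15 = key[0x00], so aesdeclast below is always fed from key+0x00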
3476 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block
3478 // registers holding the four results in the parallelized loop
3479 const XMMRegister xmm_result0 = xmm0;
3480 const XMMRegister xmm_result1 = xmm2;
3481 const XMMRegister xmm_result2 = xmm3;
3482 const XMMRegister xmm_result3 = xmm4;
3484 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec
3486     // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array: 44=128, 52=192, or 60=256)
3487 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3488 __ cmpl(rax, 44);
3489 __ jcc(Assembler::notEqual, L_key_192_256);
3492 // 128-bit code follows here, parallelized
3493 __ movptr(pos, 0);
3494 __ align(OptoLoopAlignment);
3495 __ BIND(L_multiBlock_loopTop_128);
3496 __ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left
3497 __ jcc(Assembler::less, L_singleBlock_loopTop_128);
3499 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmmresult registers
3500 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize));
3501 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize));
3502 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize));
3504 #define DoFour(opc, src_reg) \
3505 __ opc(xmm_result0, src_reg); \
3506 __ opc(xmm_result1, src_reg); \
3507 __ opc(xmm_result2, src_reg); \
3508 __ opc(xmm_result3, src_reg);
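    // DoFour expands one opcode into four instructions, one per in-flight
    // block.  aesdec has multi-cycle latency but pipelines well, so
    // interleaving four independent dependency chains keeps the AES unit
    // busy instead of stalling on each round's result.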
3510 DoFour(pxor, xmm_key_first);
3511 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3512 DoFour(aesdec, as_XMMRegister(rnum));
3513 }
3514 DoFour(aesdeclast, xmm_key_last);
3515 // for each result, xor with the r vector of previous cipher block
3516 __ pxor(xmm_result0, xmm_prev_block_cipher);
3517 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize));
3518 __ pxor(xmm_result1, xmm_prev_block_cipher);
3519 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize));
3520 __ pxor(xmm_result2, xmm_prev_block_cipher);
3521 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize));
3522 __ pxor(xmm_result3, xmm_prev_block_cipher);
3523 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks
3525 __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output
3526 __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1);
3527 __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2);
3528 __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3);
3530 __ addptr(pos, 4*AESBlockSize);
3531 __ subptr(len_reg, 4*AESBlockSize);
3532 __ jmp(L_multiBlock_loopTop_128);
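    // The block above is plain CBC decryption, p[i] = D_K(c[i]) ^ c[i-1],
    // evaluated four blocks per trip.  A C-style sketch of one iteration,
    // with aes_decrypt_block() and block() as hypothetical helpers used only
    // to document the dataflow:
    //
    //   for (int j = 0; j < 4; j++) {
    //     result[j]  = aes_decrypt_block(from + pos + j*16, key);
    //     result[j] ^= (j == 0) ? prev_cipher
    //                           : block(from + pos + (j-1)*16);
    //   }
    //   prev_cipher = block(from + pos + 3*16);  // carries into the next group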
3534 // registers used in the non-parallelized loops
3535 // xmm register assignments for the loops below
3536 const XMMRegister xmm_result = xmm0;
3537 const XMMRegister xmm_prev_block_cipher_save = xmm2;
3538 const XMMRegister xmm_key11 = xmm3;
3539 const XMMRegister xmm_key12 = xmm4;
3540 const XMMRegister xmm_temp = xmm4;
3542 __ align(OptoLoopAlignment);
3543 __ BIND(L_singleBlock_loopTop_128);
3544     __ cmpptr(len_reg, 0); // any blocks left?
3545 __ jcc(Assembler::equal, L_exit);
3546 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3547 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3548 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
3549 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3550 __ aesdec(xmm_result, as_XMMRegister(rnum));
3551 }
3552 __ aesdeclast(xmm_result, xmm_key_last);
3553 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3554 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3555 // no need to store r to memory until we exit
3556 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3558 __ addptr(pos, AESBlockSize);
3559 __ subptr(len_reg, AESBlockSize);
3560 __ jmp(L_singleBlock_loopTop_128);
3563 __ BIND(L_exit);
3564 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
3565 #ifdef _WIN64
3566 // restore regs belonging to calling function
3567 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3568 __ movdqu(as_XMMRegister(i), xmm_save(i));
3569 }
3570 __ movl(rax, len_mem);
3571 #else
3572 __ pop(rax); // return length
3573 #endif
3574 __ leave(); // required for proper stackwalking of RuntimeStub frame
3575 __ ret(0);
3578 __ BIND(L_key_192_256);
3579 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
3580 load_key(xmm_key11, key, 0xb0);
3581 __ cmpl(rax, 52);
3582 __ jcc(Assembler::notEqual, L_key_256);
3584 // 192-bit code follows here (could be optimized to use parallelism)
3585 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0
3586 __ movptr(pos, 0);
3587 __ align(OptoLoopAlignment);
3589 __ BIND(L_singleBlock_loopTop_192);
3590 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3591 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3592 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
3593 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3594 __ aesdec(xmm_result, as_XMMRegister(rnum));
3595 }
3596 __ aesdec(xmm_result, xmm_key11);
3597 __ aesdec(xmm_result, xmm_key12);
3598 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
3599 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3600 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3601 // no need to store r to memory until we exit
3602 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3603 __ addptr(pos, AESBlockSize);
3604 __ subptr(len_reg, AESBlockSize);
3605 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
3606 __ jmp(L_exit);
3608 __ BIND(L_key_256);
3609 // 256-bit code follows here (could be optimized to use parallelism)
3610 __ movptr(pos, 0);
3611 __ align(OptoLoopAlignment);
3613 __ BIND(L_singleBlock_loopTop_256);
3614 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3615 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3616 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
3617 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3618 __ aesdec(xmm_result, as_XMMRegister(rnum));
3619 }
3620 __ aesdec(xmm_result, xmm_key11);
3621 load_key(xmm_temp, key, 0xc0);
3622 __ aesdec(xmm_result, xmm_temp);
3623 load_key(xmm_temp, key, 0xd0);
3624 __ aesdec(xmm_result, xmm_temp);
3625 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0
3626 __ aesdec(xmm_result, xmm_temp);
3627 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0
3628 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3629 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3630 // no need to store r to memory until we exit
3631 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3632 __ addptr(pos, AESBlockSize);
3633 __ subptr(len_reg, AESBlockSize);
3634 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
3635 __ jmp(L_exit);
3637 return start;
3638 }
3640 /**
3641 * Arguments:
3642 *
3643 * Inputs:
3644 * c_rarg0 - int crc
3645 * c_rarg1 - byte* buf
3646 * c_rarg2 - int length
3647 *
3648    *   Output:
3649 * rax - int crc result
3650 */
3651 address generate_updateBytesCRC32() {
3652 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
3654 __ align(CodeEntryAlignment);
3655 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
3657 address start = __ pc();
3658 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3659 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3660 // rscratch1: r10
3661 const Register crc = c_rarg0; // crc
3662 const Register buf = c_rarg1; // source java byte array address
3663 const Register len = c_rarg2; // length
3664 const Register table = c_rarg3; // crc_table address (reuse register)
3665 const Register tmp = r11;
3666 assert_different_registers(crc, buf, len, table, tmp, rax);
3668 BLOCK_COMMENT("Entry:");
3669 __ enter(); // required for proper stackwalking of RuntimeStub frame
3671 __ kernel_crc32(crc, buf, len, table, tmp);
3673 __ movl(rax, crc);
3674 __ leave(); // required for proper stackwalking of RuntimeStub frame
3675 __ ret(0);
3677 return start;
3678 }
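  // This produces the same CRC-32 as java.util.zip.CRC32 (polynomial
  // 0x04C11DB7; the table at StubRoutines::x86::_crc_table encodes its
  // bit-reflected form 0xEDB88320).  kernel_crc32 folds the bulk of the
  // buffer with carry-less multiplies, hence the CLMUL requirement in the
  // assert above, and consults the table for odd-sized head and tail bytes.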
3681 /**
3682 * Arguments:
3683 *
3684 * Input:
3685 * c_rarg0 - x address
3686 * c_rarg1 - x length
3687 * c_rarg2 - y address
3688    *    c_rarg3   - y length
3689 * not Win64
3690 * c_rarg4 - z address
3691 * c_rarg5 - z length
3692 * Win64
3693 * rsp+40 - z address
3694 * rsp+48 - z length
3695 */
3696 address generate_multiplyToLen() {
3697 __ align(CodeEntryAlignment);
3698 StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
3700 address start = __ pc();
3701 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3702 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3703 const Register x = rdi;
3704 const Register xlen = rax;
3705 const Register y = rsi;
3706 const Register ylen = rcx;
3707 const Register z = r8;
3708 const Register zlen = r11;
3710     // The following registers will be saved on the stack in multiply_to_len().
3711 const Register tmp1 = r12;
3712 const Register tmp2 = r13;
3713 const Register tmp3 = r14;
3714 const Register tmp4 = r15;
3715 const Register tmp5 = rbx;
3717 BLOCK_COMMENT("Entry:");
3718 __ enter(); // required for proper stackwalking of RuntimeStub frame
3720 #ifndef _WIN64
3721 __ movptr(zlen, r9); // Save r9 in r11 - zlen
3722 #endif
3723 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
3724 // ylen => rcx, z => r8, zlen => r11
3725 // r9 and r10 may be used to save non-volatile registers
3726 #ifdef _WIN64
3727 // last 2 arguments (#4, #5) are on stack on Win64
3728 __ movptr(z, Address(rsp, 6 * wordSize));
3729 __ movptr(zlen, Address(rsp, 7 * wordSize));
3730 #endif
3732 __ movptr(xlen, rsi);
3733 __ movptr(y, rdx);
3734 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);
3736 restore_arg_regs();
3738 __ leave(); // required for proper stackwalking of RuntimeStub frame
3739 __ ret(0);
3741 return start;
3742 }
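  // multiply_to_len() performs the same grade-school multiplication as
  // BigInteger.multiplyToLen: x and y are int magnitudes with the most
  // significant int first, and z receives the xlen + ylen int product.
  // In outline:
  //
  //   for (int i = xlen - 1; i >= 0; i--)
  //     for (int j = ylen - 1; j >= 0; j--)
  //       // accumulate x[i]*y[j] into z[i+j+1], carrying into z[i+j]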
3744 #undef __
3745 #define __ masm->
3747 // Continuation point for throwing of implicit exceptions that are
3748 // not handled in the current activation. Fabricates an exception
3749 // oop and initiates normal exception dispatching in this
3750 // frame. Since we need to preserve callee-saved values (currently
3751 // only for C2, but done for C1 as well) we need a callee-saved oop
3752 // map and therefore have to make these stubs into RuntimeStubs
3753 // rather than BufferBlobs. If the compiler needs all registers to
3754 // be preserved between the fault point and the exception handler
3755 // then it must assume responsibility for that in
3756 // AbstractCompiler::continuation_for_implicit_null_exception or
3757 // continuation_for_implicit_division_by_zero_exception. All other
3758 // implicit exceptions (e.g., NullPointerException or
3759 // AbstractMethodError on entry) are either at call sites or
3760 // otherwise assume that stack unwinding will be initiated, so
3761 // caller saved registers were assumed volatile in the compiler.
3762 address generate_throw_exception(const char* name,
3763 address runtime_entry,
3764 Register arg1 = noreg,
3765 Register arg2 = noreg) {
3766 // Information about frame layout at time of blocking runtime call.
3767 // Note that we only have to preserve callee-saved registers since
3768 // the compilers are responsible for supplying a continuation point
3769 // if they expect all registers to be preserved.
3770 enum layout {
3771 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
3772 rbp_off2,
3773 return_off,
3774 return_off2,
3775 framesize // inclusive of return address
3776 };
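    // The enum counts 32-bit slots (BytesPerInt), so rbp and the return
    // address occupy two slots each.  The assert further down checks that
    // framesize/2, the size in words, is even, i.e. the frame is a multiple
    // of 16 bytes; the prolog then reserves framesize - 4 slots because the
    // return address and rbp (4 slots) are already on the stack after enter().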
3778 int insts_size = 512;
3779 int locs_size = 64;
3781 CodeBuffer code(name, insts_size, locs_size);
3782 OopMapSet* oop_maps = new OopMapSet();
3783 MacroAssembler* masm = new MacroAssembler(&code);
3785 address start = __ pc();
3787 // This is an inlined and slightly modified version of call_VM
3788 // which has the ability to fetch the return PC out of
3789 // thread-local storage and also sets up last_Java_sp slightly
3790 // differently than the real call_VM
3792 __ enter(); // required for proper stackwalking of RuntimeStub frame
3794 assert(is_even(framesize/2), "sp not 16-byte aligned");
3796 // return address and rbp are already in place
3797 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
3799 int frame_complete = __ pc() - start;
3801 // Set up last_Java_sp and last_Java_fp
3802 address the_pc = __ pc();
3803 __ set_last_Java_frame(rsp, rbp, the_pc);
3804 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
3806 // Call runtime
3807 if (arg1 != noreg) {
3808 assert(arg2 != c_rarg1, "clobbered");
3809 __ movptr(c_rarg1, arg1);
3810 }
3811 if (arg2 != noreg) {
3812 __ movptr(c_rarg2, arg2);
3813 }
3814 __ movptr(c_rarg0, r15_thread);
3815 BLOCK_COMMENT("call runtime_entry");
3816 __ call(RuntimeAddress(runtime_entry));
3818 // Generate oop map
3819 OopMap* map = new OopMap(framesize, 0);
3821 oop_maps->add_gc_map(the_pc - start, map);
3823 __ reset_last_Java_frame(true, true);
3825 __ leave(); // required for proper stackwalking of RuntimeStub frame
3827 // check for pending exceptions
3828 #ifdef ASSERT
3829 Label L;
3830 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
3831 (int32_t) NULL_WORD);
3832 __ jcc(Assembler::notEqual, L);
3833 __ should_not_reach_here();
3834 __ bind(L);
3835 #endif // ASSERT
3836 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3839 // codeBlob framesize is in words (not VMRegImpl::slot_size)
3840 RuntimeStub* stub =
3841 RuntimeStub::new_runtime_stub(name,
3842 &code,
3843 frame_complete,
3844 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3845 oop_maps, false);
3846 return stub->entry_point();
3847 }
3849 void create_control_words() {
3850 // Round to nearest, 53-bit mode, exceptions masked
3851 StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
3852     // Round to zero, 53-bit mode, exceptions masked
3853 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
3854 // Round to nearest, 24-bit mode, exceptions masked
3855 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
3856 // Round to nearest, 64-bit mode, exceptions masked
3857 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F;
3858     // Round to nearest, exceptions masked (MXCSR has no precision-control field)
3859 StubRoutines::_mxcsr_std = 0x1F80;
3860 // Note: the following two constants are 80-bit values
3861 // layout is critical for correct loading by FPU.
3862 // Bias for strict fp multiply/divide
3863 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
3864 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
3865 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
3866 // Un-Bias for strict fp multiply/divide
3867 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
3868 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
3869 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
3870 }
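  // For reference, the x87 control-word fields set above: bits 0-5 mask the
  // six floating-point exceptions, bits 8-9 select precision (00b = 24-bit,
  // 10b = 53-bit, 11b = 64-bit) and bits 10-11 select rounding (00b =
  // nearest, 11b = toward zero).  In MXCSR the exception masks sit at bits
  // 7-12, so 0x1F80 masks everything and rounds to nearest.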
3872 // Initialization
3873 void generate_initial() {
3874 // Generates all stubs and initializes the entry points
3876     // These platform-specific settings are needed by generate_call_stub()
3877 create_control_words();
3879     // entry points that exist in all platforms. Note: This is code
3880 // that could be shared among different platforms - however the
3881 // benefit seems to be smaller than the disadvantage of having a
3882 // much more complicated generator structure. See also comment in
3883 // stubRoutines.hpp.
3885 StubRoutines::_forward_exception_entry = generate_forward_exception();
3887 StubRoutines::_call_stub_entry =
3888 generate_call_stub(StubRoutines::_call_stub_return_address);
3890 // is referenced by megamorphic call
3891 StubRoutines::_catch_exception_entry = generate_catch_exception();
3893 // atomic calls
3894 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
3895 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
3896 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
3897 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3898 StubRoutines::_atomic_add_entry = generate_atomic_add();
3899 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
3900 StubRoutines::_fence_entry = generate_orderaccess_fence();
3902 StubRoutines::_handler_for_unsafe_access_entry =
3903 generate_handler_for_unsafe_access();
3905 // platform dependent
3906 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
3907 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
3909 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
3911 // Build this early so it's available for the interpreter.
3912 StubRoutines::_throw_StackOverflowError_entry =
3913 generate_throw_exception("StackOverflowError throw_exception",
3914 CAST_FROM_FN_PTR(address,
3915 SharedRuntime::
3916 throw_StackOverflowError));
3917 if (UseCRC32Intrinsics) {
3918       // set table address before generating the stub which uses it
3919 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
3920 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
3921 }
3922 }
3924 void generate_all() {
3925 // Generates all stubs and initializes the entry points
3927 // These entry points require SharedInfo::stack0 to be set up in
3928 // non-core builds and need to be relocatable, so they each
3929 // fabricate a RuntimeStub internally.
3930 StubRoutines::_throw_AbstractMethodError_entry =
3931 generate_throw_exception("AbstractMethodError throw_exception",
3932 CAST_FROM_FN_PTR(address,
3933 SharedRuntime::
3934 throw_AbstractMethodError));
3936 StubRoutines::_throw_IncompatibleClassChangeError_entry =
3937 generate_throw_exception("IncompatibleClassChangeError throw_exception",
3938 CAST_FROM_FN_PTR(address,
3939 SharedRuntime::
3940 throw_IncompatibleClassChangeError));
3942 StubRoutines::_throw_NullPointerException_at_call_entry =
3943 generate_throw_exception("NullPointerException at call throw_exception",
3944 CAST_FROM_FN_PTR(address,
3945 SharedRuntime::
3946 throw_NullPointerException_at_call));
3948 // entry points that are platform specific
3949 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
3950 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
3951 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
3952 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
3954 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
3955 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
3956 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
3957 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
3959 // support for verify_oop (must happen after universe_init)
3960 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
3962 // arraycopy stubs used by compilers
3963 generate_arraycopy_stubs();
3965 generate_math_stubs();
3967 // don't bother generating these AES intrinsic stubs unless global flag is set
3968 if (UseAESIntrinsics) {
3969 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
3971 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
3972 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
3973 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
3974 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
3975 }
3977 // Safefetch stubs.
3978 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
3979 &StubRoutines::_safefetch32_fault_pc,
3980 &StubRoutines::_safefetch32_continuation_pc);
3981 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
3982 &StubRoutines::_safefetchN_fault_pc,
3983 &StubRoutines::_safefetchN_continuation_pc);
3984 #ifdef COMPILER2
3985 if (UseMultiplyToLenIntrinsic) {
3986 StubRoutines::_multiplyToLen = generate_multiplyToLen();
3987 }
3988 #endif
3989 }
3991 public:
3992 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3993 if (all) {
3994 generate_all();
3995 } else {
3996 generate_initial();
3997 }
3998 }
3999 }; // end class declaration
4001 void StubGenerator_generate(CodeBuffer* code, bool all) {
4002 StubGenerator g(code, all);
4003 }