Sun, 25 Sep 2011 16:03:29 -0700
7089790: integrate bsd-port changes
Reviewed-by: kvn, twisti, jrose
Contributed-by: Kurt Miller <kurt@intricatesoftware.com>, Greg Lewis <glewis@eyesbeyond.com>, Jung-uk Kim <jkim@freebsd.org>, Christos Zoulas <christos@zoulas.com>, Landon Fuller <landonf@plausible.coop>, The FreeBSD Foundation <board@freebsdfoundation.org>, Michael Franz <mvfranz@gmail.com>, Roger Hoover <rhoover@apple.com>, Alexander Strange <astrange@apple.com>
1 /*
2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "assembler_x86.inline.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "nativeInst_x86.hpp"
30 #include "oops/instanceOop.hpp"
31 #include "oops/methodOop.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/frame.inline.hpp"
36 #include "runtime/handles.inline.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubCodeGenerator.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/top.hpp"
41 #ifdef TARGET_OS_FAMILY_linux
42 # include "thread_linux.inline.hpp"
43 #endif
44 #ifdef TARGET_OS_FAMILY_solaris
45 # include "thread_solaris.inline.hpp"
46 #endif
47 #ifdef TARGET_OS_FAMILY_windows
48 # include "thread_windows.inline.hpp"
49 #endif
50 #ifdef TARGET_OS_FAMILY_bsd
51 # include "thread_bsd.inline.hpp"
52 #endif
53 #ifdef COMPILER2
54 #include "opto/runtime.hpp"
55 #endif
57 // Declaration and definition of StubGenerator (no .hpp file).
58 // For a more detailed description of the stub routine structure
59 // see the comment in stubRoutines.hpp
61 #define __ _masm->
62 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
63 #define a__ ((Assembler*)_masm)->
65 #ifdef PRODUCT
66 #define BLOCK_COMMENT(str) /* nothing */
67 #else
68 #define BLOCK_COMMENT(str) __ block_comment(str)
69 #endif
71 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
72 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
74 // Stub Code definitions
76 static address handle_unsafe_access() {
77 JavaThread* thread = JavaThread::current();
78 address pc = thread->saved_exception_pc();
79 // pc is the instruction which we must emulate
80 // doing a no-op is fine: return garbage from the load
81 // therefore, compute npc
82 address npc = Assembler::locate_next_instruction(pc);
84 // request an async exception
85 thread->set_pending_unsafe_access_error();
87 // return address of next instruction to execute
88 return npc;
89 }
91 class StubGenerator: public StubCodeGenerator {
92 private:
94 #ifdef PRODUCT
95 #define inc_counter_np(counter) (0)
96 #else
97 void inc_counter_np_(int& counter) {
98 __ incrementl(ExternalAddress((address)&counter));
99 }
100 #define inc_counter_np(counter) \
101 BLOCK_COMMENT("inc_counter " #counter); \
102 inc_counter_np_(counter);
103 #endif
105 // Call stubs are used to call Java from C
106 //
107 // Linux Arguments:
108 //    c_rarg0:   call wrapper address                   address
109 //    c_rarg1:   result                                 address
110 //    c_rarg2:   result type                            BasicType
111 //    c_rarg3:   method                                 methodOop
112 //    c_rarg4:   (interpreter) entry point              address
113 //    c_rarg5:   parameters                             intptr_t*
114 //    16(rbp):   parameter size (in words)              int
115 //    24(rbp):   thread                                 Thread*
116 //
117 // [ return_from_Java ] <--- rsp
118 // [ argument word n ]
119 // ...
120 // -12 [ argument word 1 ]
121 // -11 [ saved r15 ] <--- rsp_after_call
122 // -10 [ saved r14 ]
123 // -9 [ saved r13 ]
124 // -8 [ saved r12 ]
125 // -7 [ saved rbx ]
126 // -6 [ call wrapper ]
127 // -5 [ result ]
128 // -4 [ result type ]
129 // -3 [ method ]
130 // -2 [ entry point ]
131 // -1 [ parameters ]
132 // 0 [ saved rbp ] <--- rbp
133 // 1 [ return address ]
134 // 2 [ parameter size ]
135 // 3 [ thread ]
136 //
137 // Windows Arguments:
138 //    c_rarg0:   call wrapper address                   address
139 //    c_rarg1:   result                                 address
140 //    c_rarg2:   result type                            BasicType
141 //    c_rarg3:   method                                 methodOop
142 //    48(rbp):   (interpreter) entry point              address
143 //    56(rbp):   parameters                             intptr_t*
144 //    64(rbp):   parameter size (in words)              int
145 //    72(rbp):   thread                                 Thread*
146 //
147 // [ return_from_Java ] <--- rsp
148 // [ argument word n ]
149 // ...
150 // -28 [ argument word 1 ]
151 // -27 [ saved xmm15 ] <--- rsp_after_call
152 // [ saved xmm7-xmm14 ]
153 // -9 [ saved xmm6 ] (each xmm register takes 2 slots)
154 // -7 [ saved r15 ]
155 // -6 [ saved r14 ]
156 // -5 [ saved r13 ]
157 // -4 [ saved r12 ]
158 // -3 [ saved rdi ]
159 // -2 [ saved rsi ]
160 // -1 [ saved rbx ]
161 // 0 [ saved rbp ] <--- rbp
162 // 1 [ return address ]
163 // 2 [ call wrapper ]
164 // 3 [ result ]
165 // 4 [ result type ]
166 // 5 [ method ]
167 // 6 [ entry point ]
168 // 7 [ parameters ]
169 // 8 [ parameter size ]
170 // 9 [ thread ]
171 //
172 // Windows reserves the caller's stack space for arguments 1-4.
173 // We spill c_rarg0-c_rarg3 to this space.
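// A minimal sketch of how this stub is reached from C++: the entry address
// is cast to a function pointer whose parameter list mirrors the layout
// above. The typedef is modeled on the CallStub typedef in stubRoutines.hpp;
// the types and the invoke_call_stub_sketch helper are illustrative
// stand-ins, not the VM's actual declarations.
#include <stdint.h>
typedef unsigned char* sketch_address;
typedef void (*CallStubSketch)(sketch_address link,          // call wrapper (c_rarg0)
                               intptr_t*      result,        // result slot  (c_rarg1)
                               int            result_type,   // BasicType    (c_rarg2)
                               void*          method,        // methodOop    (c_rarg3)
                               sketch_address entry_point,   // c_rarg4 / 48(rbp)
                               intptr_t*      parameters,    // c_rarg5 / 56(rbp)
                               int            size_of_parameters,
                               void*          thread);
static void invoke_call_stub_sketch(sketch_address stub_entry,
                                    sketch_address link, intptr_t* result,
                                    int result_type, void* method,
                                    sketch_address entry_point,
                                    intptr_t* parameters, int nparams,
                                    void* thread) {
  // The stub builds the frame pictured above, pushes the Java parameters,
  // and calls the interpreter entry point.
  ((CallStubSketch) stub_entry)(link, result, result_type, method,
                                entry_point, parameters, nparams, thread);
}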
175 // Call stub stack layout word offsets from rbp
176 enum call_stub_layout {
177 #ifdef _WIN64
178 xmm_save_first = 6, // save from xmm6
179 xmm_save_last = 15, // to xmm15
180 xmm_save_base = -9,
181 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
182 r15_off = -7,
183 r14_off = -6,
184 r13_off = -5,
185 r12_off = -4,
186 rdi_off = -3,
187 rsi_off = -2,
188 rbx_off = -1,
189 rbp_off = 0,
190 retaddr_off = 1,
191 call_wrapper_off = 2,
192 result_off = 3,
193 result_type_off = 4,
194 method_off = 5,
195 entry_point_off = 6,
196 parameters_off = 7,
197 parameter_size_off = 8,
198 thread_off = 9
199 #else
200 rsp_after_call_off = -12,
201 mxcsr_off = rsp_after_call_off,
202 r15_off = -11,
203 r14_off = -10,
204 r13_off = -9,
205 r12_off = -8,
206 rbx_off = -7,
207 call_wrapper_off = -6,
208 result_off = -5,
209 result_type_off = -4,
210 method_off = -3,
211 entry_point_off = -2,
212 parameters_off = -1,
213 rbp_off = 0,
214 retaddr_off = 1,
215 parameter_size_off = 2,
216 thread_off = 3
217 #endif
218 };
220 #ifdef _WIN64
221 Address xmm_save(int reg) {
222 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
223 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
224 }
225 #endif
227 address generate_call_stub(address& return_address) {
228 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
229 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
230 "adjust this code");
231 StubCodeMark mark(this, "StubRoutines", "call_stub");
232 address start = __ pc();
234 // same as in generate_catch_exception()!
235 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
237 const Address call_wrapper (rbp, call_wrapper_off * wordSize);
238 const Address result (rbp, result_off * wordSize);
239 const Address result_type (rbp, result_type_off * wordSize);
240 const Address method (rbp, method_off * wordSize);
241 const Address entry_point (rbp, entry_point_off * wordSize);
242 const Address parameters (rbp, parameters_off * wordSize);
243 const Address parameter_size(rbp, parameter_size_off * wordSize);
245 // same as in generate_catch_exception()!
246 const Address thread (rbp, thread_off * wordSize);
248 const Address r15_save(rbp, r15_off * wordSize);
249 const Address r14_save(rbp, r14_off * wordSize);
250 const Address r13_save(rbp, r13_off * wordSize);
251 const Address r12_save(rbp, r12_off * wordSize);
252 const Address rbx_save(rbp, rbx_off * wordSize);
254 // stub code
255 __ enter();
256 __ subptr(rsp, -rsp_after_call_off * wordSize);
258 // save register parameters
259 #ifndef _WIN64
260 __ movptr(parameters, c_rarg5); // parameters
261 __ movptr(entry_point, c_rarg4); // entry_point
262 #endif
264 __ movptr(method, c_rarg3); // method
265 __ movl(result_type, c_rarg2); // result type
266 __ movptr(result, c_rarg1); // result
267 __ movptr(call_wrapper, c_rarg0); // call wrapper
269 // save regs belonging to calling function
270 __ movptr(rbx_save, rbx);
271 __ movptr(r12_save, r12);
272 __ movptr(r13_save, r13);
273 __ movptr(r14_save, r14);
274 __ movptr(r15_save, r15);
275 #ifdef _WIN64
276 for (int i = 6; i <= 15; i++) {
277 __ movdqu(xmm_save(i), as_XMMRegister(i));
278 }
280 const Address rdi_save(rbp, rdi_off * wordSize);
281 const Address rsi_save(rbp, rsi_off * wordSize);
283 __ movptr(rsi_save, rsi);
284 __ movptr(rdi_save, rdi);
285 #else
286 const Address mxcsr_save(rbp, mxcsr_off * wordSize);
287 {
288 Label skip_ldmx;
289 __ stmxcsr(mxcsr_save);
290 __ movl(rax, mxcsr_save);
291 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
292 ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
293 __ cmp32(rax, mxcsr_std);
294 __ jcc(Assembler::equal, skip_ldmx);
295 __ ldmxcsr(mxcsr_std);
296 __ bind(skip_ldmx);
297 }
298 #endif
300 // Load up thread register
301 __ movptr(r15_thread, thread);
302 __ reinit_heapbase();
304 #ifdef ASSERT
305 // make sure we have no pending exceptions
306 {
307 Label L;
308 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
309 __ jcc(Assembler::equal, L);
310 __ stop("StubRoutines::call_stub: entered with pending exception");
311 __ bind(L);
312 }
313 #endif
315 // pass parameters if any
316 BLOCK_COMMENT("pass parameters if any");
317 Label parameters_done;
318 __ movl(c_rarg3, parameter_size);
319 __ testl(c_rarg3, c_rarg3);
320 __ jcc(Assembler::zero, parameters_done);
322 Label loop;
323 __ movptr(c_rarg2, parameters); // parameter pointer
324 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
325 __ BIND(loop);
326 __ movptr(rax, Address(c_rarg2, 0));// get parameter
327 __ addptr(c_rarg2, wordSize); // advance to next parameter
328 __ decrementl(c_rarg1); // decrement counter
329 __ push(rax); // pass parameter
330 __ jcc(Assembler::notZero, loop);
332 // call Java function
333 __ BIND(parameters_done);
334 __ movptr(rbx, method); // get methodOop
335 __ movptr(c_rarg1, entry_point); // get entry_point
336 __ mov(r13, rsp); // set sender sp
337 BLOCK_COMMENT("call Java function");
338 __ call(c_rarg1);
340 BLOCK_COMMENT("call_stub_return_address:");
341 return_address = __ pc();
343 // store result depending on type (everything that is not
344 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
345 __ movptr(c_rarg0, result);
346 Label is_long, is_float, is_double, exit;
347 __ movl(c_rarg1, result_type);
348 __ cmpl(c_rarg1, T_OBJECT);
349 __ jcc(Assembler::equal, is_long);
350 __ cmpl(c_rarg1, T_LONG);
351 __ jcc(Assembler::equal, is_long);
352 __ cmpl(c_rarg1, T_FLOAT);
353 __ jcc(Assembler::equal, is_float);
354 __ cmpl(c_rarg1, T_DOUBLE);
355 __ jcc(Assembler::equal, is_double);
357 // handle T_INT case
358 __ movl(Address(c_rarg0, 0), rax);
360 __ BIND(exit);
362 // pop parameters
363 __ lea(rsp, rsp_after_call);
365 #ifdef ASSERT
366 // verify that threads correspond
367 {
368 Label L, S;
369 __ cmpptr(r15_thread, thread);
370 __ jcc(Assembler::notEqual, S);
371 __ get_thread(rbx);
372 __ cmpptr(r15_thread, rbx);
373 __ jcc(Assembler::equal, L);
374 __ bind(S);
375 __ jcc(Assembler::equal, L);
376 __ stop("StubRoutines::call_stub: threads must correspond");
377 __ bind(L);
378 }
379 #endif
381 // restore regs belonging to calling function
382 #ifdef _WIN64
383 for (int i = 15; i >= 6; i--) {
384 __ movdqu(as_XMMRegister(i), xmm_save(i));
385 }
386 #endif
387 __ movptr(r15, r15_save);
388 __ movptr(r14, r14_save);
389 __ movptr(r13, r13_save);
390 __ movptr(r12, r12_save);
391 __ movptr(rbx, rbx_save);
393 #ifdef _WIN64
394 __ movptr(rdi, rdi_save);
395 __ movptr(rsi, rsi_save);
396 #else
397 __ ldmxcsr(mxcsr_save);
398 #endif
400 // restore rsp
401 __ addptr(rsp, -rsp_after_call_off * wordSize);
403 // return
404 __ pop(rbp);
405 __ ret(0);
407 // handle return types different from T_INT
408 __ BIND(is_long);
409 __ movq(Address(c_rarg0, 0), rax);
410 __ jmp(exit);
412 __ BIND(is_float);
413 __ movflt(Address(c_rarg0, 0), xmm0);
414 __ jmp(exit);
416 __ BIND(is_double);
417 __ movdbl(Address(c_rarg0, 0), xmm0);
418 __ jmp(exit);
420 return start;
421 }
423 // Return point for a Java call if there's an exception thrown in
424 // Java code. The exception is caught and transformed into a
425 // pending exception stored in JavaThread that can be tested from
426 // within the VM.
427 //
428 // Note: Usually the parameters are removed by the callee. In case
429 // of an exception crossing an activation frame boundary, that is
430 // not the case if the callee is compiled code => need to setup the
431 // rsp.
432 //
433 // rax: exception oop
435 address generate_catch_exception() {
436 StubCodeMark mark(this, "StubRoutines", "catch_exception");
437 address start = __ pc();
439 // same as in generate_call_stub():
440 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
441 const Address thread (rbp, thread_off * wordSize);
443 #ifdef ASSERT
444 // verify that threads correspond
445 {
446 Label L, S;
447 __ cmpptr(r15_thread, thread);
448 __ jcc(Assembler::notEqual, S);
449 __ get_thread(rbx);
450 __ cmpptr(r15_thread, rbx);
451 __ jcc(Assembler::equal, L);
452 __ bind(S);
453 __ stop("StubRoutines::catch_exception: threads must correspond");
454 __ bind(L);
455 }
456 #endif
458 // set pending exception
459 __ verify_oop(rax);
461 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
462 __ lea(rscratch1, ExternalAddress((address)__FILE__));
463 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
464 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
466 // complete return to VM
467 assert(StubRoutines::_call_stub_return_address != NULL,
468 "_call_stub_return_address must have been generated before");
469 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
471 return start;
472 }
474 // Continuation point for runtime calls returning with a pending
475 // exception. The pending exception check happened in the runtime
476 // or native call stub. The pending exception in Thread is
477 // converted into a Java-level exception.
478 //
479 // Contract with Java-level exception handlers:
480 // rax: exception
481 // rdx: throwing pc
482 //
483 // NOTE: At entry of this stub, exception-pc must be on stack !!
485 address generate_forward_exception() {
486 StubCodeMark mark(this, "StubRoutines", "forward exception");
487 address start = __ pc();
489 // Upon entry, the sp points to the return address returning into
490 // Java (interpreted or compiled) code; i.e., the return address
491 // becomes the throwing pc.
492 //
493 // Arguments pushed before the runtime call are still on the stack
494 // but the exception handler will reset the stack pointer ->
495 // ignore them. A potential result in registers can be ignored as
496 // well.
498 #ifdef ASSERT
499 // make sure this code is only executed if there is a pending exception
500 {
501 Label L;
502 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
503 __ jcc(Assembler::notEqual, L);
504 __ stop("StubRoutines::forward exception: no pending exception (1)");
505 __ bind(L);
506 }
507 #endif
509 // compute exception handler into rbx
510 __ movptr(c_rarg0, Address(rsp, 0));
511 BLOCK_COMMENT("call exception_handler_for_return_address");
512 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
513 SharedRuntime::exception_handler_for_return_address),
514 r15_thread, c_rarg0);
515 __ mov(rbx, rax);
517 // setup rax & rdx, remove return address & clear pending exception
518 __ pop(rdx);
519 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
520 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
522 #ifdef ASSERT
523 // make sure exception is set
524 {
525 Label L;
526 __ testptr(rax, rax);
527 __ jcc(Assembler::notEqual, L);
528 __ stop("StubRoutines::forward exception: no pending exception (2)");
529 __ bind(L);
530 }
531 #endif
533 // continue at exception handler (return address removed)
534 // rax: exception
535 // rbx: exception handler
536 // rdx: throwing pc
537 __ verify_oop(rax);
538 __ jmp(rbx);
540 return start;
541 }
543 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
544 //
545 // Arguments :
546 // c_rarg0: exchange_value
547 // c_rarg1: dest
548 //
549 // Result:
550 // *dest <- exchange_value, return (original *dest)
551 address generate_atomic_xchg() {
552 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
553 address start = __ pc();
555 __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow
556 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
557 __ ret(0);
559 return start;
560 }
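// A minimal C++ sketch of the contract above, assuming GCC/Clang atomic
// builtins; on x86 the single xchgl instruction is implicitly locked.
#include <stdint.h>
static inline int32_t atomic_xchg_sketch(int32_t exchange_value,
                                         volatile int32_t* dest) {
  // Store exchange_value and return the previous *dest, exactly what the
  // stub's xchgl(rax, Address(c_rarg1, 0)) does.
  return __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
}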
562 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
563 //
564 // Arguments :
565 // c_rarg0: exchange_value
566 // c_rarg1: dest
567 //
568 // Result:
569 // *dest <- exchange_value, return (original *dest)
570 address generate_atomic_xchg_ptr() {
571 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
572 address start = __ pc();
574 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
575 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
576 __ ret(0);
578 return start;
579 }
581 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
582 // jint compare_value)
583 //
584 // Arguments :
585 // c_rarg0: exchange_value
586 // c_rarg1: dest
587 // c_rarg2: compare_value
588 //
589 // Result:
590 // if (compare_value == *dest) {
591 //   *dest = exchange_value; return compare_value;
592 // } else {
593 //   return *dest;
594 // }
595 address generate_atomic_cmpxchg() {
596 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
597 address start = __ pc();
599 __ movl(rax, c_rarg2);
600 if ( os::is_MP() ) __ lock();
601 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
602 __ ret(0);
604 return start;
605 }
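// A minimal C++ sketch of the cmpxchg contract, assuming the GCC/Clang
// __sync builtin. Like the stub, it returns the value actually observed at
// *dest: compare_value on success, the current contents on failure (the
// rax convention of lock cmpxchgl).
#include <stdint.h>
static inline int32_t atomic_cmpxchg_sketch(int32_t exchange_value,
                                            volatile int32_t* dest,
                                            int32_t compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}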
607 // Support for jlong atomic::atomic_cmpxchg_long(jlong exchange_value,
608 // volatile jlong* dest,
609 // jlong compare_value)
610 // Arguments :
611 // c_rarg0: exchange_value
612 // c_rarg1: dest
613 // c_rarg2: compare_value
614 //
615 // Result:
616 // if (compare_value == *dest) {
617 //   *dest = exchange_value; return compare_value;
618 // } else {
619 //   return *dest;
620 // }
621 address generate_atomic_cmpxchg_long() {
622 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
623 address start = __ pc();
625 __ movq(rax, c_rarg2);
626 if ( os::is_MP() ) __ lock();
627 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
628 __ ret(0);
630 return start;
631 }
633 // Support for jint atomic::add(jint add_value, volatile jint* dest)
634 //
635 // Arguments :
636 // c_rarg0: add_value
637 // c_rarg1: dest
638 //
639 // Result:
640 // *dest += add_value
641 // return *dest;
642 address generate_atomic_add() {
643 StubCodeMark mark(this, "StubRoutines", "atomic_add");
644 address start = __ pc();
646 __ movl(rax, c_rarg0);
647 if ( os::is_MP() ) __ lock();
648 __ xaddl(Address(c_rarg1, 0), c_rarg0);
649 __ addl(rax, c_rarg0);
650 __ ret(0);
652 return start;
653 }
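// A minimal C++ sketch, assuming GCC/Clang builtins: lock xadd leaves the
// *old* value in rax, so the stub adds c_rarg0 once more to return the
// *new* value, i.e. fetch-and-add followed by a plain add.
#include <stdint.h>
static inline int32_t atomic_add_sketch(int32_t add_value,
                                        volatile int32_t* dest) {
  int32_t old = __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
  return old + add_value;  // mirrors the trailing addl(rax, c_rarg0)
}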
655 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
656 //
657 // Arguments :
658 // c_rarg0: add_value
659 // c_rarg1: dest
660 //
661 // Result:
662 // *dest += add_value
663 // return *dest;
664 address generate_atomic_add_ptr() {
665 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
666 address start = __ pc();
668 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
669 if ( os::is_MP() ) __ lock();
670 __ xaddptr(Address(c_rarg1, 0), c_rarg0);
671 __ addptr(rax, c_rarg0);
672 __ ret(0);
674 return start;
675 }
677 // Support for intptr_t OrderAccess::fence()
678 //
679 // Arguments :
680 //
681 // Result:
682 address generate_orderaccess_fence() {
683 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
684 address start = __ pc();
685 __ membar(Assembler::StoreLoad);
686 __ ret(0);
688 return start;
689 }
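// A minimal C++ sketch: on x86 only StoreLoad reordering needs an explicit
// barrier, and a sequentially consistent fence (commonly assembling to
// mfence, or a locked read-modify-write) provides it.
#include <atomic>
static inline void orderaccess_fence_sketch() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}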
691 // Support for intptr_t get_previous_fp()
692 //
693 // This routine is used to find the previous frame pointer for the
694 // caller (current_frame_guess). This is used as part of debugging
695 // when ps() is seemingly lost trying to find frames.
696 // This code assumes that the caller (current_frame_guess) has a frame.
697 address generate_get_previous_fp() {
698 StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
699 const Address old_fp(rbp, 0);
700 const Address older_fp(rax, 0);
701 address start = __ pc();
703 __ enter();
704 __ movptr(rax, old_fp); // callers fp
705 __ movptr(rax, older_fp); // the frame for ps()
706 __ pop(rbp);
707 __ ret(0);
709 return start;
710 }
712 //----------------------------------------------------------------------------------------------------
713 // Support for void verify_mxcsr()
714 //
715 // This routine is used with -Xcheck:jni to verify that native
716 // JNI code does not return to Java code without restoring the
717 // MXCSR register to our expected state.
719 address generate_verify_mxcsr() {
720 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
721 address start = __ pc();
723 const Address mxcsr_save(rsp, 0);
725 if (CheckJNICalls) {
726 Label ok_ret;
727 __ push(rax);
728 __ subptr(rsp, wordSize); // allocate a temp location
729 __ stmxcsr(mxcsr_save);
730 __ movl(rax, mxcsr_save);
731 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
732 __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
733 __ jcc(Assembler::equal, ok_ret);
735 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
737 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
739 __ bind(ok_ret);
740 __ addptr(rsp, wordSize);
741 __ pop(rax);
742 }
744 __ ret(0);
746 return start;
747 }
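// A minimal C++ sketch of the check, using SSE intrinsics. MXCSR_STD_SKETCH
// stands in for the value stored at StubRoutines::x86::mxcsr_std(); 0x1F80
// is the conventional x86-64 ABI default, assumed here for illustration.
#include <xmmintrin.h>
#include <cstdio>
static const unsigned int MXCSR_STD_SKETCH = 0x1F80;
static inline void verify_mxcsr_sketch() {
  unsigned int mxcsr = _mm_getcsr() & 0xFFC0;  // ignore pending-exception flags
  if (mxcsr != MXCSR_STD_SKETCH) {
    std::fprintf(stderr, "MXCSR changed by native JNI code\n");
    _mm_setcsr(MXCSR_STD_SKETCH);              // restore the expected state
  }
}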
749 address generate_f2i_fixup() {
750 StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
751 Address inout(rsp, 5 * wordSize); // return address + 4 saves
753 address start = __ pc();
755 Label L;
757 __ push(rax);
758 __ push(c_rarg3);
759 __ push(c_rarg2);
760 __ push(c_rarg1);
762 __ movl(rax, 0x7f800000);
763 __ xorl(c_rarg3, c_rarg3);
764 __ movl(c_rarg2, inout);
765 __ movl(c_rarg1, c_rarg2);
766 __ andl(c_rarg1, 0x7fffffff);
767 __ cmpl(rax, c_rarg1); // NaN? -> 0
768 __ jcc(Assembler::negative, L);
769 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
770 __ movl(c_rarg3, 0x80000000);
771 __ movl(rax, 0x7fffffff);
772 __ cmovl(Assembler::positive, c_rarg3, rax);
774 __ bind(L);
775 __ movptr(inout, c_rarg3);
777 __ pop(c_rarg1);
778 __ pop(c_rarg2);
779 __ pop(c_rarg3);
780 __ pop(rax);
782 __ ret(0);
784 return start;
785 }
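// A minimal C++ sketch of the end-to-end Java semantics this fixup
// restores: cvttss2si yields 0x80000000 ("integer indefinite") on NaN or
// overflow, while Java requires 0 for NaN and saturation at the int bounds.
#include <stdint.h>
#include <cmath>
static inline int32_t f2i_java_sketch(float x) {
  if (std::isnan(x))       return 0;          // NaN -> 0
  if (x >= 2147483648.0f)  return INT32_MAX;  // saturate high
  if (x <= -2147483648.0f) return INT32_MIN;  // saturate low
  return (int32_t) x;                         // in range: plain truncation
}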
787 address generate_f2l_fixup() {
788 StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
789 Address inout(rsp, 5 * wordSize); // return address + 4 saves
790 address start = __ pc();
792 Label L;
794 __ push(rax);
795 __ push(c_rarg3);
796 __ push(c_rarg2);
797 __ push(c_rarg1);
799 __ movl(rax, 0x7f800000);
800 __ xorl(c_rarg3, c_rarg3);
801 __ movl(c_rarg2, inout);
802 __ movl(c_rarg1, c_rarg2);
803 __ andl(c_rarg1, 0x7fffffff);
804 __ cmpl(rax, c_rarg1); // NaN? -> 0
805 __ jcc(Assembler::negative, L);
806 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
807 __ mov64(c_rarg3, 0x8000000000000000);
808 __ mov64(rax, 0x7fffffffffffffff);
809 __ cmov(Assembler::positive, c_rarg3, rax);
811 __ bind(L);
812 __ movptr(inout, c_rarg3);
814 __ pop(c_rarg1);
815 __ pop(c_rarg2);
816 __ pop(c_rarg3);
817 __ pop(rax);
819 __ ret(0);
821 return start;
822 }
824 address generate_d2i_fixup() {
825 StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
826 Address inout(rsp, 6 * wordSize); // return address + 5 saves
828 address start = __ pc();
830 Label L;
832 __ push(rax);
833 __ push(c_rarg3);
834 __ push(c_rarg2);
835 __ push(c_rarg1);
836 __ push(c_rarg0);
838 __ movl(rax, 0x7ff00000);
839 __ movq(c_rarg2, inout);
840 __ movl(c_rarg3, c_rarg2);
841 __ mov(c_rarg1, c_rarg2);
842 __ mov(c_rarg0, c_rarg2);
843 __ negl(c_rarg3);
844 __ shrptr(c_rarg1, 0x20);
845 __ orl(c_rarg3, c_rarg2);
846 __ andl(c_rarg1, 0x7fffffff);
847 __ xorl(c_rarg2, c_rarg2);
848 __ shrl(c_rarg3, 0x1f);
849 __ orl(c_rarg1, c_rarg3);
850 __ cmpl(rax, c_rarg1);
851 __ jcc(Assembler::negative, L); // NaN -> 0
852 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
853 __ movl(c_rarg2, 0x80000000);
854 __ movl(rax, 0x7fffffff);
855 __ cmov(Assembler::positive, c_rarg2, rax);
857 __ bind(L);
858 __ movptr(inout, c_rarg2);
860 __ pop(c_rarg0);
861 __ pop(c_rarg1);
862 __ pop(c_rarg2);
863 __ pop(c_rarg3);
864 __ pop(rax);
866 __ ret(0);
868 return start;
869 }
871 address generate_d2l_fixup() {
872 StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
873 Address inout(rsp, 6 * wordSize); // return address + 5 saves
875 address start = __ pc();
877 Label L;
879 __ push(rax);
880 __ push(c_rarg3);
881 __ push(c_rarg2);
882 __ push(c_rarg1);
883 __ push(c_rarg0);
885 __ movl(rax, 0x7ff00000);
886 __ movq(c_rarg2, inout);
887 __ movl(c_rarg3, c_rarg2);
888 __ mov(c_rarg1, c_rarg2);
889 __ mov(c_rarg0, c_rarg2);
890 __ negl(c_rarg3);
891 __ shrptr(c_rarg1, 0x20);
892 __ orl(c_rarg3, c_rarg2);
893 __ andl(c_rarg1, 0x7fffffff);
894 __ xorl(c_rarg2, c_rarg2);
895 __ shrl(c_rarg3, 0x1f);
896 __ orl(c_rarg1, c_rarg3);
897 __ cmpl(rax, c_rarg1);
898 __ jcc(Assembler::negative, L); // NaN -> 0
899 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
900 __ mov64(c_rarg2, 0x8000000000000000);
901 __ mov64(rax, 0x7fffffffffffffff);
902 __ cmovq(Assembler::positive, c_rarg2, rax);
904 __ bind(L);
905 __ movq(inout, c_rarg2);
907 __ pop(c_rarg0);
908 __ pop(c_rarg1);
909 __ pop(c_rarg2);
910 __ pop(c_rarg3);
911 __ pop(rax);
913 __ ret(0);
915 return start;
916 }
918 address generate_fp_mask(const char *stub_name, int64_t mask) {
919 __ align(CodeEntryAlignment);
920 StubCodeMark mark(this, "StubRoutines", stub_name);
921 address start = __ pc();
923 __ emit_data64( mask, relocInfo::none );
924 __ emit_data64( mask, relocInfo::none );
926 return start;
927 }
929 // The following routine generates a subroutine to throw an
930 // asynchronous UnknownError when an unsafe access gets a fault that
931 // could not be reasonably prevented by the programmer. (Example:
932 // SIGBUS/OBJERR.)
933 address generate_handler_for_unsafe_access() {
934 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
935 address start = __ pc();
937 __ push(0); // hole for return address-to-be
938 __ pusha(); // push registers
939 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
941 // FIXME: this probably needs alignment logic
943 __ subptr(rsp, frame::arg_reg_save_area_bytes);
944 BLOCK_COMMENT("call handle_unsafe_access");
945 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
946 __ addptr(rsp, frame::arg_reg_save_area_bytes);
948 __ movptr(next_pc, rax); // stuff next address
949 __ popa();
950 __ ret(0); // jump to next address
952 return start;
953 }
955 // Non-destructive plausibility checks for oops
956 //
957 // Arguments:
958 // all args on stack!
959 //
960 // Stack after saving c_rarg3:
961 // [tos + 0]: saved c_rarg3
962 // [tos + 1]: saved c_rarg2
963 // [tos + 2]: saved r12 (several TemplateTable methods use it)
964 // [tos + 3]: saved flags
965 // [tos + 4]: return address
966 // * [tos + 5]: error message (char*)
967 // * [tos + 6]: object to verify (oop)
968 // * [tos + 7]: saved rax - saved by caller and bashed
969 // * [tos + 8]: saved r10 (rscratch1) - saved by caller
970 // * = popped on exit
971 address generate_verify_oop() {
972 StubCodeMark mark(this, "StubRoutines", "verify_oop");
973 address start = __ pc();
975 Label exit, error;
977 __ pushf();
978 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
980 __ push(r12);
982 // save c_rarg2 and c_rarg3
983 __ push(c_rarg2);
984 __ push(c_rarg3);
986 enum {
987 // After previous pushes.
988 oop_to_verify = 6 * wordSize,
989 saved_rax = 7 * wordSize,
990 saved_r10 = 8 * wordSize,
992 // Before the call to MacroAssembler::debug(), see below.
993 return_addr = 16 * wordSize,
994 error_msg = 17 * wordSize
995 };
997 // get object
998 __ movptr(rax, Address(rsp, oop_to_verify));
1000 // make sure object is 'reasonable'
1001 __ testptr(rax, rax);
1002 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
1003 // Check if the oop is in the right area of memory
1004 __ movptr(c_rarg2, rax);
1005 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
1006 __ andptr(c_rarg2, c_rarg3);
1007 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
1008 __ cmpptr(c_rarg2, c_rarg3);
1009 __ jcc(Assembler::notZero, error);
1011 // set r12 to heapbase for load_klass()
1012 __ reinit_heapbase();
1014 // make sure klass is 'reasonable'
1015 __ load_klass(rax, rax); // get klass
1016 __ testptr(rax, rax);
1017 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
1018 // Check if the klass is in the right area of memory
1019 __ mov(c_rarg2, rax);
1020 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
1021 __ andptr(c_rarg2, c_rarg3);
1022 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
1023 __ cmpptr(c_rarg2, c_rarg3);
1024 __ jcc(Assembler::notZero, error);
1026 // make sure klass' klass is 'reasonable'
1027 __ load_klass(rax, rax);
1028 __ testptr(rax, rax);
1029 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
1030 // Check if the klass' klass is in the right area of memory
1031 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
1032 __ andptr(rax, c_rarg3);
1033 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
1034 __ cmpptr(rax, c_rarg3);
1035 __ jcc(Assembler::notZero, error);
1037 // return if everything seems ok
1038 __ bind(exit);
1039 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1040 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1041 __ pop(c_rarg3); // restore c_rarg3
1042 __ pop(c_rarg2); // restore c_rarg2
1043 __ pop(r12); // restore r12
1044 __ popf(); // restore flags
1045 __ ret(4 * wordSize); // pop caller saved stuff
1047 // handle errors
1048 __ bind(error);
1049 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1050 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1051 __ pop(c_rarg3); // get saved c_rarg3 back
1052 __ pop(c_rarg2); // get saved c_rarg2 back
1053 __ pop(r12); // get saved r12 back
1054 __ popf(); // get saved flags off stack --
1055 // will be ignored
1057 __ pusha(); // push registers
1058 // (rip is already
1059 // already pushed)
1060 // debug(char* msg, int64_t pc, int64_t regs[])
1061 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
1062 // pushed all the registers, so now the stack looks like:
1063 // [tos + 0] 16 saved registers
1064 // [tos + 16] return address
1065 // * [tos + 17] error message (char*)
1066 // * [tos + 18] object to verify (oop)
1067 // * [tos + 19] saved rax - saved by caller and bashed
1068 // * [tos + 20] saved r10 (rscratch1) - saved by caller
1069 // * = popped on exit
1071 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
1072 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address
1073 __ movq(c_rarg2, rsp); // pass address of regs on stack
1074 __ mov(r12, rsp); // remember rsp
1075 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1076 __ andptr(rsp, -16); // align stack as required by ABI
1077 BLOCK_COMMENT("call MacroAssembler::debug");
1078 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1079 __ mov(rsp, r12); // restore rsp
1080 __ popa(); // pop registers (includes r12)
1081 __ ret(4 * wordSize); // pop caller saved stuff
1083 return start;
1084 }
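// A minimal C++ sketch of the "reasonable" predicate applied above to the
// oop, its klass, and the klass' klass; the mask/bits pairs come from
// Universe and are modeled here as parameters.
#include <stdint.h>
static inline bool oop_looks_reasonable_sketch(uintptr_t obj,
                                               uintptr_t verify_mask,
                                               uintptr_t verify_bits) {
  if (obj == 0) return true;  // NULL passes; a NULL klass, however, is broken
  // The value must carry the expected alignment/address-range bit pattern.
  return (obj & verify_mask) == verify_bits;
}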
1086 //
1087 // Verify that a register contains a clean 32-bit positive value
1088 // (high 32 bits are 0) so it can be used in 64-bit shifts.
1089 //
1090 // Input:
1091 // Rint - 32-bits value
1092 // Rtmp - scratch
1093 //
1094 void assert_clean_int(Register Rint, Register Rtmp) {
1095 #ifdef ASSERT
1096 Label L;
1097 assert_different_registers(Rtmp, Rint);
1098 __ movslq(Rtmp, Rint);
1099 __ cmpq(Rtmp, Rint);
1100 __ jcc(Assembler::equal, L);
1101 __ stop("high 32-bits of int value are not 0");
1102 __ bind(L);
1103 #endif
1104 }
1106 // Generate overlap test for array copy stubs
1107 //
1108 // Input:
1109 // c_rarg0 - from
1110 // c_rarg1 - to
1111 // c_rarg2 - element count
1112 //
1113 // Output:
1114 // rax - &from[element count]
1115 //
1116 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
1117 assert(no_overlap_target != NULL, "must be generated");
1118 array_overlap_test(no_overlap_target, NULL, sf);
1119 }
1120 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
1121 array_overlap_test(NULL, &L_no_overlap, sf);
1122 }
1123 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
1124 const Register from = c_rarg0;
1125 const Register to = c_rarg1;
1126 const Register count = c_rarg2;
1127 const Register end_from = rax;
1129 __ cmpptr(to, from);
1130 __ lea(end_from, Address(from, count, sf, 0));
1131 if (NOLp == NULL) {
1132 ExternalAddress no_overlap(no_overlap_target);
1133 __ jump_cc(Assembler::belowEqual, no_overlap);
1134 __ cmpptr(to, end_from);
1135 __ jump_cc(Assembler::aboveEqual, no_overlap);
1136 } else {
1137 __ jcc(Assembler::belowEqual, (*NOLp));
1138 __ cmpptr(to, end_from);
1139 __ jcc(Assembler::aboveEqual, (*NOLp));
1140 }
1141 }
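// A minimal C++ sketch of the disjointness test above: copying forward is
// safe unless the destination starts strictly inside [from, from + count).
// Byte counts are used here so the scale factor can be ignored.
#include <stdint.h>
#include <stddef.h>
static inline bool arrays_overlap_sketch(const char* from, const char* to,
                                         size_t byte_count) {
  uintptr_t f = (uintptr_t) from, t = (uintptr_t) to;
  // Mirrors the stub's unsigned compares: jbe (to <= from) and
  // jae (to >= end_from) both branch to the no-overlap fast path.
  return !(t <= f || t >= f + byte_count);
}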
1143 // Shuffle first three arg regs on Windows into Linux/Solaris locations.
1144 //
1145 // Outputs:
1146 // rdi - rcx
1147 // rsi - rdx
1148 // rdx - r8
1149 // rcx - r9
1150 //
1151 // On Windows, registers r9 and r10 are used to save rdi and rsi, which
1152 // are non-volatile there; the caller must not rely on r9 and r10.
1153 //
1154 void setup_arg_regs(int nargs = 3) {
1155 const Register saved_rdi = r9;
1156 const Register saved_rsi = r10;
1157 assert(nargs == 3 || nargs == 4, "else fix");
1158 #ifdef _WIN64
1159 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1160 "unexpected argument registers");
1161 if (nargs >= 4)
1162 __ mov(rax, r9); // r9 is also saved_rdi
1163 __ movptr(saved_rdi, rdi);
1164 __ movptr(saved_rsi, rsi);
1165 __ mov(rdi, rcx); // c_rarg0
1166 __ mov(rsi, rdx); // c_rarg1
1167 __ mov(rdx, r8); // c_rarg2
1168 if (nargs >= 4)
1169 __ mov(rcx, rax); // c_rarg3 (via rax)
1170 #else
1171 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1172 "unexpected argument registers");
1173 #endif
1174 }
1176 void restore_arg_regs() {
1177 const Register saved_rdi = r9;
1178 const Register saved_rsi = r10;
1179 #ifdef _WIN64
1180 __ movptr(rdi, saved_rdi);
1181 __ movptr(rsi, saved_rsi);
1182 #endif
1183 }
1185 // Generate code for an array write pre barrier
1186 //
1187 // addr - starting address
1188 // count - element count
1189 // tmp - scratch register
1190 //
1191 // Destroy no registers!
1192 //
1193 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
1194 BarrierSet* bs = Universe::heap()->barrier_set();
1195 switch (bs->kind()) {
1196 case BarrierSet::G1SATBCT:
1197 case BarrierSet::G1SATBCTLogging:
1198 // With G1, don't generate the call if we statically know that the target is uninitialized
1199 if (!dest_uninitialized) {
1200 __ pusha(); // push registers
1201 if (count == c_rarg0) {
1202 if (addr == c_rarg1) {
1203 // exactly backwards!!
1204 __ xchgptr(c_rarg1, c_rarg0);
1205 } else {
1206 __ movptr(c_rarg1, count);
1207 __ movptr(c_rarg0, addr);
1208 }
1209 } else {
1210 __ movptr(c_rarg0, addr);
1211 __ movptr(c_rarg1, count);
1212 }
1213 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
1214 __ popa();
1215 }
1216 break;
1217 case BarrierSet::CardTableModRef:
1218 case BarrierSet::CardTableExtension:
1219 case BarrierSet::ModRef:
1220 break;
1221 default:
1222 ShouldNotReachHere();
1224 }
1225 }
1227 //
1228 // Generate code for an array write post barrier
1229 //
1230 // Input:
1231 // start - register containing starting address of destination array
1232 // end - register containing ending address of destination array
1233 // scratch - scratch register
1234 //
1235 // The input registers are overwritten.
1236 // The ending address is inclusive.
1237 void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
1238 assert_different_registers(start, end, scratch);
1239 BarrierSet* bs = Universe::heap()->barrier_set();
1240 switch (bs->kind()) {
1241 case BarrierSet::G1SATBCT:
1242 case BarrierSet::G1SATBCTLogging:
1244 {
1245 __ pusha(); // push registers (overkill)
1246 // must compute element count unless barrier set interface is changed (other platforms supply count)
1247 assert_different_registers(start, end, scratch);
1248 __ lea(scratch, Address(end, BytesPerHeapOop));
1249 __ subptr(scratch, start); // subtract start to get #bytes
1250 __ shrptr(scratch, LogBytesPerHeapOop); // convert to element count
1251 __ mov(c_rarg0, start);
1252 __ mov(c_rarg1, scratch);
1253 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
1254 __ popa();
1255 }
1256 break;
1257 case BarrierSet::CardTableModRef:
1258 case BarrierSet::CardTableExtension:
1259 {
1260 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1261 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1263 Label L_loop;
1265 __ shrptr(start, CardTableModRefBS::card_shift);
1266 __ addptr(end, BytesPerHeapOop);
1267 __ shrptr(end, CardTableModRefBS::card_shift);
1268 __ subptr(end, start); // number of bytes to copy
1270 intptr_t disp = (intptr_t) ct->byte_map_base;
1271 if (__ is_simm32(disp)) {
1272 Address cardtable(noreg, noreg, Address::no_scale, disp);
1273 __ lea(scratch, cardtable);
1274 } else {
1275 ExternalAddress cardtable((address)disp);
1276 __ lea(scratch, cardtable);
1277 }
1279 const Register count = end; // 'end' register contains bytes count now
1280 __ addptr(start, scratch);
1281 __ BIND(L_loop);
1282 __ movb(Address(start, count, Address::times_1), 0);
1283 __ decrement(count);
1284 __ jcc(Assembler::greaterEqual, L_loop);
1285 }
1286 break;
1287 default:
1288 ShouldNotReachHere();
1290 }
1291 }
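// A minimal C++ sketch of the card-table branch above, with start and end
// already shifted to card indices; 0 is assumed to be the dirty-card value.
#include <stdint.h>
static inline void dirty_cards_sketch(volatile int8_t* byte_map_base,
                                      uintptr_t first_card,
                                      intptr_t  last_minus_first) {
  // The stub counts down through zero (jcc greaterEqual), so the bound is
  // inclusive and every card from first to last is dirtied.
  for (intptr_t i = last_minus_first; i >= 0; i--)
    byte_map_base[first_card + i] = 0;
}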
1294 // Copy big chunks forward
1295 //
1296 // Inputs:
1297 // end_from - source array end address
1298 // end_to - destination array end address
1299 // qword_count - 64-bit element count, negative
1300 // to - scratch
1301 // L_copy_32_bytes - entry label
1302 // L_copy_8_bytes - exit label
1303 //
1304 void copy_32_bytes_forward(Register end_from, Register end_to,
1305 Register qword_count, Register to,
1306 Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1307 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1308 Label L_loop;
1309 __ align(OptoLoopAlignment);
1310 __ BIND(L_loop);
1311 if(UseUnalignedLoadStores) {
1312 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
1313 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
1314 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
1315 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
1317 } else {
1318 __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
1319 __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
1320 __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
1321 __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
1322 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
1323 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
1324 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
1325 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
1326 }
1327 __ BIND(L_copy_32_bytes);
1328 __ addptr(qword_count, 4);
1329 __ jcc(Assembler::lessEqual, L_loop);
1330 __ subptr(qword_count, 4);
1331 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
1332 }
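// A minimal C++ sketch of the indexing trick: qword_count is negative and
// counts up toward zero, so the end pointers stay fixed and one register is
// both the loop index and the termination test.
#include <stdint.h>
static inline void copy_qwords_forward_sketch(const int64_t* end_from,
                                              int64_t* end_to,
                                              intptr_t qword_count /* < 0 */) {
  // end_from/end_to address the last qword; indices qword_count+1 .. 0
  // walk the arrays from front to back.
  for (intptr_t i = qword_count; i < 0; i++)
    end_to[i + 1] = end_from[i + 1];
}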
1335 // Copy big chunks backward
1336 //
1337 // Inputs:
1338 // from - source array address
1339 // dest - destination array address
1340 // qword_count - 64-bit element count
1341 // to - scratch
1342 // L_copy_32_bytes - entry label
1343 // L_copy_8_bytes - exit label
1344 //
1345 void copy_32_bytes_backward(Register from, Register dest,
1346 Register qword_count, Register to,
1347 Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1348 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1349 Label L_loop;
1350 __ align(OptoLoopAlignment);
1351 __ BIND(L_loop);
1352 if(UseUnalignedLoadStores) {
1353 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
1354 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
1355 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
1356 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
1358 } else {
1359 __ movq(to, Address(from, qword_count, Address::times_8, 24));
1360 __ movq(Address(dest, qword_count, Address::times_8, 24), to);
1361 __ movq(to, Address(from, qword_count, Address::times_8, 16));
1362 __ movq(Address(dest, qword_count, Address::times_8, 16), to);
1363 __ movq(to, Address(from, qword_count, Address::times_8, 8));
1364 __ movq(Address(dest, qword_count, Address::times_8, 8), to);
1365 __ movq(to, Address(from, qword_count, Address::times_8, 0));
1366 __ movq(Address(dest, qword_count, Address::times_8, 0), to);
1367 }
1368 __ BIND(L_copy_32_bytes);
1369 __ subptr(qword_count, 4);
1370 __ jcc(Assembler::greaterEqual, L_loop);
1371 __ addptr(qword_count, 4);
1372 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
1373 }
1376 // Arguments:
1377 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1378 // ignored
1379 // name - stub name string
1380 //
1381 // Inputs:
1382 // c_rarg0 - source array address
1383 // c_rarg1 - destination array address
1384 // c_rarg2 - element count, treated as ssize_t, can be zero
1385 //
1386 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1387 // we let the hardware handle it. The one to eight bytes within words,
1388 // dwords or qwords that span cache line boundaries will still be loaded
1389 // and stored atomically.
1390 //
1391 // Side Effects:
1392 // disjoint_byte_copy_entry is set to the no-overlap entry point
1393 // used by generate_conjoint_byte_copy().
1394 //
1395 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
1396 __ align(CodeEntryAlignment);
1397 StubCodeMark mark(this, "StubRoutines", name);
1398 address start = __ pc();
1400 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1401 Label L_copy_byte, L_exit;
1402 const Register from = rdi; // source array address
1403 const Register to = rsi; // destination array address
1404 const Register count = rdx; // elements count
1405 const Register byte_count = rcx;
1406 const Register qword_count = count;
1407 const Register end_from = from; // source array end address
1408 const Register end_to = to; // destination array end address
1409 // End pointers are inclusive, and if count is not zero they point
1410 // to the last unit copied: end_to[0] := end_from[0]
1412 __ enter(); // required for proper stackwalking of RuntimeStub frame
1413 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1415 if (entry != NULL) {
1416 *entry = __ pc();
1417 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1418 BLOCK_COMMENT("Entry:");
1419 }
1421 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1422 // r9 and r10 may be used to save non-volatile registers
1424 // 'from', 'to' and 'count' are now valid
1425 __ movptr(byte_count, count);
1426 __ shrptr(count, 3); // count => qword_count
1428 // Copy from low to high addresses. Use 'to' as scratch.
1429 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1430 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1431 __ negptr(qword_count); // make the count negative
1432 __ jmp(L_copy_32_bytes);
1434 // Copy trailing qwords
1435 __ BIND(L_copy_8_bytes);
1436 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1437 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1438 __ increment(qword_count);
1439 __ jcc(Assembler::notZero, L_copy_8_bytes);
1441 // Check for and copy trailing dword
1442 __ BIND(L_copy_4_bytes);
1443 __ testl(byte_count, 4);
1444 __ jccb(Assembler::zero, L_copy_2_bytes);
1445 __ movl(rax, Address(end_from, 8));
1446 __ movl(Address(end_to, 8), rax);
1448 __ addptr(end_from, 4);
1449 __ addptr(end_to, 4);
1451 // Check for and copy trailing word
1452 __ BIND(L_copy_2_bytes);
1453 __ testl(byte_count, 2);
1454 __ jccb(Assembler::zero, L_copy_byte);
1455 __ movw(rax, Address(end_from, 8));
1456 __ movw(Address(end_to, 8), rax);
1458 __ addptr(end_from, 2);
1459 __ addptr(end_to, 2);
1461 // Check for and copy trailing byte
1462 __ BIND(L_copy_byte);
1463 __ testl(byte_count, 1);
1464 __ jccb(Assembler::zero, L_exit);
1465 __ movb(rax, Address(end_from, 8));
1466 __ movb(Address(end_to, 8), rax);
1468 __ BIND(L_exit);
1469 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1470 restore_arg_regs();
1471 __ xorptr(rax, rax); // return 0
1472 __ leave(); // required for proper stackwalking of RuntimeStub frame
1473 __ ret(0);
1475 // Copy in 32-bytes chunks
1476 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1477 __ jmp(L_copy_4_bytes);
1479 return start;
1480 }
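// A minimal C++ sketch of the tail decomposition used above: the byte count
// is split into qwords plus at most one trailing dword, word, and byte, so
// every residue of count mod 8 is handled in three tests.
#include <stdint.h>
#include <string.h>
static inline void byte_copy_tail_sketch(const uint8_t* from, uint8_t* to,
                                         size_t byte_count) {
  size_t qwords = byte_count >> 3;
  memcpy(to, from, qwords * 8);      // bulk copy, qword granularity
  from += qwords * 8; to += qwords * 8;
  if (byte_count & 4) { memcpy(to, from, 4); from += 4; to += 4; }
  if (byte_count & 2) { memcpy(to, from, 2); from += 2; to += 2; }
  if (byte_count & 1) { *to = *from; }
}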
1482 // Arguments:
1483 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1484 // ignored
1485 // name - stub name string
1486 //
1487 // Inputs:
1488 // c_rarg0 - source array address
1489 // c_rarg1 - destination array address
1490 // c_rarg2 - element count, treated as ssize_t, can be zero
1491 //
1492 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1493 // we let the hardware handle it. The one to eight bytes within words,
1494 // dwords or qwords that span cache line boundaries will still be loaded
1495 // and stored atomically.
1496 //
1497 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
1498 address* entry, const char *name) {
1499 __ align(CodeEntryAlignment);
1500 StubCodeMark mark(this, "StubRoutines", name);
1501 address start = __ pc();
1503 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1504 const Register from = rdi; // source array address
1505 const Register to = rsi; // destination array address
1506 const Register count = rdx; // elements count
1507 const Register byte_count = rcx;
1508 const Register qword_count = count;
1510 __ enter(); // required for proper stackwalking of RuntimeStub frame
1511 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1513 if (entry != NULL) {
1514 *entry = __ pc();
1515 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1516 BLOCK_COMMENT("Entry:");
1517 }
1519 array_overlap_test(nooverlap_target, Address::times_1);
1520 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1521 // r9 and r10 may be used to save non-volatile registers
1523 // 'from', 'to' and 'count' are now valid
1524 __ movptr(byte_count, count);
1525 __ shrptr(count, 3); // count => qword_count
1527 // Copy from high to low addresses.
1529 // Check for and copy trailing byte
1530 __ testl(byte_count, 1);
1531 __ jcc(Assembler::zero, L_copy_2_bytes);
1532 __ movb(rax, Address(from, byte_count, Address::times_1, -1));
1533 __ movb(Address(to, byte_count, Address::times_1, -1), rax);
1534 __ decrement(byte_count); // Adjust for possible trailing word
1536 // Check for and copy trailing word
1537 __ BIND(L_copy_2_bytes);
1538 __ testl(byte_count, 2);
1539 __ jcc(Assembler::zero, L_copy_4_bytes);
1540 __ movw(rax, Address(from, byte_count, Address::times_1, -2));
1541 __ movw(Address(to, byte_count, Address::times_1, -2), rax);
1543 // Check for and copy trailing dword
1544 __ BIND(L_copy_4_bytes);
1545 __ testl(byte_count, 4);
1546 __ jcc(Assembler::zero, L_copy_32_bytes);
1547 __ movl(rax, Address(from, qword_count, Address::times_8));
1548 __ movl(Address(to, qword_count, Address::times_8), rax);
1549 __ jmp(L_copy_32_bytes);
1551 // Copy trailing qwords
1552 __ BIND(L_copy_8_bytes);
1553 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1554 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1555 __ decrement(qword_count);
1556 __ jcc(Assembler::notZero, L_copy_8_bytes);
1558 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1559 restore_arg_regs();
1560 __ xorptr(rax, rax); // return 0
1561 __ leave(); // required for proper stackwalking of RuntimeStub frame
1562 __ ret(0);
1564 // Copy in 32-bytes chunks
1565 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1567 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1568 restore_arg_regs();
1569 __ xorptr(rax, rax); // return 0
1570 __ leave(); // required for proper stackwalking of RuntimeStub frame
1571 __ ret(0);
1573 return start;
1574 }
1576 // Arguments:
1577 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1578 // ignored
1579 // name - stub name string
1580 //
1581 // Inputs:
1582 // c_rarg0 - source array address
1583 // c_rarg1 - destination array address
1584 // c_rarg2 - element count, treated as ssize_t, can be zero
1585 //
1586 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1587 // let the hardware handle it. The two or four words within dwords
1588 // or qwords that span cache line boundaries will still be loaded
1589 // and stored atomically.
1590 //
1591 // Side Effects:
1592 // disjoint_short_copy_entry is set to the no-overlap entry point
1593 // used by generate_conjoint_short_copy().
1594 //
1595 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
1596 __ align(CodeEntryAlignment);
1597 StubCodeMark mark(this, "StubRoutines", name);
1598 address start = __ pc();
1600 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit;
1601 const Register from = rdi; // source array address
1602 const Register to = rsi; // destination array address
1603 const Register count = rdx; // elements count
1604 const Register word_count = rcx;
1605 const Register qword_count = count;
1606 const Register end_from = from; // source array end address
1607 const Register end_to = to; // destination array end address
1608 // End pointers are inclusive, and if count is not zero they point
1609 // to the last unit copied: end_to[0] := end_from[0]
1611 __ enter(); // required for proper stackwalking of RuntimeStub frame
1612 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1614 if (entry != NULL) {
1615 *entry = __ pc();
1616 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1617 BLOCK_COMMENT("Entry:");
1618 }
1620 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1621 // r9 and r10 may be used to save non-volatile registers
1623 // 'from', 'to' and 'count' are now valid
1624 __ movptr(word_count, count);
1625 __ shrptr(count, 2); // count => qword_count
1627 // Copy from low to high addresses. Use 'to' as scratch.
1628 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1629 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1630 __ negptr(qword_count);
1631 __ jmp(L_copy_32_bytes);
1633 // Copy trailing qwords
1634 __ BIND(L_copy_8_bytes);
1635 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1636 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1637 __ increment(qword_count);
1638 __ jcc(Assembler::notZero, L_copy_8_bytes);
1640 // Original 'dest' is trashed, so we can't use it as a
1641 // base register for a possible trailing word copy
1643 // Check for and copy trailing dword
1644 __ BIND(L_copy_4_bytes);
1645 __ testl(word_count, 2);
1646 __ jccb(Assembler::zero, L_copy_2_bytes);
1647 __ movl(rax, Address(end_from, 8));
1648 __ movl(Address(end_to, 8), rax);
1650 __ addptr(end_from, 4);
1651 __ addptr(end_to, 4);
1653 // Check for and copy trailing word
1654 __ BIND(L_copy_2_bytes);
1655 __ testl(word_count, 1);
1656 __ jccb(Assembler::zero, L_exit);
1657 __ movw(rax, Address(end_from, 8));
1658 __ movw(Address(end_to, 8), rax);
1660 __ BIND(L_exit);
1661 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1662 restore_arg_regs();
1663 __ xorptr(rax, rax); // return 0
1664 __ leave(); // required for proper stackwalking of RuntimeStub frame
1665 __ ret(0);
1667 // Copy in 32-bytes chunks
1668 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1669 __ jmp(L_copy_4_bytes);
1671 return start;
1672 }
1674 address generate_fill(BasicType t, bool aligned, const char *name) {
1675 __ align(CodeEntryAlignment);
1676 StubCodeMark mark(this, "StubRoutines", name);
1677 address start = __ pc();
1679 BLOCK_COMMENT("Entry:");
1681 const Register to = c_rarg0; // destination array address
1682 const Register value = c_rarg1; // value
1683 const Register count = c_rarg2; // elements count
1685 __ enter(); // required for proper stackwalking of RuntimeStub frame
1687 __ generate_fill(t, aligned, to, value, count, rax, xmm0);
1689 __ leave(); // required for proper stackwalking of RuntimeStub frame
1690 __ ret(0);
1691 return start;
1692 }
1694 // Arguments:
1695 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1696 // ignored
1697 // name - stub name string
1698 //
1699 // Inputs:
1700 // c_rarg0 - source array address
1701 // c_rarg1 - destination array address
1702 // c_rarg2 - element count, treated as ssize_t, can be zero
1703 //
1704 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1705 // let the hardware handle it. The two or four words within dwords
1706 // or qwords that span cache line boundaries will still be loaded
1707 // and stored atomically.
1708 //
1709 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1710 address *entry, const char *name) {
1711 __ align(CodeEntryAlignment);
1712 StubCodeMark mark(this, "StubRoutines", name);
1713 address start = __ pc();
1715 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
1716 const Register from = rdi; // source array address
1717 const Register to = rsi; // destination array address
1718 const Register count = rdx; // elements count
1719 const Register word_count = rcx;
1720 const Register qword_count = count;
1722 __ enter(); // required for proper stackwalking of RuntimeStub frame
1723 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1725 if (entry != NULL) {
1726 *entry = __ pc();
1727 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1728 BLOCK_COMMENT("Entry:");
1729 }
1731 array_overlap_test(nooverlap_target, Address::times_2);
1732 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1733 // r9 and r10 may be used to save non-volatile registers
1735 // 'from', 'to' and 'count' are now valid
1736 __ movptr(word_count, count);
1737 __ shrptr(count, 2); // count => qword_count
1739 // Copy from high to low addresses. Use 'to' as scratch.
1741 // Check for and copy trailing word
1742 __ testl(word_count, 1);
1743 __ jccb(Assembler::zero, L_copy_4_bytes);
1744 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1745 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1747 // Check for and copy trailing dword
1748 __ BIND(L_copy_4_bytes);
1749 __ testl(word_count, 2);
1750 __ jcc(Assembler::zero, L_copy_32_bytes);
1751 __ movl(rax, Address(from, qword_count, Address::times_8));
1752 __ movl(Address(to, qword_count, Address::times_8), rax);
1753 __ jmp(L_copy_32_bytes);
1755 // Copy trailing qwords
1756 __ BIND(L_copy_8_bytes);
1757 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1758 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1759 __ decrement(qword_count);
1760 __ jcc(Assembler::notZero, L_copy_8_bytes);
1762 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1763 restore_arg_regs();
1764 __ xorptr(rax, rax); // return 0
1765 __ leave(); // required for proper stackwalking of RuntimeStub frame
1766 __ ret(0);
1768 // Copy in 32-byte chunks
1769 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1771 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1772 restore_arg_regs();
1773 __ xorptr(rax, rax); // return 0
1774 __ leave(); // required for proper stackwalking of RuntimeStub frame
1775 __ ret(0);
1777 return start;
1778 }
1780 // Arguments:
1781 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1782 // ignored
1783 // is_oop - true => oop array, so generate store check code
1784 // name - stub name string
1785 //
1786 // Inputs:
1787 // c_rarg0 - source array address
1788 // c_rarg1 - destination array address
1789 // c_rarg2 - element count, treated as ssize_t, can be zero
1790 //
1791 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1792 // the hardware handle it. The two dwords within qwords that span
1793 // cache line boundaries will still be loaded and stored atomically.
1794 //
1795 // Side Effects:
1796 // disjoint_int_copy_entry is set to the no-overlap entry point
1797 // used by generate_conjoint_int_oop_copy().
1798 //
1799 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
1800 const char *name, bool dest_uninitialized = false) {
1801 __ align(CodeEntryAlignment);
1802 StubCodeMark mark(this, "StubRoutines", name);
1803 address start = __ pc();
1805 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1806 const Register from = rdi; // source array address
1807 const Register to = rsi; // destination array address
1808 const Register count = rdx; // elements count
1809 const Register dword_count = rcx;
1810 const Register qword_count = count;
1811 const Register end_from = from; // source array end address
1812 const Register end_to = to; // destination array end address
1813 const Register saved_to = r11; // saved destination array address
1814 // End pointers are inclusive, and if count is not zero they point
1815 // to the last unit copied: end_to[0] := end_from[0]
1817 __ enter(); // required for proper stackwalking of RuntimeStub frame
1818 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1820 if (entry != NULL) {
1821 *entry = __ pc();
1822 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1823 BLOCK_COMMENT("Entry:");
1824 }
1826 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1827 // r9 and r10 may be used to save non-volatile registers
1828 if (is_oop) {
1829 __ movq(saved_to, to);
1830 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1831 }
1833 // 'from', 'to' and 'count' are now valid
1834 __ movptr(dword_count, count);
1835 __ shrptr(count, 1); // count => qword_count
1837 // Copy from low to high addresses. Use 'to' as scratch.
1838 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1839 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1840 __ negptr(qword_count);
1841 __ jmp(L_copy_32_bytes);
1843 // Copy trailing qwords
1844 __ BIND(L_copy_8_bytes);
1845 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1846 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1847 __ increment(qword_count);
1848 __ jcc(Assembler::notZero, L_copy_8_bytes);
1850 // Check for and copy trailing dword
1851 __ BIND(L_copy_4_bytes);
1852 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1853 __ jccb(Assembler::zero, L_exit);
1854 __ movl(rax, Address(end_from, 8));
1855 __ movl(Address(end_to, 8), rax);
1857 __ BIND(L_exit);
1858 if (is_oop) {
1859 __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1860 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1861 }
1862 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1863 restore_arg_regs();
1864 __ xorptr(rax, rax); // return 0
1865 __ leave(); // required for proper stackwalking of RuntimeStub frame
1866 __ ret(0);
1868 // Copy in 32-byte chunks
1869 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1870 __ jmp(L_copy_4_bytes);
1872 return start;
1873 }
1875 // Arguments:
1876 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1877 // ignored
1878 // is_oop - true => oop array, so generate store check code
1879 // name - stub name string
1880 //
1881 // Inputs:
1882 // c_rarg0 - source array address
1883 // c_rarg1 - destination array address
1884 // c_rarg2 - element count, treated as ssize_t, can be zero
1885 //
1886 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1887 // the hardware handle it. The two dwords within qwords that span
1888 // cache line boundaries will still be loaded and stored atomically.
1889 //
1890 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
1891 address *entry, const char *name,
1892 bool dest_uninitialized = false) {
1893 __ align(CodeEntryAlignment);
1894 StubCodeMark mark(this, "StubRoutines", name);
1895 address start = __ pc();
1897 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
1898 const Register from = rdi; // source array address
1899 const Register to = rsi; // destination array address
1900 const Register count = rdx; // elements count
1901 const Register dword_count = rcx;
1902 const Register qword_count = count;
1904 __ enter(); // required for proper stackwalking of RuntimeStub frame
1905 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1907 if (entry != NULL) {
1908 *entry = __ pc();
1909 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1910 BLOCK_COMMENT("Entry:");
1911 }
1913 array_overlap_test(nooverlap_target, Address::times_4);
1914 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1915 // r9 and r10 may be used to save non-volatile registers
1917 if (is_oop) {
1918 // no registers are destroyed by this call
1919 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1920 }
1922 assert_clean_int(count, rax); // Make sure 'count' is clean int.
1923 // 'from', 'to' and 'count' are now valid
1924 __ movptr(dword_count, count);
1925 __ shrptr(count, 1); // count => qword_count
1927 // Copy from high to low addresses.
1929 // Check for and copy trailing dword
1930 __ testl(dword_count, 1);
1931 __ jcc(Assembler::zero, L_copy_32_bytes);
1932 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1933 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1934 __ jmp(L_copy_32_bytes);
1936 // Copy trailing qwords
1937 __ BIND(L_copy_8_bytes);
1938 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1939 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1940 __ decrement(qword_count);
1941 __ jcc(Assembler::notZero, L_copy_8_bytes);
1943 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1944 if (is_oop) {
1945 __ jmp(L_exit);
1946 }
1947 restore_arg_regs();
1948 __ xorptr(rax, rax); // return 0
1949 __ leave(); // required for proper stackwalking of RuntimeStub frame
1950 __ ret(0);
1952 // Copy in 32-byte chunks
1953 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1955 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1956 __ bind(L_exit);
1957 if (is_oop) {
1958 Register end_to = rdx;
1959 __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
1960 gen_write_ref_array_post_barrier(to, end_to, rax);
1961 }
1962 restore_arg_regs();
1963 __ xorptr(rax, rax); // return 0
1964 __ leave(); // required for proper stackwalking of RuntimeStub frame
1965 __ ret(0);
1967 return start;
1968 }
1970 // Arguments:
1971 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1972 // ignored
1973 // is_oop - true => oop array, so generate store check code
1974 // name - stub name string
1975 //
1976 // Inputs:
1977 // c_rarg0 - source array address
1978 // c_rarg1 - destination array address
1979 // c_rarg2 - element count, treated as ssize_t, can be zero
1980 //
1981 // Side Effects:
1982 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
1983 // no-overlap entry point used by generate_conjoint_long_oop_copy().
1984 //
1985 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
1986 const char *name, bool dest_uninitialized = false) {
1987 __ align(CodeEntryAlignment);
1988 StubCodeMark mark(this, "StubRoutines", name);
1989 address start = __ pc();
1991 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1992 const Register from = rdi; // source array address
1993 const Register to = rsi; // destination array address
1994 const Register qword_count = rdx; // elements count
1995 const Register end_from = from; // source array end address
1996 const Register end_to = rcx; // destination array end address
1997 const Register saved_to = to;
1998 // End pointers are inclusive, and if count is not zero they point
1999 // to the last unit copied: end_to[0] := end_from[0]
2001 __ enter(); // required for proper stackwalking of RuntimeStub frame
2002 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
2003 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2005 if (entry != NULL) {
2006 *entry = __ pc();
2007 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2008 BLOCK_COMMENT("Entry:");
2009 }
2011 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2012 // r9 and r10 may be used to save non-volatile registers
2013 // 'from', 'to' and 'qword_count' are now valid
2014 if (is_oop) {
2015 // no registers are destroyed by this call
2016 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
2017 }
2019 // Copy from low to high addresses. Use 'from' as scratch.
2020 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
2021 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
2022 __ negptr(qword_count);
2023 __ jmp(L_copy_32_bytes);
2025 // Copy trailing qwords
2026 __ BIND(L_copy_8_bytes);
2027 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
2028 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
2029 __ increment(qword_count);
2030 __ jcc(Assembler::notZero, L_copy_8_bytes);
2032 if (is_oop) {
2033 __ jmp(L_exit);
2034 } else {
2035 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2036 restore_arg_regs();
2037 __ xorptr(rax, rax); // return 0
2038 __ leave(); // required for proper stackwalking of RuntimeStub frame
2039 __ ret(0);
2040 }
2042 // Copy in 32-byte chunks
2043 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2045 if (is_oop) {
2046 __ BIND(L_exit);
2047 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
2048 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2049 } else {
2050 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2051 }
2052 restore_arg_regs();
2053 __ xorptr(rax, rax); // return 0
2054 __ leave(); // required for proper stackwalking of RuntimeStub frame
2055 __ ret(0);
2057 return start;
2058 }
2060 // Arguments:
2061 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2062 // ignored
2063 // is_oop - true => oop array, so generate store check code
2064 // name - stub name string
2065 //
2066 // Inputs:
2067 // c_rarg0 - source array address
2068 // c_rarg1 - destination array address
2069 // c_rarg2 - element count, treated as ssize_t, can be zero
2070 //
2071 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
2072 address nooverlap_target, address *entry,
2073 const char *name, bool dest_uninitialized = false) {
2074 __ align(CodeEntryAlignment);
2075 StubCodeMark mark(this, "StubRoutines", name);
2076 address start = __ pc();
2078 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
2079 const Register from = rdi; // source array address
2080 const Register to = rsi; // destination array address
2081 const Register qword_count = rdx; // elements count
2082 const Register saved_count = rcx;
2084 __ enter(); // required for proper stackwalking of RuntimeStub frame
2085 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2087 if (entry != NULL) {
2088 *entry = __ pc();
2089 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2090 BLOCK_COMMENT("Entry:");
2091 }
2093 array_overlap_test(nooverlap_target, Address::times_8);
2094 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2095 // r9 and r10 may be used to save non-volatile registers
2096 // 'from', 'to' and 'qword_count' are now valid
2097 if (is_oop) {
2098 // Save to and count for store barrier
2099 __ movptr(saved_count, qword_count);
2100 // No registers are destroyed by this call
2101 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
2102 }
2104 __ jmp(L_copy_32_bytes);
2106 // Copy trailing qwords
2107 __ BIND(L_copy_8_bytes);
2108 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2109 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2110 __ decrement(qword_count);
2111 __ jcc(Assembler::notZero, L_copy_8_bytes);
2113 if (is_oop) {
2114 __ jmp(L_exit);
2115 } else {
2116 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2117 restore_arg_regs();
2118 __ xorptr(rax, rax); // return 0
2119 __ leave(); // required for proper stackwalking of RuntimeStub frame
2120 __ ret(0);
2121 }
2123 // Copy in 32-byte chunks
2124 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2126 if (is_oop) {
2127 __ BIND(L_exit);
2128 __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
2129 gen_write_ref_array_post_barrier(to, rcx, rax);
2130 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2131 } else {
2132 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2133 }
2134 restore_arg_regs();
2135 __ xorptr(rax, rax); // return 0
2136 __ leave(); // required for proper stackwalking of RuntimeStub frame
2137 __ ret(0);
2139 return start;
2140 }
2143 // Helper for generating a dynamic type check.
2144 // Smashes no registers.
2145 void generate_type_check(Register sub_klass,
2146 Register super_check_offset,
2147 Register super_klass,
2148 Label& L_success) {
2149 assert_different_registers(sub_klass, super_check_offset, super_klass);
2151 BLOCK_COMMENT("type_check:");
2153 Label L_miss;
2155 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
2156 super_check_offset);
2157 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
2159 // Fall through on failure!
2160 __ BIND(L_miss);
2161 }
2163 //
2164 // Generate checkcasting array copy stub
2165 //
2166 // Input:
2167 // c_rarg0 - source array address
2168 // c_rarg1 - destination array address
2169 // c_rarg2 - element count, treated as ssize_t, can be zero
2170 // c_rarg3 - size_t ckoff (super_check_offset)
2171 // not Win64
2172 // c_rarg4 - oop ckval (super_klass)
2173 // Win64
2174 // rsp+40 - oop ckval (super_klass)
2175 //
2176 // Output:
2177 // rax == 0 - success
2178 // rax == -1^K - failure, where K is partial transfer count
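//          (-1^K is -1 XOR K, i.e. ~K, so the caller recovers the
//          number of elements already copied as K = ~rax.)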
2179 //
2180 address generate_checkcast_copy(const char *name, address *entry,
2181 bool dest_uninitialized = false) {
2183 Label L_load_element, L_store_element, L_do_card_marks, L_done;
2185 // Input registers (after setup_arg_regs)
2186 const Register from = rdi; // source array address
2187 const Register to = rsi; // destination array address
2188 const Register length = rdx; // elements count
2189 const Register ckoff = rcx; // super_check_offset
2190 const Register ckval = r8; // super_klass
2192 // Registers used as temps (r13, r14 are save-on-entry)
2193 const Register end_from = from; // source array end address
2194 const Register end_to = r13; // destination array end address
2195 const Register count = rdx; // -(count_remaining)
2196 const Register r14_length = r14; // saved copy of length
2197 // End pointers are inclusive, and if length is not zero they point
2198 // to the last unit copied: end_to[0] := end_from[0]
2200 const Register rax_oop = rax; // actual oop copied
2201 const Register r11_klass = r11; // oop._klass
2203 //---------------------------------------------------------------
2204 // Assembler stub will be used for this call to arraycopy
2205 // if the two arrays are subtypes of Object[] but the
2206 // destination array type is not equal to or a supertype
2207 // of the source type. Each element must be separately
2208 // checked.
2210 __ align(CodeEntryAlignment);
2211 StubCodeMark mark(this, "StubRoutines", name);
2212 address start = __ pc();
2214 __ enter(); // required for proper stackwalking of RuntimeStub frame
2216 #ifdef ASSERT
2217 // caller guarantees that the arrays really are different
2218 // otherwise, we would have to make conjoint checks
2219 { Label L;
2220 array_overlap_test(L, TIMES_OOP);
2221 __ stop("checkcast_copy within a single array");
2222 __ bind(L);
2223 }
2224 #endif //ASSERT
2226 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2227 // ckoff => rcx, ckval => r8
2228 // r9 and r10 may be used to save non-volatile registers
2229 #ifdef _WIN64
2230 // last argument (#4) is on stack on Win64
2231 __ movptr(ckval, Address(rsp, 6 * wordSize));
2232 #endif
2234 // Caller of this entry point must set up the argument registers.
2235 if (entry != NULL) {
2236 *entry = __ pc();
2237 BLOCK_COMMENT("Entry:");
2238 }
2240 // allocate spill slots for r13, r14
2241 enum {
2242 saved_r13_offset,
2243 saved_r14_offset,
2244 saved_rbp_offset
2245 };
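// Note: saved_rbp_offset == 2, so only two slots are reserved below;
// rbp itself was already pushed by the enter() above.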
2246 __ subptr(rsp, saved_rbp_offset * wordSize);
2247 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2248 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2250 // check that int operands are properly extended to size_t
2251 assert_clean_int(length, rax);
2252 assert_clean_int(ckoff, rax);
2254 #ifdef ASSERT
2255 BLOCK_COMMENT("assert consistent ckoff/ckval");
2256 // The ckoff and ckval must be mutually consistent,
2257 // even though caller generates both.
2258 { Label L;
2259 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2260 Klass::super_check_offset_offset_in_bytes());
2261 __ cmpl(ckoff, Address(ckval, sco_offset));
2262 __ jcc(Assembler::equal, L);
2263 __ stop("super_check_offset inconsistent");
2264 __ bind(L);
2265 }
2266 #endif //ASSERT
2268 // Loop-invariant addresses. They are exclusive end pointers.
2269 Address end_from_addr(from, length, TIMES_OOP, 0);
2270 Address end_to_addr(to, length, TIMES_OOP, 0);
2271 // Loop-variant addresses. They assume post-incremented count < 0.
2272 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2273 Address to_element_addr(end_to, count, TIMES_OOP, 0);
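// (count runs from -length up to zero; adding it to the exclusive end
// pointers yields the address of the current element.)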
2275 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
2277 // Copy from low to high addresses, indexed from the end of each array.
2278 __ lea(end_from, end_from_addr);
2279 __ lea(end_to, end_to_addr);
2280 __ movptr(r14_length, length); // save a copy of the length
2281 assert(length == count, ""); // else fix next line:
2282 __ negptr(count); // negate and test the length
2283 __ jcc(Assembler::notZero, L_load_element);
2285 // Empty array: Nothing to do.
2286 __ xorptr(rax, rax); // return 0 on (trivial) success
2287 __ jmp(L_done);
2289 // ======== begin loop ========
2290 // (Loop is rotated; its entry is L_load_element.)
2291 // Loop control:
2292 // for (count = -count; count != 0; count++)
2293 // Base pointers src, dst are biased by 8*(count-1), to the last element.
2294 __ align(OptoLoopAlignment);
2296 __ BIND(L_store_element);
2297 __ store_heap_oop(to_element_addr, rax_oop); // store the oop
2298 __ increment(count); // increment the count toward zero
2299 __ jcc(Assembler::zero, L_do_card_marks);
2301 // ======== loop entry is here ========
2302 __ BIND(L_load_element);
2303 __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2304 __ testptr(rax_oop, rax_oop);
2305 __ jcc(Assembler::zero, L_store_element);
2307 __ load_klass(r11_klass, rax_oop);// query the object klass
2308 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2309 // ======== end loop ========
2311 // It was a real error; we must depend on the caller to finish the job.
2312 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2313 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2314 // and report their number to the caller.
2315 assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2316 __ lea(end_to, to_element_addr);
2317 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2318 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2319 __ movptr(rax, r14_length); // original oops
2320 __ addptr(rax, count); // K = (original - remaining) oops
2321 __ notptr(rax); // report (-1^K) to caller
2322 __ jmp(L_done);
2324 // Come here on success only.
2325 __ BIND(L_do_card_marks);
2326 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2327 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2328 __ xorptr(rax, rax); // return 0 on success
2330 // Common exit point (success or failure).
2331 __ BIND(L_done);
2332 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2333 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2334 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2335 restore_arg_regs();
2336 __ leave(); // required for proper stackwalking of RuntimeStub frame
2337 __ ret(0);
2339 return start;
2340 }
2342 //
2343 // Generate 'unsafe' array copy stub
2344 // Though just as safe as the other stubs, it takes an unscaled
2345 // size_t argument instead of an element count.
2346 //
2347 // Input:
2348 // c_rarg0 - source array address
2349 // c_rarg1 - destination array address
2350 // c_rarg2 - byte count, treated as ssize_t, can be zero
2351 //
2352 // Examines the alignment of the operands and dispatches
2353 // to a long, int, short, or byte copy loop.
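// For example, if 'from', 'to' and 'size' are all multiples of 4 but
// not all multiples of 8 (say 0x1004, 0x2008 and 12), the OR of the
// three values has bit 2 set but bits 0-1 clear, so the int copy
// loop is chosen.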
2354 //
2355 address generate_unsafe_copy(const char *name,
2356 address byte_copy_entry, address short_copy_entry,
2357 address int_copy_entry, address long_copy_entry) {
2359 Label L_long_aligned, L_int_aligned, L_short_aligned;
2361 // Input registers (before setup_arg_regs)
2362 const Register from = c_rarg0; // source array address
2363 const Register to = c_rarg1; // destination array address
2364 const Register size = c_rarg2; // byte count (size_t)
2366 // Register used as a temp
2367 const Register bits = rax; // test copy of low bits
2369 __ align(CodeEntryAlignment);
2370 StubCodeMark mark(this, "StubRoutines", name);
2371 address start = __ pc();
2373 __ enter(); // required for proper stackwalking of RuntimeStub frame
2375 // bump this on entry, not on exit:
2376 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2378 __ mov(bits, from);
2379 __ orptr(bits, to);
2380 __ orptr(bits, size);
2382 __ testb(bits, BytesPerLong-1);
2383 __ jccb(Assembler::zero, L_long_aligned);
2385 __ testb(bits, BytesPerInt-1);
2386 __ jccb(Assembler::zero, L_int_aligned);
2388 __ testb(bits, BytesPerShort-1);
2389 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2391 __ BIND(L_short_aligned);
2392 __ shrptr(size, LogBytesPerShort); // size => short_count
2393 __ jump(RuntimeAddress(short_copy_entry));
2395 __ BIND(L_int_aligned);
2396 __ shrptr(size, LogBytesPerInt); // size => int_count
2397 __ jump(RuntimeAddress(int_copy_entry));
2399 __ BIND(L_long_aligned);
2400 __ shrptr(size, LogBytesPerLong); // size => qword_count
2401 __ jump(RuntimeAddress(long_copy_entry));
2403 return start;
2404 }
2406 // Perform range checks on the proposed arraycopy.
2407 // Kills temp, but nothing else.
2408 // Also, clean the sign bits of src_pos and dst_pos.
2409 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2410 Register src_pos, // source position (c_rarg1)
2411 Register dst, // destination array oop (c_rarg2)
2412 Register dst_pos, // destination position (c_rarg3)
2413 Register length,
2414 Register temp,
2415 Label& L_failed) {
2416 BLOCK_COMMENT("arraycopy_range_checks:");
2418 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2419 __ movl(temp, length);
2420 __ addl(temp, src_pos); // src_pos + length
2421 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2422 __ jcc(Assembler::above, L_failed);
2424 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2425 __ movl(temp, length);
2426 __ addl(temp, dst_pos); // dst_pos + length
2427 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2428 __ jcc(Assembler::above, L_failed);
2430 // Clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2431 // A move with sign extension works since both are known non-negative.
2432 __ movslq(src_pos, src_pos);
2433 __ movslq(dst_pos, dst_pos);
2435 BLOCK_COMMENT("arraycopy_range_checks done");
2436 }
2438 //
2439 // Generate generic array copy stubs
2440 //
2441 // Input:
2442 // c_rarg0 - src oop
2443 // c_rarg1 - src_pos (32-bits)
2444 // c_rarg2 - dst oop
2445 // c_rarg3 - dst_pos (32-bits)
2446 // not Win64
2447 // c_rarg4 - element count (32-bits)
2448 // Win64
2449 // rsp+40 - element count (32-bits)
2450 //
2451 // Output:
2452 // rax == 0 - success
2453 // rax == -1^K - failure, where K is partial transfer count
2454 //
2455 address generate_generic_copy(const char *name,
2456 address byte_copy_entry, address short_copy_entry,
2457 address int_copy_entry, address oop_copy_entry,
2458 address long_copy_entry, address checkcast_copy_entry) {
2460 Label L_failed, L_failed_0, L_objArray;
2461 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2463 // Input registers
2464 const Register src = c_rarg0; // source array oop
2465 const Register src_pos = c_rarg1; // source position
2466 const Register dst = c_rarg2; // destination array oop
2467 const Register dst_pos = c_rarg3; // destination position
2468 #ifndef _WIN64
2469 const Register length = c_rarg4;
2470 #else
2471 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64
2472 #endif
2474 { int modulus = CodeEntryAlignment;
2475 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2476 int advance = target - (__ offset() % modulus);
2477 if (advance < 0) advance += modulus;
2478 if (advance > 0) __ nop(advance);
2479 }
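// The padding above sizes the prologue so that the 5-byte jmp below
// ends exactly on a CodeEntryAlignment boundary; 'start' then needs
// no further alignment (see the assert below).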
2480 StubCodeMark mark(this, "StubRoutines", name);
2482 // Short-hop target to L_failed. Makes for denser prologue code.
2483 __ BIND(L_failed_0);
2484 __ jmp(L_failed);
2485 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2487 __ align(CodeEntryAlignment);
2488 address start = __ pc();
2490 __ enter(); // required for proper stackwalking of RuntimeStub frame
2492 // bump this on entry, not on exit:
2493 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2495 //-----------------------------------------------------------------------
2496 // Assembler stub will be used for this call to arraycopy
2497 // if the following conditions are met:
2498 //
2499 // (1) src and dst must not be null.
2500 // (2) src_pos must not be negative.
2501 // (3) dst_pos must not be negative.
2502 // (4) length must not be negative.
2503 // (5) src klass and dst klass should be the same and not NULL.
2504 // (6) src and dst should be arrays.
2505 // (7) src_pos + length must not exceed length of src.
2506 // (8) dst_pos + length must not exceed length of dst.
2507 //
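// Conditions (1)-(4) are tested by the dense jccb branches just below;
// (5)-(8) by the klass and layout-helper comparisons and the range
// checks that follow.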
2509 // if (src == NULL) return -1;
2510 __ testptr(src, src); // src oop
2511 size_t j1off = __ offset();
2512 __ jccb(Assembler::zero, L_failed_0);
2514 // if (src_pos < 0) return -1;
2515 __ testl(src_pos, src_pos); // src_pos (32-bits)
2516 __ jccb(Assembler::negative, L_failed_0);
2518 // if (dst == NULL) return -1;
2519 __ testptr(dst, dst); // dst oop
2520 __ jccb(Assembler::zero, L_failed_0);
2522 // if (dst_pos < 0) return -1;
2523 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2524 size_t j4off = __ offset();
2525 __ jccb(Assembler::negative, L_failed_0);
2527 // The first four tests are very dense code,
2528 // but not quite dense enough to put four
2529 // jumps in a 16-byte instruction fetch buffer.
2530 // That's good, because some branch predictors
2531 // do not like jumps so close together.
2532 // Make sure of this.
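// (The XOR test below verifies that the first and fourth jumps do not
// share a 16-byte block: masking off the low four bits of both
// offsets must leave them different.)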
2533 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2535 // registers used as temp
2536 const Register r11_length = r11; // elements count to copy
2537 const Register r10_src_klass = r10; // array klass
2539 // if (length < 0) return -1;
2540 __ movl(r11_length, length); // length (elements count, 32-bits value)
2541 __ testl(r11_length, r11_length);
2542 __ jccb(Assembler::negative, L_failed_0);
2544 __ load_klass(r10_src_klass, src);
2545 #ifdef ASSERT
2546 // assert(src->klass() != NULL);
2547 {
2548 BLOCK_COMMENT("assert klasses not null {");
2549 Label L1, L2;
2550 __ testptr(r10_src_klass, r10_src_klass);
2551 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2552 __ bind(L1);
2553 __ stop("broken null klass");
2554 __ bind(L2);
2555 __ load_klass(rax, dst);
2556 __ cmpq(rax, 0);
2557 __ jcc(Assembler::equal, L1); // this would be broken also
2558 BLOCK_COMMENT("} assert klasses not null done");
2559 }
2560 #endif
2562 // Load layout helper (32-bits)
2563 //
2564 //  |array_tag|     | header_size | element_type |     |log2_element_size|
2565 // 32        30    24            16              8     2                 0
2566 //
2567 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2568 //
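// For example, a typeArray of jints carries array_tag 0x3 and
// log2_element_size 2 (4-byte elements); the code below extracts the
// header size and element size fields to pick the matching copy loop.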
2570 const int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2571 Klass::layout_helper_offset_in_bytes();
2573 // Handle objArrays completely differently...
2574 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2575 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
2576 __ jcc(Assembler::equal, L_objArray);
2578 // if (src->klass() != dst->klass()) return -1;
2579 __ load_klass(rax, dst);
2580 __ cmpq(r10_src_klass, rax);
2581 __ jcc(Assembler::notEqual, L_failed);
2583 const Register rax_lh = rax; // layout helper
2584 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2586 // if (!src->is_Array()) return -1;
2587 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2588 __ jcc(Assembler::greaterEqual, L_failed);
2590 // At this point, it is known to be a typeArray (array_tag 0x3).
2591 #ifdef ASSERT
2592 {
2593 BLOCK_COMMENT("assert primitive array {");
2594 Label L;
2595 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2596 __ jcc(Assembler::greaterEqual, L);
2597 __ stop("must be a primitive array");
2598 __ bind(L);
2599 BLOCK_COMMENT("} assert primitive array done");
2600 }
2601 #endif
2603 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2604 r10, L_failed);
2606 // typeArrayKlass
2607 //
2608 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2609 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2610 //
2612 const Register r10_offset = r10; // array offset
2613 const Register rax_elsize = rax_lh; // element size
2615 __ movl(r10_offset, rax_lh);
2616 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2617 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
2618 __ addptr(src, r10_offset); // src array offset
2619 __ addptr(dst, r10_offset); // dst array offset
2620 BLOCK_COMMENT("choose copy loop based on element size");
2621 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2623 // The following registers must be set before the jump to the corresponding stub
2624 const Register from = c_rarg0; // source array address
2625 const Register to = c_rarg1; // destination array address
2626 const Register count = c_rarg2; // elements count
2628 // 'from', 'to' and 'count' must be set in this order, since they are
2629 // the same registers as 'src', 'src_pos' and 'dst'.
2631 __ BIND(L_copy_bytes);
2632 __ cmpl(rax_elsize, 0);
2633 __ jccb(Assembler::notEqual, L_copy_shorts);
2634 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2635 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2636 __ movl2ptr(count, r11_length); // length
2637 __ jump(RuntimeAddress(byte_copy_entry));
2639 __ BIND(L_copy_shorts);
2640 __ cmpl(rax_elsize, LogBytesPerShort);
2641 __ jccb(Assembler::notEqual, L_copy_ints);
2642 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2643 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2644 __ movl2ptr(count, r11_length); // length
2645 __ jump(RuntimeAddress(short_copy_entry));
2647 __ BIND(L_copy_ints);
2648 __ cmpl(rax_elsize, LogBytesPerInt);
2649 __ jccb(Assembler::notEqual, L_copy_longs);
2650 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2651 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2652 __ movl2ptr(count, r11_length); // length
2653 __ jump(RuntimeAddress(int_copy_entry));
2655 __ BIND(L_copy_longs);
2656 #ifdef ASSERT
2657 {
2658 BLOCK_COMMENT("assert long copy {");
2659 Label L;
2660 __ cmpl(rax_elsize, LogBytesPerLong);
2661 __ jcc(Assembler::equal, L);
2662 __ stop("must be long copy, but elsize is wrong");
2663 __ bind(L);
2664 BLOCK_COMMENT("} assert long copy done");
2665 }
2666 #endif
2667 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2668 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2669 __ movl2ptr(count, r11_length); // length
2670 __ jump(RuntimeAddress(long_copy_entry));
2672 // objArrayKlass
2673 __ BIND(L_objArray);
2674 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos]
2676 Label L_plain_copy, L_checkcast_copy;
2677 // test array classes for subtyping
2678 __ load_klass(rax, dst);
2679 __ cmpq(r10_src_klass, rax); // usual case is exact equality
2680 __ jcc(Assembler::notEqual, L_checkcast_copy);
2682 // Identically typed arrays can be copied without element-wise checks.
2683 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2684 r10, L_failed);
2686 __ lea(from, Address(src, src_pos, TIMES_OOP,
2687 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2688 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2689 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2690 __ movl2ptr(count, r11_length); // length
2691 __ BIND(L_plain_copy);
2692 __ jump(RuntimeAddress(oop_copy_entry));
2694 __ BIND(L_checkcast_copy);
2695 // live at this point: r10_src_klass, r11_length, rax (dst_klass)
2696 {
2697 // Before looking at dst.length, make sure dst is also an objArray.
2698 __ cmpl(Address(rax, lh_offset), objArray_lh);
2699 __ jcc(Assembler::notEqual, L_failed);
2701 // It is safe to examine both src.length and dst.length.
2702 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2703 rax, L_failed);
2705 const Register r11_dst_klass = r11;
2706 __ load_klass(r11_dst_klass, dst); // reload
2708 // Marshal the base address arguments now, freeing registers.
2709 __ lea(from, Address(src, src_pos, TIMES_OOP,
2710 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2711 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2712 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2713 __ movl(count, length); // length (reloaded)
2714 Register sco_temp = c_rarg3; // this register is free now
2715 assert_different_registers(from, to, count, sco_temp,
2716 r11_dst_klass, r10_src_klass);
2717 assert_clean_int(count, sco_temp);
2719 // Generate the type check.
2720 const int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2721 Klass::super_check_offset_offset_in_bytes());
2722 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2723 assert_clean_int(sco_temp, rax);
2724 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2726 // Fetch destination element klass from the objArrayKlass header.
2727 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2728 objArrayKlass::element_klass_offset_in_bytes());
2729 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2730 __ movl( sco_temp, Address(r11_dst_klass, sco_offset));
2731 assert_clean_int(sco_temp, rax);
2733 // the checkcast_copy loop needs two extra arguments:
2734 assert(c_rarg3 == sco_temp, "#3 already in place");
2735 // Set up arguments for checkcast_copy_entry.
2736 setup_arg_regs(4);
2737 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
2738 __ jump(RuntimeAddress(checkcast_copy_entry));
2739 }
2741 __ BIND(L_failed);
2742 __ xorptr(rax, rax);
2743 __ notptr(rax); // return -1
2744 __ leave(); // required for proper stackwalking of RuntimeStub frame
2745 __ ret(0);
2747 return start;
2748 }
2750 void generate_arraycopy_stubs() {
2751 address entry;
2752 address entry_jbyte_arraycopy;
2753 address entry_jshort_arraycopy;
2754 address entry_jint_arraycopy;
2755 address entry_oop_arraycopy;
2756 address entry_jlong_arraycopy;
2757 address entry_checkcast_arraycopy;
2759 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
2760 "jbyte_disjoint_arraycopy");
2761 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
2762 "jbyte_arraycopy");
2764 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
2765 "jshort_disjoint_arraycopy");
2766 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
2767 "jshort_arraycopy");
2769 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry,
2770 "jint_disjoint_arraycopy");
2771 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry,
2772 &entry_jint_arraycopy, "jint_arraycopy");
2774 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry,
2775 "jlong_disjoint_arraycopy");
2776 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry,
2777 &entry_jlong_arraycopy, "jlong_arraycopy");
2780 if (UseCompressedOops) {
2781 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry,
2782 "oop_disjoint_arraycopy");
2783 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry,
2784 &entry_oop_arraycopy, "oop_arraycopy");
2785 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
2786 "oop_disjoint_arraycopy_uninit",
2787 /*dest_uninitialized*/true);
2788 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
2789 NULL, "oop_arraycopy_uninit",
2790 /*dest_uninitialized*/true);
2791 } else {
2792 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
2793 "oop_disjoint_arraycopy");
2794 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry,
2795 &entry_oop_arraycopy, "oop_arraycopy");
2796 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
2797 "oop_disjoint_arraycopy_uninit",
2798 /*dest_uninitialized*/true);
2799 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
2800 NULL, "oop_arraycopy_uninit",
2801 /*dest_uninitialized*/true);
2802 }
2804 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
2805 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
2806 /*dest_uninitialized*/true);
2808 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
2809 entry_jbyte_arraycopy,
2810 entry_jshort_arraycopy,
2811 entry_jint_arraycopy,
2812 entry_jlong_arraycopy);
2813 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
2814 entry_jbyte_arraycopy,
2815 entry_jshort_arraycopy,
2816 entry_jint_arraycopy,
2817 entry_oop_arraycopy,
2818 entry_jlong_arraycopy,
2819 entry_checkcast_arraycopy);
2821 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2822 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2823 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2824 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2825 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2826 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2828 // We don't generate specialized code for HeapWord-aligned source
2829 // arrays, so just use the code we've already generated.
2830 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2831 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2833 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2834 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2836 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2837 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2839 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2840 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2842 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2843 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2845 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
2846 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
2847 }
2849 void generate_math_stubs() {
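// Each stub below uses the same pattern: the incoming double in xmm0
// is spilled to the stack, loaded onto the x87 stack (the log and
// trig instructions are x87-only), and the x87 result is stored back
// and reloaded into xmm0.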
2850 {
2851 StubCodeMark mark(this, "StubRoutines", "log");
2852 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2854 __ subq(rsp, 8);
2855 __ movdbl(Address(rsp, 0), xmm0);
2856 __ fld_d(Address(rsp, 0));
2857 __ flog();
2858 __ fstp_d(Address(rsp, 0));
2859 __ movdbl(xmm0, Address(rsp, 0));
2860 __ addq(rsp, 8);
2861 __ ret(0);
2862 }
2863 {
2864 StubCodeMark mark(this, "StubRoutines", "log10");
2865 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2867 __ subq(rsp, 8);
2868 __ movdbl(Address(rsp, 0), xmm0);
2869 __ fld_d(Address(rsp, 0));
2870 __ flog10();
2871 __ fstp_d(Address(rsp, 0));
2872 __ movdbl(xmm0, Address(rsp, 0));
2873 __ addq(rsp, 8);
2874 __ ret(0);
2875 }
2876 {
2877 StubCodeMark mark(this, "StubRoutines", "sin");
2878 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2880 __ subq(rsp, 8);
2881 __ movdbl(Address(rsp, 0), xmm0);
2882 __ fld_d(Address(rsp, 0));
2883 __ trigfunc('s');
2884 __ fstp_d(Address(rsp, 0));
2885 __ movdbl(xmm0, Address(rsp, 0));
2886 __ addq(rsp, 8);
2887 __ ret(0);
2888 }
2889 {
2890 StubCodeMark mark(this, "StubRoutines", "cos");
2891 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2893 __ subq(rsp, 8);
2894 __ movdbl(Address(rsp, 0), xmm0);
2895 __ fld_d(Address(rsp, 0));
2896 __ trigfunc('c');
2897 __ fstp_d(Address(rsp, 0));
2898 __ movdbl(xmm0, Address(rsp, 0));
2899 __ addq(rsp, 8);
2900 __ ret(0);
2901 }
2902 {
2903 StubCodeMark mark(this, "StubRoutines", "tan");
2904 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2906 __ subq(rsp, 8);
2907 __ movdbl(Address(rsp, 0), xmm0);
2908 __ fld_d(Address(rsp, 0));
2909 __ trigfunc('t');
2910 __ fstp_d(Address(rsp, 0));
2911 __ movdbl(xmm0, Address(rsp, 0));
2912 __ addq(rsp, 8);
2913 __ ret(0);
2914 }
2916 // The intrinsic versions of these seem to return the same values as
2917 // the strict versions.
2918 StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
2919 StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
2920 }
2922 #undef __
2923 #define __ masm->
2925 // Continuation point for throwing of implicit exceptions that are
2926 // not handled in the current activation. Fabricates an exception
2927 // oop and initiates normal exception dispatching in this
2928 // frame. Since we need to preserve callee-saved values (currently
2929 // only for C2, but done for C1 as well) we need a callee-saved oop
2930 // map and therefore have to make these stubs into RuntimeStubs
2931 // rather than BufferBlobs. If the compiler needs all registers to
2932 // be preserved between the fault point and the exception handler
2933 // then it must assume responsibility for that in
2934 // AbstractCompiler::continuation_for_implicit_null_exception or
2935 // continuation_for_implicit_division_by_zero_exception. All other
2936 // implicit exceptions (e.g., NullPointerException or
2937 // AbstractMethodError on entry) are either at call sites or
2938 // otherwise assume that stack unwinding will be initiated, so
2939 // caller saved registers were assumed volatile in the compiler.
2940 address generate_throw_exception(const char* name,
2941 address runtime_entry,
2942 Register arg1 = noreg,
2943 Register arg2 = noreg) {
2944 // Information about frame layout at time of blocking runtime call.
2945 // Note that we only have to preserve callee-saved registers since
2946 // the compilers are responsible for supplying a continuation point
2947 // if they expect all registers to be preserved.
2948 enum layout {
2949 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
2950 rbp_off2,
2951 return_off,
2952 return_off2,
2953 framesize // inclusive of return address
2954 };
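// Offsets above are in 32-bit (BytesPerInt) slots; framesize/2 is the
// frame size in words and must be even so that rsp stays 16-byte
// aligned (see the assert below).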
2956 int insts_size = 512;
2957 int locs_size = 64;
2959 CodeBuffer code(name, insts_size, locs_size);
2960 OopMapSet* oop_maps = new OopMapSet();
2961 MacroAssembler* masm = new MacroAssembler(&code);
2963 address start = __ pc();
2965 // This is an inlined and slightly modified version of call_VM
2966 // which has the ability to fetch the return PC out of
2967 // thread-local storage, and also sets up last_Java_sp slightly
2968 // differently from the real call_VM.
2970 __ enter(); // required for proper stackwalking of RuntimeStub frame
2972 assert(is_even(framesize/2), "sp not 16-byte aligned");
2974 // return address and rbp are already in place
2975 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
2977 int frame_complete = __ pc() - start;
2979 // Set up last_Java_sp and last_Java_fp
2980 __ set_last_Java_frame(rsp, rbp, NULL);
2982 // Call runtime
2983 if (arg1 != noreg) {
2984 assert(arg2 != c_rarg1, "clobbered");
2985 __ movptr(c_rarg1, arg1);
2986 }
2987 if (arg2 != noreg) {
2988 __ movptr(c_rarg2, arg2);
2989 }
2990 __ movptr(c_rarg0, r15_thread);
2991 BLOCK_COMMENT("call runtime_entry");
2992 __ call(RuntimeAddress(runtime_entry));
2994 // Generate oop map
2995 OopMap* map = new OopMap(framesize, 0);
2997 oop_maps->add_gc_map(__ pc() - start, map);
2999 __ reset_last_Java_frame(true, false);
3001 __ leave(); // required for proper stackwalking of RuntimeStub frame
3003 // check for pending exceptions
3004 #ifdef ASSERT
3005 Label L;
3006 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
3007 (int32_t) NULL_WORD);
3008 __ jcc(Assembler::notEqual, L);
3009 __ should_not_reach_here();
3010 __ bind(L);
3011 #endif // ASSERT
3012 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3015 // codeBlob framesize is in words (not VMRegImpl::slot_size)
3016 RuntimeStub* stub =
3017 RuntimeStub::new_runtime_stub(name,
3018 &code,
3019 frame_complete,
3020 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3021 oop_maps, false);
3022 return stub->entry_point();
3023 }
3025 // Initialization
3026 void generate_initial() {
3027 // Generates all stubs and initializes the entry points
3029 // This platform-specific stub is needed by generate_call_stub()
3030 StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
3032 // Entry points that exist on all platforms. Note: This is code
3033 // that could be shared among different platforms - however the
3034 // benefit seems to be smaller than the disadvantage of having a
3035 // much more complicated generator structure. See also comment in
3036 // stubRoutines.hpp.
3038 StubRoutines::_forward_exception_entry = generate_forward_exception();
3040 StubRoutines::_call_stub_entry =
3041 generate_call_stub(StubRoutines::_call_stub_return_address);
3043 // This is referenced by megamorphic calls
3044 StubRoutines::_catch_exception_entry = generate_catch_exception();
3046 // atomic calls
3047 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
3048 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
3049 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
3050 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3051 StubRoutines::_atomic_add_entry = generate_atomic_add();
3052 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
3053 StubRoutines::_fence_entry = generate_orderaccess_fence();
3055 StubRoutines::_handler_for_unsafe_access_entry =
3056 generate_handler_for_unsafe_access();
3058 // platform dependent
3059 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
3061 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
3063 // Build this early so it's available for the interpreter. Stub
3064 // expects the required and actual types as register arguments in
3065 // j_rarg0 and j_rarg1 respectively.
3066 StubRoutines::_throw_WrongMethodTypeException_entry =
3067 generate_throw_exception("WrongMethodTypeException throw_exception",
3068 CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
3069 rax, rcx);
3070 }
3072 void generate_all() {
3073 // Generates all stubs and initializes the entry points
3075 // These entry points require SharedInfo::stack0 to be set up in
3076 // non-core builds and need to be relocatable, so they each
3077 // fabricate a RuntimeStub internally.
3078 StubRoutines::_throw_AbstractMethodError_entry =
3079 generate_throw_exception("AbstractMethodError throw_exception",
3080 CAST_FROM_FN_PTR(address,
3081 SharedRuntime::
3082 throw_AbstractMethodError));
3084 StubRoutines::_throw_IncompatibleClassChangeError_entry =
3085 generate_throw_exception("IncompatibleClassChangeError throw_exception",
3086 CAST_FROM_FN_PTR(address,
3087 SharedRuntime::
3088 throw_IncompatibleClassChangeError));
3090 StubRoutines::_throw_NullPointerException_at_call_entry =
3091 generate_throw_exception("NullPointerException at call throw_exception",
3092 CAST_FROM_FN_PTR(address,
3093 SharedRuntime::
3094 throw_NullPointerException_at_call));
3096 StubRoutines::_throw_StackOverflowError_entry =
3097 generate_throw_exception("StackOverflowError throw_exception",
3098 CAST_FROM_FN_PTR(address,
3099 SharedRuntime::
3100 throw_StackOverflowError));
3102 // entry points that are platform specific
3103 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
3104 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
3105 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
3106 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
3108 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
3109 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
3110 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
3111 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
3113 // support for verify_oop (must happen after universe_init)
3114 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
3116 // arraycopy stubs used by compilers
3117 generate_arraycopy_stubs();
3119 generate_math_stubs();
3120 }
3122 public:
3123 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3124 if (all) {
3125 generate_all();
3126 } else {
3127 generate_initial();
3128 }
3129 }
3130 }; // end class declaration
3132 void StubGenerator_generate(CodeBuffer* code, bool all) {
3133 StubGenerator g(code, all);
3134 }