Sat, 10 Sep 2011 00:11:04 -0700
7088020: SEGV in JNIHandleBlock::release_block
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate.
  // Doing a no-op is fine: return garbage from the load.
  // Therefore, compute npc, the address of the next instruction.
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}
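
// Illustrative sketch (not part of this file): roughly how the per-OS
// signal handler hands a faulting Unsafe access to the stub generated by
// generate_handler_for_unsafe_access() below. The helper names here are
// schematic; the real logic lives in the platform signal handling code
// (e.g. os_linux_x86.cpp).
//
//   if (thread->doing_unsafe_access() && is_unsafe_access_fault(sig)) {
//     thread->set_saved_exception_pc(context_pc(uc));   // remember faulting pc
//     context_set_pc(uc, StubRoutines::handler_for_unsafe_access());
//     return true;                                      // resume in the stub
//   }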
class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#else
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif
  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 methodOop
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //  -12 [ argument word 1      ]
  //  -11 [ saved r15            ] <--- rsp_after_call
  //  -10 [ saved r14            ]
  //   -9 [ saved r13            ]
  //   -8 [ saved r12            ]
  //   -7 [ saved rbx            ]
  //   -6 [ call wrapper         ]
  //   -5 [ result               ]
  //   -4 [ result type          ]
  //   -3 [ method               ]
  //   -2 [ entry point          ]
  //   -1 [ parameters           ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ parameter size       ]
  //    3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 methodOop
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //  -28 [ argument word 1      ]
  //  -27 [ saved xmm15          ] <--- rsp_after_call
  //      [ saved xmm7-xmm14     ]
  //   -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //   -7 [ saved r15            ]
  //   -6 [ saved r14            ]
  //   -5 [ saved r13            ]
  //   -4 [ saved r12            ]
  //   -3 [ saved rdi            ]
  //   -2 [ saved rsi            ]
  //   -1 [ saved rbx            ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ call wrapper         ]
  //    3 [ result               ]
  //    4 [ result type          ]
  //    5 [ method               ]
  //    6 [ entry point          ]
  //    7 [ parameters           ]
  //    8 [ parameter size       ]
  //    9 [ thread               ]
  //
  // Windows reserves the caller's stack space for arguments 1-4.
  // We spill c_rarg0-c_rarg3 to this space.
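
  // For reference, the C side enters this stub through a function-pointer
  // type declared in stubRoutines.hpp (reproduced here from memory; see
  // that file for the authoritative declaration). The first six arguments
  // arrive in registers on Linux but only the first four on Windows, which
  // is why the two layouts above differ:
  //
  //   typedef void (*CallStub)(address        link,            // call wrapper
  //                            intptr_t*      result,
  //                            BasicType      result_type,
  //                            methodOopDesc* method,
  //                            address        entry_point,
  //                            intptr_t*      parameters,
  //                            int            size_of_parameters,
  //                            TRAPS);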
  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 15, // to xmm15
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,  c_rarg5); // parameters
    __ movptr(entry_point, c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
#ifdef _WIN64
    for (int i = 6; i <= 15; i++) {
      __ movdqu(xmm_save(i), as_XMMRegister(i));
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);     // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize);       // advance to next parameter
    __ decrementl(c_rarg1);             // decrement counter
    __ push(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get methodOop
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    for (int i = 15; i >= 6; i--) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }
  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
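
  // A C-level sketch of the contract implemented above (illustrative only;
  // the single xchgl instruction performs the load and the store as one
  // atomic operation):
  //
  //   jint atomic_xchg(jint exchange_value, volatile jint* dest) {
  //     jint old = *dest;           // returned in rax
  //     *dest    = exchange_value;  // done atomically by xchgl
  //     return old;
  //   }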
  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
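
  // Note on the implementation: cmpxchgl compares rax (loaded with
  // compare_value) against *dest; on success it stores exchange_value, on
  // failure it loads the current *dest into rax. Either way rax ends up
  // holding exactly the value the contract above requires. A hedged usage
  // sketch, assuming a function-pointer type matching the contract:
  //
  //   typedef jint (*cmpxchg_func)(jint exchange, volatile jint* dest, jint compare);
  //   // lock-free increment built on compare-and-swap:
  //   jint old;
  //   do {
  //     old = *counter;
  //   } while (cmpxchg(old + 1, counter, old) != old);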
  // Support for jlong atomic::atomic_cmpxchg_long(jlong exchange_value,
  //                                               volatile jlong* dest,
  //                                               jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }
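
  // xaddl atomically adds c_rarg0 to *dest and leaves the *old* value of
  // *dest in c_rarg0, so the final addl (rax = add_value + old) recovers
  // the *new* value the contract requires. Illustrative C equivalent
  // (fetch_and_add stands in for the xaddl instruction):
  //
  //   jint atomic_add(jint add_value, volatile jint* dest) {
  //     jint old = fetch_and_add(dest, add_value);  // xaddl
  //     return old + add_value;                     // addl rax, c_rarg0
  //   }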
  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }
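
  // On x86-64 only store-load reordering is observable to other processors,
  // so a single StoreLoad membar (emitted by the assembler as a locked
  // read-modify-write or an mfence) already acts as a full fence; the
  // weaker barrier kinds need no code on this platform.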
  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }
  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }
  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
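
  // generate_f2i_fixup() supplies the Java semantics for float-to-int
  // conversion in the cases where cvttss2si has already produced the
  // "integer indefinite" value: NaN converts to 0 and out-of-range values
  // saturate. The effect on the float's raw bits, as a sketch (illustrative
  // only, not the compiled code path):
  //
  //   jint f2i_fixup(jint bits) {                 // raw bits of the float
  //     if ((bits & 0x7fffffff) > 0x7f800000)     // NaN?
  //       return 0;
  //     return (bits < 0) ? min_jint : max_jint;  // saturate on overflow
  //   }
  //
  // The f2l/d2i/d2l fixups below follow the same pattern with the matching
  // exponent masks and long/int limits.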
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                       // hole for return address-to-be
    __ pusha();                       // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    // FIXME: this probably needs alignment logic

    __ subptr(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addptr(rsp, frame::arg_reg_save_area_bytes);

    __ movptr(next_pc, rax);          // stuff next address
    __ popa();
    __ ret(0);                        // jump to next address

    return start;
  }
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable'
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken
    // Check if the klass is in the right area of memory
    __ mov(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // make sure klass' klass is 'reasonable'
    __ load_klass(rax, rax);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
    // Check if the klass' klass is in the right area of memory
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
    __ andptr(rax, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
    __ cmpptr(rax, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }
  // Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
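
  // The test above dispatches to the disjoint (forward) copy whenever the
  // destination does not overlap the source from above. In C terms
  // (illustrative; byte_count is the element count scaled by sf, the log2
  // of the element size):
  //
  //   bool can_copy_forward(char* from, char* to, size_t byte_count) {
  //     return to <= from || to >= from + byte_count;
  //   }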
  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  //  Outputs:
  //     rdi - rcx
  //     rsi - rdx
  //     rdx - r8
  //     rcx - r9
  //
  // Registers r9 and r10 are used on Windows to save rdi and rsi, which are
  // non-volatile there.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
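
  // For reference, the two calling conventions being bridged here:
  //
  //   SysV (Linux/Solaris):  c_rarg0..5 = rdi, rsi, rdx, rcx, r8, r9
  //   Win64:                 c_rarg0..3 = rcx, rdx, r8, r9 (rest on stack)
  //
  // Moving rcx/rdx/r8 into rdi/rsi/rdx lets the copy stubs below use one
  // register assignment on all platforms.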
  // Generate code for an array write pre barrier
  //
  //     addr               -  starting address
  //     count              -  element count
  //     dest_uninitialized -  true iff the destination is known to be uninitialized
  //
  //     Destroy no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }
  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     end      - register containing ending address of destination array
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //  The ending address is inclusive.
  void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
    assert_different_registers(start, end, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:

        {
          __ pusha();             // push registers (overkill)
          // must compute element count unless barrier set interface is changed (other platforms supply count)
          assert_different_registers(start, end, scratch);
          __ lea(scratch, Address(end, BytesPerHeapOop));
          __ subptr(scratch, start);               // subtract start to get #bytes
          __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
          __ mov(c_rarg0, start);
          __ mov(c_rarg1, scratch);
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;

          __ shrptr(start, CardTableModRefBS::card_shift);
          __ addptr(end, BytesPerHeapOop);
          __ shrptr(end, CardTableModRefBS::card_shift);
          __ subptr(end, start); // number of cards to dirty

          intptr_t disp = (intptr_t) ct->byte_map_base;
          if (__ is_simm32(disp)) {
            Address cardtable(noreg, noreg, Address::no_scale, disp);
            __ lea(scratch, cardtable);
          } else {
            ExternalAddress cardtable((address)disp);
            __ lea(scratch, cardtable);
          }

          const Register count = end; // 'end' register contains the card count now
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
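
  // Card-table sketch of the CardTableModRef branch above (illustrative;
  // names follow CardTableModRefBS): every card spanned by the inclusive
  // range [start, end] is dirtied by storing a zero byte, walking backward
  // from the last card.
  //
  //   jbyte* base  = ct->byte_map_base;
  //   size_t first = (uintptr_t)start >> card_shift;
  //   size_t last  = ((uintptr_t)end + BytesPerHeapOop) >> card_shift;
  //   for (size_t i = last; i >= first; i--)   // the decrement/jcc loop above
  //     base[i] = 0;                           // 0 == dirty card value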
  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array's end address
  //   end_to       - destination array's end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_32_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_32_bytes_forward(Register end_from, Register end_to,
                             Register qword_count, Register to,
                             Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (UseUnalignedLoadStores) {
      __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
      __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
      __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);

    } else {
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
    }
    __ BIND(L_copy_32_bytes);
    __ addptr(qword_count, 4);
    __ jcc(Assembler::lessEqual, L_loop);
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
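
  // The forward copier biases qword_count negative so that one addptr plus
  // a conditional jump both advances the index and tests for completion:
  // indices run from -count up to 0, and since end_from/end_to point at
  // the last qword, end_from[8*i] walks the array front to back. A rough
  // C shape of the loop above (illustrative only):
  //
  //   for (i = -count + 4; i <= 0; i += 4)   // addptr(qword_count, 4); jcc(lessEqual)
  //     copy the qwords at offsets i-3 .. i; // the movdqu/movq pairs
  //   // 0..3 leftover qwords fall out to L_copy_8_bytes
  //
  // copy_32_bytes_backward() below is the mirror image, counting down.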
  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array address
  //   dest         - destination array address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_32_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_32_bytes_backward(Register from, Register dest,
                              Register qword_count, Register to,
                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (UseUnalignedLoadStores) {
      __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
      __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
      __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
      __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);

    } else {
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);
    }
    __ BIND(L_copy_32_bytes);
    __ subptr(qword_count, 4);
    __ jcc(Assembler::greaterEqual, L_loop);
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
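
  // Tail handling above decomposes the byte count with bit tests: after
  // the qword loop has copied count & ~7 bytes, bit 2 of byte_count selects
  // a trailing dword, bit 1 a trailing word, and bit 0 a trailing byte.
  // Roughly:
  //
  //   copy (count >> 3) qwords;
  //   if (count & 4) copy 1 dword;
  //   if (count & 2) copy 1 word;
  //   if (count & 1) copy 1 byte;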
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
1691 // Arguments:
1692 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1693 // ignored
1694 // name - stub name string
1695 //
1696 // Inputs:
1697 // c_rarg0 - source array address
1698 // c_rarg1 - destination array address
1699 // c_rarg2 - element count, treated as ssize_t, can be zero
1700 //
1701 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1702 // let the hardware handle it. The two or four words within dwords
1703 // or qwords that span cache line boundaries will still be loaded
1704 // and stored atomically.
1705 //
1706 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1707 address *entry, const char *name) {
1708 __ align(CodeEntryAlignment);
1709 StubCodeMark mark(this, "StubRoutines", name);
1710 address start = __ pc();
1712 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
1713 const Register from = rdi; // source array address
1714 const Register to = rsi; // destination array address
1715 const Register count = rdx; // elements count
1716 const Register word_count = rcx;
1717 const Register qword_count = count;
1719 __ enter(); // required for proper stackwalking of RuntimeStub frame
1720 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1722 if (entry != NULL) {
1723 *entry = __ pc();
1724 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1725 BLOCK_COMMENT("Entry:");
1726 }
1728 array_overlap_test(nooverlap_target, Address::times_2);
1729 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1730 // r9 and r10 may be used to save non-volatile registers
1732 // 'from', 'to' and 'count' are now valid
1733 __ movptr(word_count, count);
1734 __ shrptr(count, 2); // count => qword_count
1736 // Copy from high to low addresses. Use 'to' as scratch.
1738 // Check for and copy trailing word
1739 __ testl(word_count, 1);
1740 __ jccb(Assembler::zero, L_copy_4_bytes);
1741 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1742 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1744 // Check for and copy trailing dword
1745 __ BIND(L_copy_4_bytes);
1746 __ testl(word_count, 2);
1747 __ jcc(Assembler::zero, L_copy_32_bytes);
1748 __ movl(rax, Address(from, qword_count, Address::times_8));
1749 __ movl(Address(to, qword_count, Address::times_8), rax);
1750 __ jmp(L_copy_32_bytes);
1752 // Copy trailing qwords
1753 __ BIND(L_copy_8_bytes);
1754 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1755 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1756 __ decrement(qword_count);
1757 __ jcc(Assembler::notZero, L_copy_8_bytes);
1759 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1760 restore_arg_regs();
1761 __ xorptr(rax, rax); // return 0
1762 __ leave(); // required for proper stackwalking of RuntimeStub frame
1763 __ ret(0);
1765 // Copy in 32-byte chunks
1766 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1768 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1769 restore_arg_regs();
1770 __ xorptr(rax, rax); // return 0
1771 __ leave(); // required for proper stackwalking of RuntimeStub frame
1772 __ ret(0);
1774 return start;
1775 }
1777 // Arguments:
1778 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1779 // (currently ignored by this stub)
1780 // is_oop - true => oop array, so generate store check code
1781 // name - stub name string
1782 //
1783 // Inputs:
1784 // c_rarg0 - source array address
1785 // c_rarg1 - destination array address
1786 // c_rarg2 - element count, treated as ssize_t, can be zero
1787 //
1788 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1789 // the hardware handle it. The two dwords within qwords that span
1790 // cache line boundaries will still be loaded and stored atomically.
1791 //
1792 // Side Effects:
1793 // disjoint_int_copy_entry is set to the no-overlap entry point
1794 // used by generate_conjoint_int_oop_copy().
1795 //
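//
// Comment-only sketch of the count decomposition used below
// (illustrative, not generated code): qwords are copied forward
// through inclusive end pointers and a negative index, then one
// trailing dword is fixed up when the element count is odd:
//
//   void disjoint_jint_copy(jint* from, jint* to, size_t count) {
//     size_t qwords = count >> 1;                   // two ints per qword
//     jlong* end_from = (jlong*)from + qwords - 1;  // inclusive end pointers
//     jlong* end_to   = (jlong*)to   + qwords - 1;  // (unused if qwords == 0)
//     for (ssize_t i = -(ssize_t)qwords; i != 0; i++)
//       end_to[i + 1] = end_from[i + 1];
//     if (count & 1)                                // trailing dword
//       to[count - 1] = from[count - 1];
//   }
//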
1796 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
1797 const char *name, bool dest_uninitialized = false) {
1798 __ align(CodeEntryAlignment);
1799 StubCodeMark mark(this, "StubRoutines", name);
1800 address start = __ pc();
1802 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1803 const Register from = rdi; // source array address
1804 const Register to = rsi; // destination array address
1805 const Register count = rdx; // elements count
1806 const Register dword_count = rcx;
1807 const Register qword_count = count;
1808 const Register end_from = from; // source array end address
1809 const Register end_to = to; // destination array end address
1810 const Register saved_to = r11; // saved destination array address
1811 // End pointers are inclusive, and if count is not zero they point
1812 // to the last unit copied: end_to[0] := end_from[0]
1814 __ enter(); // required for proper stackwalking of RuntimeStub frame
1815 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1817 if (entry != NULL) {
1818 *entry = __ pc();
1819 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1820 BLOCK_COMMENT("Entry:");
1821 }
1823 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1824 // r9 and r10 may be used to save non-volatile registers
1825 if (is_oop) {
1826 __ movq(saved_to, to);
1827 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1828 }
1830 // 'from', 'to' and 'count' are now valid
1831 __ movptr(dword_count, count);
1832 __ shrptr(count, 1); // count => qword_count
1834 // Copy from low to high addresses. Use 'to' as scratch.
1835 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1836 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1837 __ negptr(qword_count);
1838 __ jmp(L_copy_32_bytes);
1840 // Copy trailing qwords
1841 __ BIND(L_copy_8_bytes);
1842 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1843 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1844 __ increment(qword_count);
1845 __ jcc(Assembler::notZero, L_copy_8_bytes);
1847 // Check for and copy trailing dword
1848 __ BIND(L_copy_4_bytes);
1849 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1850 __ jccb(Assembler::zero, L_exit);
1851 __ movl(rax, Address(end_from, 8));
1852 __ movl(Address(end_to, 8), rax);
1854 __ BIND(L_exit);
1855 if (is_oop) {
1856 __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1857 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1858 }
1859 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1860 restore_arg_regs();
1861 __ xorptr(rax, rax); // return 0
1862 __ leave(); // required for proper stackwalking of RuntimeStub frame
1863 __ ret(0);
1865 // Copy in 32-byte chunks
1866 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1867 __ jmp(L_copy_4_bytes);
1869 return start;
1870 }
1872 // Arguments:
1873 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1874 // (currently ignored by this stub)
1875 // is_oop - true => oop array, so generate store check code
1876 // name - stub name string
1877 //
1878 // Inputs:
1879 // c_rarg0 - source array address
1880 // c_rarg1 - destination array address
1881 // c_rarg2 - element count, treated as ssize_t, can be zero
1882 //
1883 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1884 // the hardware handle it. The two dwords within qwords that span
1885 // cache line boundaries will still be loaded and stored atomically.
1886 //
1887 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
1888 address *entry, const char *name,
1889 bool dest_uninitialized = false) {
1890 __ align(CodeEntryAlignment);
1891 StubCodeMark mark(this, "StubRoutines", name);
1892 address start = __ pc();
1894 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
1895 const Register from = rdi; // source array address
1896 const Register to = rsi; // destination array address
1897 const Register count = rdx; // elements count
1898 const Register dword_count = rcx;
1899 const Register qword_count = count;
1901 __ enter(); // required for proper stackwalking of RuntimeStub frame
1902 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1904 if (entry != NULL) {
1905 *entry = __ pc();
1906 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1907 BLOCK_COMMENT("Entry:");
1908 }
1910 array_overlap_test(nooverlap_target, Address::times_4);
1911 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1912 // r9 and r10 may be used to save non-volatile registers
1914 if (is_oop) {
1915 // no registers are destroyed by this call
1916 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
1917 }
1919 assert_clean_int(count, rax); // Make sure 'count' is clean int.
1920 // 'from', 'to' and 'count' are now valid
1921 __ movptr(dword_count, count);
1922 __ shrptr(count, 1); // count => qword_count
1924 // Copy from high to low addresses. Use 'to' as scratch.
1926 // Check for and copy trailing dword
1927 __ testl(dword_count, 1);
1928 __ jcc(Assembler::zero, L_copy_32_bytes);
1929 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1930 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1931 __ jmp(L_copy_32_bytes);
1933 // Copy trailing qwords
1934 __ BIND(L_copy_8_bytes);
1935 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1936 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1937 __ decrement(qword_count);
1938 __ jcc(Assembler::notZero, L_copy_8_bytes);
1940 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1941 if (is_oop) {
1942 __ jmp(L_exit);
1943 }
1944 restore_arg_regs();
1945 __ xorptr(rax, rax); // return 0
1946 __ leave(); // required for proper stackwalking of RuntimeStub frame
1947 __ ret(0);
1949 // Copy in 32-byte chunks
1950 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1952 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1953 __ bind(L_exit);
1954 if (is_oop) {
1955 Register end_to = rdx;
1956 __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
1957 gen_write_ref_array_post_barrier(to, end_to, rax);
1958 }
1959 restore_arg_regs();
1960 __ xorptr(rax, rax); // return 0
1961 __ leave(); // required for proper stackwalking of RuntimeStub frame
1962 __ ret(0);
1964 return start;
1965 }
1967 // Arguments:
1968 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1969 // (currently ignored by this stub)
1970 // is_oop - true => oop array, so generate store check code
1971 // name - stub name string
1972 //
1973 // Inputs:
1974 // c_rarg0 - source array address
1975 // c_rarg1 - destination array address
1976 // c_rarg2 - element count, treated as ssize_t, can be zero
1977 //
1978 // Side Effects:
1979 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
1980 // no-overlap entry point used by generate_conjoint_long_oop_copy().
1981 //
1982 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
1983 const char *name, bool dest_uninitialized = false) {
1984 __ align(CodeEntryAlignment);
1985 StubCodeMark mark(this, "StubRoutines", name);
1986 address start = __ pc();
1988 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1989 const Register from = rdi; // source array address
1990 const Register to = rsi; // destination array address
1991 const Register qword_count = rdx; // elements count
1992 const Register end_from = from; // source array end address
1993 const Register end_to = rcx; // destination array end address
1994 const Register saved_to = to;
1995 // End pointers are inclusive, and if count is not zero they point
1996 // to the last unit copied: end_to[0] := end_from[0]
1998 __ enter(); // required for proper stackwalking of RuntimeStub frame
1999 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
2000 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2002 if (entry != NULL) {
2003 *entry = __ pc();
2004 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2005 BLOCK_COMMENT("Entry:");
2006 }
2008 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2009 // r9 and r10 may be used to save non-volatile registers
2010 // 'from', 'to' and 'qword_count' are now valid
2011 if (is_oop) {
2012 // no registers are destroyed by this call
2013 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
2014 }
2016 // Copy from low to high addresses. Use 'to' as scratch.
2017 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
2018 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
2019 __ negptr(qword_count);
2020 __ jmp(L_copy_32_bytes);
2022 // Copy trailing qwords
2023 __ BIND(L_copy_8_bytes);
2024 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
2025 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
2026 __ increment(qword_count);
2027 __ jcc(Assembler::notZero, L_copy_8_bytes);
2029 if (is_oop) {
2030 __ jmp(L_exit);
2031 } else {
2032 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2033 restore_arg_regs();
2034 __ xorptr(rax, rax); // return 0
2035 __ leave(); // required for proper stackwalking of RuntimeStub frame
2036 __ ret(0);
2037 }
2040 // Copy in 32-byte chunks
2040 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2042 if (is_oop) {
2043 __ BIND(L_exit);
2044 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
2045 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2046 } else {
2047 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2048 }
2049 restore_arg_regs();
2050 __ xorptr(rax, rax); // return 0
2051 __ leave(); // required for proper stackwalking of RuntimeStub frame
2052 __ ret(0);
2054 return start;
2055 }
2057 // Arguments:
2058 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2059 // (currently ignored by this stub)
2060 // is_oop - true => oop array, so generate store check code
2061 // name - stub name string
2062 //
2063 // Inputs:
2064 // c_rarg0 - source array address
2065 // c_rarg1 - destination array address
2066 // c_rarg2 - element count, treated as ssize_t, can be zero
2067 //
2068 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
2069 address nooverlap_target, address *entry,
2070 const char *name, bool dest_uninitialized = false) {
2071 __ align(CodeEntryAlignment);
2072 StubCodeMark mark(this, "StubRoutines", name);
2073 address start = __ pc();
2075 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
2076 const Register from = rdi; // source array address
2077 const Register to = rsi; // destination array address
2078 const Register qword_count = rdx; // elements count
2079 const Register saved_count = rcx;
2081 __ enter(); // required for proper stackwalking of RuntimeStub frame
2082 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2084 if (entry != NULL) {
2085 *entry = __ pc();
2086 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2087 BLOCK_COMMENT("Entry:");
2088 }
2090 array_overlap_test(nooverlap_target, Address::times_8);
2091 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2092 // r9 and r10 may be used to save non-volatile registers
2093 // 'from', 'to' and 'qword_count' are now valid
2094 if (is_oop) {
2095 // Save to and count for store barrier
2096 __ movptr(saved_count, qword_count);
2097 // No registers are destroyed by this call
2098 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
2099 }
2101 __ jmp(L_copy_32_bytes);
2103 // Copy trailing qwords
2104 __ BIND(L_copy_8_bytes);
2105 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2106 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2107 __ decrement(qword_count);
2108 __ jcc(Assembler::notZero, L_copy_8_bytes);
2110 if (is_oop) {
2111 __ jmp(L_exit);
2112 } else {
2113 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2114 restore_arg_regs();
2115 __ xorptr(rax, rax); // return 0
2116 __ leave(); // required for proper stackwalking of RuntimeStub frame
2117 __ ret(0);
2118 }
2120 // Copy in 32-byte chunks
2121 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2123 if (is_oop) {
2124 __ BIND(L_exit);
2125 __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
2126 gen_write_ref_array_post_barrier(to, rcx, rax);
2127 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2128 } else {
2129 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2130 }
2131 restore_arg_regs();
2132 __ xorptr(rax, rax); // return 0
2133 __ leave(); // required for proper stackwalking of RuntimeStub frame
2134 __ ret(0);
2136 return start;
2137 }
2140 // Helper for generating a dynamic type check.
2141 // Smashes no registers.
2142 void generate_type_check(Register sub_klass,
2143 Register super_check_offset,
2144 Register super_klass,
2145 Label& L_success) {
2146 assert_different_registers(sub_klass, super_check_offset, super_klass);
2148 BLOCK_COMMENT("type_check:");
2150 Label L_miss;
2152 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
2153 super_check_offset);
2154 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
2156 // Fall through on failure!
2157 __ BIND(L_miss);
2158 }
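//
// Comment-only sketch of what generate_type_check() decides
// (illustrative): "is sub_klass a subtype of super_klass?", using the
// cached super_check_offset for the fast path:
//
//   if (*(klassOop*)((address)sub_klass + super_check_offset) == super_klass)
//     goto L_success;         // fast path: primary supers / cache hit
//   if (secondary supers of sub_klass contain super_klass)  // slow-path scan
//     goto L_success;
//   // otherwise fall through to L_miss
//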
2160 //
2161 // Generate checkcasting array copy stub
2162 //
2163 // Input:
2164 // c_rarg0 - source array address
2165 // c_rarg1 - destination array address
2166 // c_rarg2 - element count, treated as ssize_t, can be zero
2167 // c_rarg3 - size_t ckoff (super_check_offset)
2168 // not Win64
2169 // c_rarg4 - oop ckval (super_klass)
2170 // Win64
2171 // rsp+40 - oop ckval (super_klass)
2172 //
2173 // Output:
2174 // rax == 0 - success
2175 // rax == -1^K - failure, where K is partial transfer count
2176 //
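//
// Worked example of the failure encoding (illustrative): if the caller
// requests 10 elements and the 5th fails its type check, K = 4 elements
// were transferred, so the stub returns rax = ~4 = -5 and the caller
// recovers K as ~rax. A success return of 0 is unambiguous, since
// 0 == ~(-1) and K is never negative.
//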
2177 address generate_checkcast_copy(const char *name, address *entry,
2178 bool dest_uninitialized = false) {
2180 Label L_load_element, L_store_element, L_do_card_marks, L_done;
2182 // Input registers (after setup_arg_regs)
2183 const Register from = rdi; // source array address
2184 const Register to = rsi; // destination array address
2185 const Register length = rdx; // elements count
2186 const Register ckoff = rcx; // super_check_offset
2187 const Register ckval = r8; // super_klass
2189 // Registers used as temps (r13, r14 are save-on-entry)
2190 const Register end_from = from; // source array end address
2191 const Register end_to = r13; // destination array end address
2192 const Register count = rdx; // -(count_remaining)
2193 const Register r14_length = r14; // saved copy of length
2194 // End pointers are inclusive, and if length is not zero they point
2195 // to the last unit copied: end_to[0] := end_from[0]
2197 const Register rax_oop = rax; // actual oop copied
2198 const Register r11_klass = r11; // oop._klass
2200 //---------------------------------------------------------------
2201 // Assembler stub will be used for this call to arraycopy
2202 // if the two arrays are subtypes of Object[] but the
2203 // destination array type is not equal to or a supertype
2204 // of the source type. Each element must be separately
2205 // checked.
2207 __ align(CodeEntryAlignment);
2208 StubCodeMark mark(this, "StubRoutines", name);
2209 address start = __ pc();
2211 __ enter(); // required for proper stackwalking of RuntimeStub frame
2213 #ifdef ASSERT
2214 // caller guarantees that the arrays really are different
2215 // otherwise, we would have to make conjoint checks
2216 { Label L;
2217 array_overlap_test(L, TIMES_OOP);
2218 __ stop("checkcast_copy within a single array");
2219 __ bind(L);
2220 }
2221 #endif //ASSERT
2223 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2224 // ckoff => rcx, ckval => r8
2225 // r9 and r10 may be used to save non-volatile registers
2226 #ifdef _WIN64
2227 // last argument (#4) is on stack on Win64
2228 __ movptr(ckval, Address(rsp, 6 * wordSize));
2229 #endif
2231 // Caller of this entry point must set up the argument registers.
2232 if (entry != NULL) {
2233 *entry = __ pc();
2234 BLOCK_COMMENT("Entry:");
2235 }
2237 // allocate spill slots for r13, r14
2238 enum {
2239 saved_r13_offset,
2240 saved_r14_offset,
2241 saved_rbp_offset
2242 };
2243 __ subptr(rsp, saved_rbp_offset * wordSize);
2244 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2245 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2247 // check that int operands are properly extended to size_t
2248 assert_clean_int(length, rax);
2249 assert_clean_int(ckoff, rax);
2251 #ifdef ASSERT
2252 BLOCK_COMMENT("assert consistent ckoff/ckval");
2253 // The ckoff and ckval must be mutually consistent,
2254 // even though caller generates both.
2255 { Label L;
2256 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2257 Klass::super_check_offset_offset_in_bytes());
2258 __ cmpl(ckoff, Address(ckval, sco_offset));
2259 __ jcc(Assembler::equal, L);
2260 __ stop("super_check_offset inconsistent");
2261 __ bind(L);
2262 }
2263 #endif //ASSERT
2265 // Loop-invariant addresses. They are exclusive end pointers.
2266 Address end_from_addr(from, length, TIMES_OOP, 0);
2267 Address end_to_addr(to, length, TIMES_OOP, 0);
2268 // Loop-variant addresses. They assume post-incremented count < 0.
2269 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2270 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2272 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
2274 // Copy from low to high addresses, indexed from the end of each array.
2275 __ lea(end_from, end_from_addr);
2276 __ lea(end_to, end_to_addr);
2277 __ movptr(r14_length, length); // save a copy of the length
2278 assert(length == count, ""); // else fix next line:
2279 __ negptr(count); // negate and test the length
2280 __ jcc(Assembler::notZero, L_load_element);
2282 // Empty array: Nothing to do.
2283 __ xorptr(rax, rax); // return 0 on (trivial) success
2284 __ jmp(L_done);
2286 // ======== begin loop ========
2287 // (Loop is rotated; its entry is L_load_element.)
2288 // Loop control:
2289 // for (count = -count; count != 0; count++)
2290 // Base pointers src, dst are biased by 8*(count-1), to the last element.
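//
// Comment-only sketch of the rotated loop (illustrative):
//
//   count = -count;                        // count ranges over [-n, 0)
//   goto L_load_element;
//   L_store_element: end_to[count] = oop;
//                    if (++count == 0) goto L_do_card_marks;  // success
//   L_load_element:  oop = end_from[count];
//                    if (oop == NULL) goto L_store_element;   // nulls always pass
//                    if (type check passes) goto L_store_element;
//                    /* failure: fall out of the loop */
//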
2291 __ align(OptoLoopAlignment);
2293 __ BIND(L_store_element);
2294 __ store_heap_oop(to_element_addr, rax_oop); // store the oop
2295 __ increment(count); // increment the count toward zero
2296 __ jcc(Assembler::zero, L_do_card_marks);
2298 // ======== loop entry is here ========
2299 __ BIND(L_load_element);
2300 __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2301 __ testptr(rax_oop, rax_oop);
2302 __ jcc(Assembler::zero, L_store_element);
2304 __ load_klass(r11_klass, rax_oop);// query the object klass
2305 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2306 // ======== end loop ========
2308 // It was a real error; we must depend on the caller to finish the job.
2309 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2310 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2311 // and report their number to the caller.
2312 assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2313 __ lea(end_to, to_element_addr);
2314 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2315 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2316 __ movptr(rax, r14_length); // original oops
2317 __ addptr(rax, count); // K = (original - remaining) oops
2318 __ notptr(rax); // report (-1^K) to caller
2319 __ jmp(L_done);
2321 // Come here on success only.
2322 __ BIND(L_do_card_marks);
2323 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2324 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2325 __ xorptr(rax, rax); // return 0 on success
2327 // Common exit point (success or failure).
2328 __ BIND(L_done);
2329 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2330 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2331 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2332 restore_arg_regs();
2333 __ leave(); // required for proper stackwalking of RuntimeStub frame
2334 __ ret(0);
2336 return start;
2337 }
2339 //
2340 // Generate 'unsafe' array copy stub
2341 // Though just as safe as the other stubs, it takes an unscaled
2342 // size_t argument instead of an element count.
2343 //
2344 // Input:
2345 // c_rarg0 - source array address
2346 // c_rarg1 - destination array address
2347 // c_rarg2 - byte count, treated as ssize_t, can be zero
2348 //
2349 // Examines the alignment of the operands and dispatches
2350 // to a long, int, short, or byte copy loop.
2351 //
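//
// Comment-only sketch of the dispatch below (illustrative): OR-ing the
// two addresses and the byte count merges their low bits, so a single
// test per granularity finds the widest element size dividing all three:
//
//   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
//   if ((bits & 7) == 0) goto long_copy;   // all 8-byte aligned
//   if ((bits & 3) == 0) goto int_copy;
//   if ((bits & 1) == 0) goto short_copy;
//   goto byte_copy;
//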
2352 address generate_unsafe_copy(const char *name,
2353 address byte_copy_entry, address short_copy_entry,
2354 address int_copy_entry, address long_copy_entry) {
2356 Label L_long_aligned, L_int_aligned, L_short_aligned;
2358 // Input registers (before setup_arg_regs)
2359 const Register from = c_rarg0; // source array address
2360 const Register to = c_rarg1; // destination array address
2361 const Register size = c_rarg2; // byte count (size_t)
2363 // Register used as a temp
2364 const Register bits = rax; // test copy of low bits
2366 __ align(CodeEntryAlignment);
2367 StubCodeMark mark(this, "StubRoutines", name);
2368 address start = __ pc();
2370 __ enter(); // required for proper stackwalking of RuntimeStub frame
2372 // bump this on entry, not on exit:
2373 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2375 __ mov(bits, from);
2376 __ orptr(bits, to);
2377 __ orptr(bits, size);
2379 __ testb(bits, BytesPerLong-1);
2380 __ jccb(Assembler::zero, L_long_aligned);
2382 __ testb(bits, BytesPerInt-1);
2383 __ jccb(Assembler::zero, L_int_aligned);
2385 __ testb(bits, BytesPerShort-1);
2386 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2388 __ BIND(L_short_aligned);
2389 __ shrptr(size, LogBytesPerShort); // size => short_count
2390 __ jump(RuntimeAddress(short_copy_entry));
2392 __ BIND(L_int_aligned);
2393 __ shrptr(size, LogBytesPerInt); // size => int_count
2394 __ jump(RuntimeAddress(int_copy_entry));
2396 __ BIND(L_long_aligned);
2397 __ shrptr(size, LogBytesPerLong); // size => qword_count
2398 __ jump(RuntimeAddress(long_copy_entry));
2400 return start;
2401 }
2403 // Perform range checks on the proposed arraycopy.
2404 // Kills temp, but nothing else.
2405 // Also, clean the sign bits of src_pos and dst_pos.
2406 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2407 Register src_pos, // source position (c_rarg1)
2408 Register dst, // destination array oop (c_rarg2)
2409 Register dst_pos, // destination position (c_rarg3)
2410 Register length,
2411 Register temp,
2412 Label& L_failed) {
2413 BLOCK_COMMENT("arraycopy_range_checks:");
2415 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2416 __ movl(temp, length);
2417 __ addl(temp, src_pos); // src_pos + length
2418 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2419 __ jcc(Assembler::above, L_failed);
2421 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2422 __ movl(temp, length);
2423 __ addl(temp, dst_pos); // dst_pos + length
2424 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2425 __ jcc(Assembler::above, L_failed);
2427 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2428 // Move with sign extension can be used since they are positive.
2429 __ movslq(src_pos, src_pos);
2430 __ movslq(dst_pos, dst_pos);
2432 BLOCK_COMMENT("arraycopy_range_checks done");
2433 }
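//
// Note on the checks above (illustrative): src_pos, dst_pos and length
// are already known to be non-negative 32-bit values, so pos + length
// cannot wrap past 2^32, and the unsigned compare (Assembler::above)
// rejects a sum that exceeds the array length even if it overflowed
// past 2^31. In Java terms:
//
//   if (src_pos + length > src.length || dst_pos + length > dst.length)
//     goto L_failed;
//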
2435 //
2436 // Generate generic array copy stubs
2437 //
2438 // Input:
2439 // c_rarg0 - src oop
2440 // c_rarg1 - src_pos (32-bits)
2441 // c_rarg2 - dst oop
2442 // c_rarg3 - dst_pos (32-bits)
2443 // not Win64
2444 // c_rarg4 - element count (32-bits)
2445 // Win64
2446 // rsp+40 - element count (32-bits)
2447 //
2448 // Output:
2449 // rax == 0 - success
2450 // rax == -1^K - failure, where K is partial transfer count
2451 //
2452 address generate_generic_copy(const char *name,
2453 address byte_copy_entry, address short_copy_entry,
2454 address int_copy_entry, address oop_copy_entry,
2455 address long_copy_entry, address checkcast_copy_entry) {
2457 Label L_failed, L_failed_0, L_objArray;
2458 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2460 // Input registers
2461 const Register src = c_rarg0; // source array oop
2462 const Register src_pos = c_rarg1; // source position
2463 const Register dst = c_rarg2; // destination array oop
2464 const Register dst_pos = c_rarg3; // destination position
2465 #ifndef _WIN64
2466 const Register length = c_rarg4;
2467 #else
2468 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64
2469 #endif
2471 { int modulus = CodeEntryAlignment;
2472 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2473 int advance = target - (__ offset() % modulus);
2474 if (advance < 0) advance += modulus;
2475 if (advance > 0) __ nop(advance);
2476 }
2477 StubCodeMark mark(this, "StubRoutines", name);
2479 // Short-hop target to L_failed. Makes for denser prologue code.
2480 __ BIND(L_failed_0);
2481 __ jmp(L_failed);
2482 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2484 __ align(CodeEntryAlignment);
2485 address start = __ pc();
2487 __ enter(); // required for proper stackwalking of RuntimeStub frame
2489 // bump this on entry, not on exit:
2490 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2492 //-----------------------------------------------------------------------
2493 // Assembler stub will be used for this call to arraycopy
2494 // if the following conditions are met:
2495 //
2496 // (1) src and dst must not be null.
2497 // (2) src_pos must not be negative.
2498 // (3) dst_pos must not be negative.
2499 // (4) length must not be negative.
2500 // (5) src klass and dst klass should be the same and not NULL.
2501 // (6) src and dst should be arrays.
2502 // (7) src_pos + length must not exceed length of src.
2503 // (8) dst_pos + length must not exceed length of dst.
2504 //
2506 // if (src == NULL) return -1;
2507 __ testptr(src, src); // src oop
2508 size_t j1off = __ offset();
2509 __ jccb(Assembler::zero, L_failed_0);
2511 // if (src_pos < 0) return -1;
2512 __ testl(src_pos, src_pos); // src_pos (32-bits)
2513 __ jccb(Assembler::negative, L_failed_0);
2515 // if (dst == NULL) return -1;
2516 __ testptr(dst, dst); // dst oop
2517 __ jccb(Assembler::zero, L_failed_0);
2519 // if (dst_pos < 0) return -1;
2520 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2521 size_t j4off = __ offset();
2522 __ jccb(Assembler::negative, L_failed_0);
2524 // The first four tests are very dense code,
2525 // but not quite dense enough to put four
2526 // jumps in a 16-byte instruction fetch buffer.
2527 // That's good, because some branch predictors
2528 // do not like jumps so close together.
2529 // Make sure of this.
2530 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
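// Worked example (illustrative): j1off = 0x04 and j4off = 0x13 give
// (0x04 ^ 0x13) & ~15 = 0x10 != 0, i.e. the first and fourth jumps land
// in different 16-byte fetch lines, so the guarantee passes.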
2532 // registers used as temp
2533 const Register r11_length = r11; // elements count to copy
2534 const Register r10_src_klass = r10; // array klass
2536 // if (length < 0) return -1;
2537 __ movl(r11_length, length); // length (elements count, 32-bits value)
2538 __ testl(r11_length, r11_length);
2539 __ jccb(Assembler::negative, L_failed_0);
2541 __ load_klass(r10_src_klass, src);
2542 #ifdef ASSERT
2543 // assert(src->klass() != NULL);
2544 {
2545 BLOCK_COMMENT("assert klasses not null {");
2546 Label L1, L2;
2547 __ testptr(r10_src_klass, r10_src_klass);
2548 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2549 __ bind(L1);
2550 __ stop("broken null klass");
2551 __ bind(L2);
2552 __ load_klass(rax, dst);
2553 __ cmpq(rax, 0);
2554 __ jcc(Assembler::equal, L1); // this would be broken also
2555 BLOCK_COMMENT("} assert klasses not null done");
2556 }
2557 #endif
2559 // Load layout helper (32-bits)
2560 //
2561 // |array_tag|     | header_size | element_type |     |log2_element_size|
2562 // 32        30    24            16              8     2                 0
2563 //
2564 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2565 //
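//
// Comment-only decoding sketch (illustrative), using the Klass
// constants that this diagram describes:
//
//   jint lh   = klass->layout_helper();
//   int tag   = ((juint)lh) >> Klass::_lh_array_tag_shift;   // 0x3 / 0x2 / 0x0
//   int hsize = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
//   int l2es  = lh & Klass::_lh_log2_element_size_mask;      // log2(element size)
//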
2567 const int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2568 Klass::layout_helper_offset_in_bytes();
2570 // Handle objArrays completely differently...
2571 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2572 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
2573 __ jcc(Assembler::equal, L_objArray);
2575 // if (src->klass() != dst->klass()) return -1;
2576 __ load_klass(rax, dst);
2577 __ cmpq(r10_src_klass, rax);
2578 __ jcc(Assembler::notEqual, L_failed);
2580 const Register rax_lh = rax; // layout helper
2581 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2583 // if (!src->is_Array()) return -1;
2584 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2585 __ jcc(Assembler::greaterEqual, L_failed);
2587 // At this point, it is known to be a typeArray (array_tag 0x3).
2588 #ifdef ASSERT
2589 {
2590 BLOCK_COMMENT("assert primitive array {");
2591 Label L;
2592 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2593 __ jcc(Assembler::greaterEqual, L);
2594 __ stop("must be a primitive array");
2595 __ bind(L);
2596 BLOCK_COMMENT("} assert primitive array done");
2597 }
2598 #endif
2600 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2601 r10, L_failed);
2603 // typeArrayKlass
2604 //
2605 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2606 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2607 //
2609 const Register r10_offset = r10; // array offset
2610 const Register rax_elsize = rax_lh; // element size
2612 __ movl(r10_offset, rax_lh);
2613 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2614 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
2615 __ addptr(src, r10_offset); // src array offset
2616 __ addptr(dst, r10_offset); // dst array offset
2617 BLOCK_COMMENT("choose copy loop based on element size");
2618 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2620 // next registers should be set before the jump to corresponding stub
2621 const Register from = c_rarg0; // source array address
2622 const Register to = c_rarg1; // destination array address
2623 const Register count = c_rarg2; // elements count
2625 // 'from', 'to' and 'count' must be set in this order, since they
2626 // alias the incoming 'src', 'src_pos' and 'dst' registers.
2628 __ BIND(L_copy_bytes);
2629 __ cmpl(rax_elsize, 0);
2630 __ jccb(Assembler::notEqual, L_copy_shorts);
2631 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2632 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2633 __ movl2ptr(count, r11_length); // length
2634 __ jump(RuntimeAddress(byte_copy_entry));
2636 __ BIND(L_copy_shorts);
2637 __ cmpl(rax_elsize, LogBytesPerShort);
2638 __ jccb(Assembler::notEqual, L_copy_ints);
2639 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2640 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2641 __ movl2ptr(count, r11_length); // length
2642 __ jump(RuntimeAddress(short_copy_entry));
2644 __ BIND(L_copy_ints);
2645 __ cmpl(rax_elsize, LogBytesPerInt);
2646 __ jccb(Assembler::notEqual, L_copy_longs);
2647 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2648 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2649 __ movl2ptr(count, r11_length); // length
2650 __ jump(RuntimeAddress(int_copy_entry));
2652 __ BIND(L_copy_longs);
2653 #ifdef ASSERT
2654 {
2655 BLOCK_COMMENT("assert long copy {");
2656 Label L;
2657 __ cmpl(rax_elsize, LogBytesPerLong);
2658 __ jcc(Assembler::equal, L);
2659 __ stop("must be long copy, but elsize is wrong");
2660 __ bind(L);
2661 BLOCK_COMMENT("} assert long copy done");
2662 }
2663 #endif
2664 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2665 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2666 __ movl2ptr(count, r11_length); // length
2667 __ jump(RuntimeAddress(long_copy_entry));
2669 // objArrayKlass
2670 __ BIND(L_objArray);
2671 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos]
2673 Label L_plain_copy, L_checkcast_copy;
2674 // test array classes for subtyping
2675 __ load_klass(rax, dst);
2676 __ cmpq(r10_src_klass, rax); // usual case is exact equality
2677 __ jcc(Assembler::notEqual, L_checkcast_copy);
2679 // Identically typed arrays can be copied without element-wise checks.
2680 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2681 r10, L_failed);
2683 __ lea(from, Address(src, src_pos, TIMES_OOP,
2684 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2685 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2686 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2687 __ movl2ptr(count, r11_length); // length
2688 __ BIND(L_plain_copy);
2689 __ jump(RuntimeAddress(oop_copy_entry));
2691 __ BIND(L_checkcast_copy);
2692 // live at this point: r10_src_klass, r11_length, rax (dst_klass)
2693 {
2694 // Before looking at dst.length, make sure dst is also an objArray.
2695 __ cmpl(Address(rax, lh_offset), objArray_lh);
2696 __ jcc(Assembler::notEqual, L_failed);
2698 // It is safe to examine both src.length and dst.length.
2699 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2700 rax, L_failed);
2702 const Register r11_dst_klass = r11;
2703 __ load_klass(r11_dst_klass, dst); // reload
2705 // Marshal the base address arguments now, freeing registers.
2706 __ lea(from, Address(src, src_pos, TIMES_OOP,
2707 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2708 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2709 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2710 __ movl(count, length); // length (reloaded)
2711 Register sco_temp = c_rarg3; // this register is free now
2712 assert_different_registers(from, to, count, sco_temp,
2713 r11_dst_klass, r10_src_klass);
2714 assert_clean_int(count, sco_temp);
2716 // Generate the type check.
2717 const int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2718 Klass::super_check_offset_offset_in_bytes());
2719 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2720 assert_clean_int(sco_temp, rax);
2721 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2723 // Fetch destination element klass from the objArrayKlass header.
2724 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2725 objArrayKlass::element_klass_offset_in_bytes());
2726 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2727 __ movl( sco_temp, Address(r11_dst_klass, sco_offset));
2728 assert_clean_int(sco_temp, rax);
2730 // the checkcast_copy loop needs two extra arguments:
2731 assert(c_rarg3 == sco_temp, "#3 already in place");
2732 // Set up arguments for checkcast_copy_entry.
2733 setup_arg_regs(4);
2734 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
2735 __ jump(RuntimeAddress(checkcast_copy_entry));
2736 }
2738 __ BIND(L_failed);
2739 __ xorptr(rax, rax);
2740 __ notptr(rax); // return -1
2741 __ leave(); // required for proper stackwalking of RuntimeStub frame
2742 __ ret(0);
2744 return start;
2745 }
2747 void generate_arraycopy_stubs() {
2748 address entry;
2749 address entry_jbyte_arraycopy;
2750 address entry_jshort_arraycopy;
2751 address entry_jint_arraycopy;
2752 address entry_oop_arraycopy;
2753 address entry_jlong_arraycopy;
2754 address entry_checkcast_arraycopy;
2756 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
2757 "jbyte_disjoint_arraycopy");
2758 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
2759 "jbyte_arraycopy");
2761 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
2762 "jshort_disjoint_arraycopy");
2763 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
2764 "jshort_arraycopy");
2766 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry,
2767 "jint_disjoint_arraycopy");
2768 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry,
2769 &entry_jint_arraycopy, "jint_arraycopy");
2771 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry,
2772 "jlong_disjoint_arraycopy");
2773 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry,
2774 &entry_jlong_arraycopy, "jlong_arraycopy");
2777 if (UseCompressedOops) {
2778 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry,
2779 "oop_disjoint_arraycopy");
2780 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry,
2781 &entry_oop_arraycopy, "oop_arraycopy");
2782 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
2783 "oop_disjoint_arraycopy_uninit",
2784 /*dest_uninitialized*/true);
2785 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
2786 NULL, "oop_arraycopy_uninit",
2787 /*dest_uninitialized*/true);
2788 } else {
2789 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
2790 "oop_disjoint_arraycopy");
2791 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry,
2792 &entry_oop_arraycopy, "oop_arraycopy");
2793 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
2794 "oop_disjoint_arraycopy_uninit",
2795 /*dest_uninitialized*/true);
2796 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
2797 NULL, "oop_arraycopy_uninit",
2798 /*dest_uninitialized*/true);
2799 }
2801 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
2802 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
2803 /*dest_uninitialized*/true);
2805 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
2806 entry_jbyte_arraycopy,
2807 entry_jshort_arraycopy,
2808 entry_jint_arraycopy,
2809 entry_jlong_arraycopy);
2810 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
2811 entry_jbyte_arraycopy,
2812 entry_jshort_arraycopy,
2813 entry_jint_arraycopy,
2814 entry_oop_arraycopy,
2815 entry_jlong_arraycopy,
2816 entry_checkcast_arraycopy);
2818 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2819 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2820 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2821 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2822 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2823 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2825 // We don't generate specialized code for HeapWord-aligned source
2826 // arrays, so just use the code we've already generated
2827 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2828 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2830 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2831 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2833 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2834 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2836 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2837 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2839 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2840 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2842 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
2843 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
2844 }
2846 void generate_math_stubs() {
2847 {
2848 StubCodeMark mark(this, "StubRoutines", "log");
2849 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2851 __ subq(rsp, 8);
2852 __ movdbl(Address(rsp, 0), xmm0);
2853 __ fld_d(Address(rsp, 0));
2854 __ flog();
2855 __ fstp_d(Address(rsp, 0));
2856 __ movdbl(xmm0, Address(rsp, 0));
2857 __ addq(rsp, 8);
2858 __ ret(0);
2859 }
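// The same marshaling pattern repeats for each x87-based intrinsic in
// this method: the argument arrives in xmm0 (SSE calling convention),
// is spilled to the stack, reloaded onto the x87 stack for the actual
// computation (flog, flog10, trigfunc, ...), then stored back and
// reloaded into xmm0 as the return value. Roughly (illustrative):
//
//   sub rsp, 8 ; movsd [rsp], xmm0        // xmm0 -> memory
//   fld qword [rsp] ; f<op>               // memory -> st(0), compute
//   fstp qword [rsp] ; movsd xmm0, [rsp] ; add rsp, 8 ; ret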
2860 {
2861 StubCodeMark mark(this, "StubRoutines", "log10");
2862 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2864 __ subq(rsp, 8);
2865 __ movdbl(Address(rsp, 0), xmm0);
2866 __ fld_d(Address(rsp, 0));
2867 __ flog10();
2868 __ fstp_d(Address(rsp, 0));
2869 __ movdbl(xmm0, Address(rsp, 0));
2870 __ addq(rsp, 8);
2871 __ ret(0);
2872 }
2873 {
2874 StubCodeMark mark(this, "StubRoutines", "sin");
2875 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2877 __ subq(rsp, 8);
2878 __ movdbl(Address(rsp, 0), xmm0);
2879 __ fld_d(Address(rsp, 0));
2880 __ trigfunc('s');
2881 __ fstp_d(Address(rsp, 0));
2882 __ movdbl(xmm0, Address(rsp, 0));
2883 __ addq(rsp, 8);
2884 __ ret(0);
2885 }
2886 {
2887 StubCodeMark mark(this, "StubRoutines", "cos");
2888 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2890 __ subq(rsp, 8);
2891 __ movdbl(Address(rsp, 0), xmm0);
2892 __ fld_d(Address(rsp, 0));
2893 __ trigfunc('c');
2894 __ fstp_d(Address(rsp, 0));
2895 __ movdbl(xmm0, Address(rsp, 0));
2896 __ addq(rsp, 8);
2897 __ ret(0);
2898 }
2899 {
2900 StubCodeMark mark(this, "StubRoutines", "tan");
2901 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2903 __ subq(rsp, 8);
2904 __ movdbl(Address(rsp, 0), xmm0);
2905 __ fld_d(Address(rsp, 0));
2906 __ trigfunc('t');
2907 __ fstp_d(Address(rsp, 0));
2908 __ movdbl(xmm0, Address(rsp, 0));
2909 __ addq(rsp, 8);
2910 __ ret(0);
2911 }
2913 // The intrinsic versions of these seem to return the same values as
2914 // the strict versions.
2915 StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
2916 StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
2917 }
2919 #undef __
2920 #define __ masm->
2922 // Continuation point for throwing of implicit exceptions that are
2923 // not handled in the current activation. Fabricates an exception
2924 // oop and initiates normal exception dispatching in this
2925 // frame. Since we need to preserve callee-saved values (currently
2926 // only for C2, but done for C1 as well) we need a callee-saved oop
2927 // map and therefore have to make these stubs into RuntimeStubs
2928 // rather than BufferBlobs. If the compiler needs all registers to
2929 // be preserved between the fault point and the exception handler
2930 // then it must assume responsibility for that in
2931 // AbstractCompiler::continuation_for_implicit_null_exception or
2932 // continuation_for_implicit_division_by_zero_exception. All other
2933 // implicit exceptions (e.g., NullPointerException or
2934 // AbstractMethodError on entry) are either at call sites or
2935 // otherwise assume that stack unwinding will be initiated, so
2936 // caller saved registers were assumed volatile in the compiler.
2937 address generate_throw_exception(const char* name,
2938 address runtime_entry,
2939 Register arg1 = noreg,
2940 Register arg2 = noreg) {
2941 // Information about frame layout at time of blocking runtime call.
2942 // Note that we only have to preserve callee-saved registers since
2943 // the compilers are responsible for supplying a continuation point
2944 // if they expect all registers to be preserved.
2945 enum layout {
2946 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
2947 rbp_off2,
2948 return_off,
2949 return_off2,
2950 framesize // inclusive of return address
2951 };
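// Worked example (illustrative, assuming frame::arg_reg_save_area_bytes
// is 0 on Linux and 32 on Win64): the enum counts 32-bit slots. On
// Linux, rbp_off = 0 and framesize = 4 (rbp and the return address take
// two slots each), so the prolog's subptr below reserves
// (framesize - 4) * 4 = 0 extra bytes. On Win64, rbp_off = 8 and
// framesize = 12, so the prolog reserves the 32-byte register save area.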
2953 int insts_size = 512;
2954 int locs_size = 64;
2956 CodeBuffer code(name, insts_size, locs_size);
2957 OopMapSet* oop_maps = new OopMapSet();
2958 MacroAssembler* masm = new MacroAssembler(&code);
2960 address start = __ pc();
2962 // This is an inlined and slightly modified version of call_VM
2963 // which has the ability to fetch the return PC out of
2964 // thread-local storage and also sets up last_Java_sp slightly
2965 // differently than the real call_VM
2967 __ enter(); // required for proper stackwalking of RuntimeStub frame
2969 assert(is_even(framesize/2), "sp not 16-byte aligned");
2971 // return address and rbp are already in place
2972 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
2974 int frame_complete = __ pc() - start;
2976 // Set up last_Java_sp and last_Java_fp
2977 __ set_last_Java_frame(rsp, rbp, NULL);
2979 // Call runtime
2980 if (arg1 != noreg) {
2981 assert(arg2 != c_rarg1, "clobbered");
2982 __ movptr(c_rarg1, arg1);
2983 }
2984 if (arg2 != noreg) {
2985 __ movptr(c_rarg2, arg2);
2986 }
2987 __ movptr(c_rarg0, r15_thread);
2988 BLOCK_COMMENT("call runtime_entry");
2989 __ call(RuntimeAddress(runtime_entry));
2991 // Generate oop map
2992 OopMap* map = new OopMap(framesize, 0);
2994 oop_maps->add_gc_map(__ pc() - start, map);
2996 __ reset_last_Java_frame(true, false);
2998 __ leave(); // required for proper stackwalking of RuntimeStub frame
3000 // check for pending exceptions
3001 #ifdef ASSERT
3002 Label L;
3003 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
3004 (int32_t) NULL_WORD);
3005 __ jcc(Assembler::notEqual, L);
3006 __ should_not_reach_here();
3007 __ bind(L);
3008 #endif // ASSERT
3009 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3012 // codeBlob framesize is in words (not VMRegImpl::slot_size)
3013 RuntimeStub* stub =
3014 RuntimeStub::new_runtime_stub(name,
3015 &code,
3016 frame_complete,
3017 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
3018 oop_maps, false);
3019 return stub->entry_point();
3020 }
3022 // Initialization
3023 void generate_initial() {
3024 // Generates all stubs and initializes the entry points
3026 // This platform-specific stub is needed by generate_call_stub()
3027 StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
3029 // Entry points that exist on all platforms. Note: this is code
3030 // that could be shared among different platforms; however, the
3031 // benefit seems to be smaller than the disadvantage of having a
3032 // much more complicated generator structure. See also the comment in
3033 // stubRoutines.hpp.
3035 StubRoutines::_forward_exception_entry = generate_forward_exception();
3037 StubRoutines::_call_stub_entry =
3038 generate_call_stub(StubRoutines::_call_stub_return_address);
3040 // is referenced by megamorphic call
3041 StubRoutines::_catch_exception_entry = generate_catch_exception();
3043 // atomic calls
3044 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
3045 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
3046 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
3047 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3048 StubRoutines::_atomic_add_entry = generate_atomic_add();
3049 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
3050 StubRoutines::_fence_entry = generate_orderaccess_fence();
3052 StubRoutines::_handler_for_unsafe_access_entry =
3053 generate_handler_for_unsafe_access();
3055 // platform dependent
3056 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
3058 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
3060 // Build this early so it's available for the interpreter. Stub
3061 // expects the required and actual types as register arguments in
3062 // j_rarg0 and j_rarg1 respectively.
3063 StubRoutines::_throw_WrongMethodTypeException_entry =
3064 generate_throw_exception("WrongMethodTypeException throw_exception",
3065 CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
3066 rax, rcx);
3067 }
3069 void generate_all() {
3070 // Generates all stubs and initializes the entry points
3072 // These entry points require SharedInfo::stack0 to be set up in
3073 // non-core builds and need to be relocatable, so they each
3074 // fabricate a RuntimeStub internally.
3075 StubRoutines::_throw_AbstractMethodError_entry =
3076 generate_throw_exception("AbstractMethodError throw_exception",
3077 CAST_FROM_FN_PTR(address,
3078 SharedRuntime::
3079 throw_AbstractMethodError));
3081 StubRoutines::_throw_IncompatibleClassChangeError_entry =
3082 generate_throw_exception("IncompatibleClassChangeError throw_exception",
3083 CAST_FROM_FN_PTR(address,
3084 SharedRuntime::
3085 throw_IncompatibleClassChangeError));
3087 StubRoutines::_throw_NullPointerException_at_call_entry =
3088 generate_throw_exception("NullPointerException at call throw_exception",
3089 CAST_FROM_FN_PTR(address,
3090 SharedRuntime::
3091 throw_NullPointerException_at_call));
3093 StubRoutines::_throw_StackOverflowError_entry =
3094 generate_throw_exception("StackOverflowError throw_exception",
3095 CAST_FROM_FN_PTR(address,
3096 SharedRuntime::
3097 throw_StackOverflowError));
3099 // entry points that are platform specific
3100 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
3101 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
3102 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
3103 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
3105 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
3106 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
3107 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
3108 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
3110 // support for verify_oop (must happen after universe_init)
3111 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
3113 // arraycopy stubs used by compilers
3114 generate_arraycopy_stubs();
3116 generate_math_stubs();
3117 }
3119 public:
3120 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3121 if (all) {
3122 generate_all();
3123 } else {
3124 generate_initial();
3125 }
3126 }
3127 }; // end class declaration
3129 void StubGenerator_generate(CodeBuffer* code, bool all) {
3130 StubGenerator g(code, all);
3131 }