/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_stubGenerator_x86_64.cpp.incl"

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions
static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}
class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#else
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif
  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 methodOop
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 methodOop
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  //  -8 [ argument word 1      ]
  //  -7 [ saved r15            ] <--- rsp_after_call
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  // Windows reserves the caller's stack space for arguments 1-4.
  // We spill c_rarg0-c_rarg3 to this space.
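  //
  // Illustrative sketch (added; not part of this file): the VM reaches the
  // generated stub through the CallStub function pointer type declared in
  // stubRoutines.hpp, roughly as JavaCalls::call_helper() does. The exact
  // names below are quoted from memory and may not match this tree:
  //
  //   StubRoutines::call_stub()(
  //       (address)&link,              // call wrapper       (c_rarg0)
  //       result_val_address,          // result             (c_rarg1)
  //       result_type,                 // BasicType          (c_rarg2)
  //       method(),                    // methodOop          (c_rarg3)
  //       entry_point,                 // interpreter entry  (c_rarg4 / 48(rbp))
  //       args->parameters(),          // intptr_t*          (c_rarg5 / 56(rbp))
  //       args->size_of_parameters(),  // size in words      (16(rbp) / 64(rbp))
  //       thread);                     // Thread*            (24(rbp) / 72(rbp))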
  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    rsp_after_call_off = -7,
    r15_off            = rsp_after_call_off,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);     // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize);       // advance to next parameter
    __ decrementl(c_rarg1);             // decrement counter
    __ push(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get methodOop
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L);
    }
#endif

    // restore regs belonging to calling function
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up
  // rsp.
  //
  // rax: exception oop
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.
#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif
    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }
  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
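
  // Illustrative sketch (added; an assumption, not part of the build): the
  // generated stub behaves like the GCC inline-asm function below. On x86 an
  // xchg with a memory operand asserts the LOCK signal implicitly.
  //
  //   jint atomic_xchg_sketch(jint exchange_value, volatile jint* dest) {
  //     __asm__ volatile("xchgl %0,%1"
  //                      : "+r"(exchange_value), "+m"(*dest) : : "memory");
  //     return exchange_value;  // the previous value of *dest
  //   }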
  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to rax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
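
  // Illustrative sketch (added; an assumption): the stub is equivalent to the
  // GCC inline-asm function below. cmpxchg compares against eax/rax implicitly
  // (the "a" constraint) and leaves the observed memory value there.
  //
  //   jint atomic_cmpxchg_sketch(jint exchange_value, volatile jint* dest,
  //                              jint compare_value) {
  //     jint observed = compare_value;
  //     __asm__ volatile("lock; cmpxchgl %2,%1"
  //                      : "+a"(observed), "+m"(*dest)
  //                      : "r"(exchange_value)
  //                      : "cc", "memory");
  //     return observed;  // == compare_value iff the store happened
  //   }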
  // Support for jlong atomic::atomic_cmpxchg_long(jlong exchange_value,
  //                                               volatile jlong* dest,
  //                                               jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if (os::is_MP()) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }
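
  // Illustrative sketch (added; an assumption): xadd atomically exchanges the
  // addend with the old memory value while storing the sum, so the new value
  // is reconstructed as old + add_value:
  //
  //   jint atomic_add_sketch(jint add_value, volatile jint* dest) {
  //     jint old = add_value;
  //     __asm__ volatile("lock; xaddl %0,%1"
  //                      : "+r"(old), "+m"(*dest) : : "cc", "memory");
  //     return old + add_value;  // the updated value of *dest
  //   }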
  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to rax; we need a return value anyhow
    if (os::is_MP()) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }
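
  // Note (added): under the x86_64 memory model only StoreLoad reordering is
  // visible to software, so the single StoreLoad membar emitted above (an
  // mfence or an equivalent lock-prefixed no-op) suffices as a full fence.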
  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }
  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }
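
  // Note (added): the four fixup stubs below patch the result of cvttss2si /
  // cvttsd2si, which produce the "integer indefinite" value (min_jint or
  // min_jlong) for NaN and out-of-range inputs. Java instead requires, e.g.
  // for f2i (sketch):
  //
  //   if (isnan(f))            return 0;
  //   else if (f >= max_jint)  return max_jint;
  //   else if (f <= min_jint)  return min_jint;
  //
  // Each stub inspects the original float/double bits left on the stack by
  // the caller and replaces the result accordingly.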
  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }
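
  // Note (added): this emits a 16-byte constant (the mask replicated twice)
  // suitable as a memory operand for SSE instructions; the sign-mask stubs
  // produced this way are typically combined with andpd/xorpd to implement
  // branch-free float and double abs() and negate().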
  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                       // hole for return address-to-be
    __ pusha();                       // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    __ subptr(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addptr(rsp, frame::arg_reg_save_area_bytes);

    __ movptr(next_pc, rax);          // stuff next address
    __ popa();
    __ ret(0);                        // jump to next address

    return start;
  }
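
  // Control-flow sketch (added): the platform signal handler records the
  // faulting pc in the thread's saved_exception_pc and resumes execution at
  // this stub. The stub reserves a return-address slot, calls
  // handle_unsafe_access() above to compute the address of the instruction
  // after the faulting one, stores that address into the slot, and 'ret's
  // to it -- effectively skipping the faulting load while the pending
  // unsafe-access error surfaces asynchronously.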
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable'
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken
    // Check if the klass is in the right area of memory
    __ mov(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // make sure klass' klass is 'reasonable'
    __ load_klass(rax, rax);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
    // Check if the klass' klass is in the right area of memory
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
    __ andptr(rax, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
    __ cmpptr(rax, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
    __ pop(c_rarg3);                         // restore c_rarg3
    __ pop(c_rarg2);                         // restore c_rarg2
    __ pop(r12);                             // restore r12
    __ popf();                               // restore flags
    __ ret(3 * wordSize);                    // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
    __ pop(c_rarg3);                         // get saved c_rarg3 back
    __ pop(c_rarg2);                         // get saved c_rarg2 back
    __ pop(r12);                             // get saved r12 back
    __ popf();                               // get saved flags off stack --
                                             // will be ignored
    __ pusha();                              // push registers
                                             // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(3 * wordSize);                           // pop caller saved stuff

    return start;
  }
  static address disjoint_byte_copy_entry;
  static address disjoint_short_copy_entry;
  static address disjoint_int_copy_entry;
  static address disjoint_long_copy_entry;
  static address disjoint_oop_copy_entry;

  static address byte_copy_entry;
  static address short_copy_entry;
  static address int_copy_entry;
  static address long_copy_entry;
  static address oop_copy_entry;

  static address checkcast_copy_entry;
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  // Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }
  // Generate overlap test for array copy stubs
  //
  // Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  // Output:
  //     rax   - &from[element count]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
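
  // Conceptual sketch (added) of the condition tested above: a forward
  // (disjoint-style) copy is safe when the destination does not fall
  // strictly inside the source range, i.e.
  //
  //   bool no_overlap(char* from, char* to, size_t byte_count) {
  //     return to <= from                 // copying up is always safe
  //         || to >= from + byte_count;   // destination past the source end
  //   }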
  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where the
  // latter are non-volatile.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }
  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
  // Generate code for an array write pre barrier
  //
  //     addr    -  starting address
  //     count   -  element count
  //
  //     Destroy no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }
  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     end      - register containing ending address of destination array
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //  The ending address is inclusive.
  void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
    assert_different_registers(start, end, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();                      // push registers (overkill)
          // must compute element count unless barrier set interface is changed (other platforms supply count)
          assert_different_registers(start, end, scratch);
          __ lea(scratch, Address(end, BytesPerHeapOop));
          __ subptr(scratch, start);               // subtract start to get #bytes
          __ shrptr(scratch, LogBytesPerHeapOop);  // convert to element count
          __ mov(c_rarg0, start);
          __ mov(c_rarg1, scratch);
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;

          __ shrptr(start, CardTableModRefBS::card_shift);
          __ addptr(end, BytesPerHeapOop);
          __ shrptr(end, CardTableModRefBS::card_shift);
          __ subptr(end, start);                   // number of bytes to copy

          intptr_t disp = (intptr_t) ct->byte_map_base;
          if (__ is_simm32(disp)) {
            Address cardtable(noreg, noreg, Address::no_scale, disp);
            __ lea(scratch, cardtable);
          } else {
            ExternalAddress cardtable((address)disp);
            __ lea(scratch, cardtable);
          }

          const Register count = end; // 'end' register contains bytes count now
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }
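
  // Card-table sketch (added): the loop above (which actually counts down) is
  // the assembly form of dirtying every card spanned by [start, end],
  // conceptually, with card_shift = CardTableModRefBS::card_shift:
  //
  //   void dirty_cards(jbyte* byte_map_base, char* start, char* end) {
  //     for (uintptr_t card = (uintptr_t)start >> card_shift;   // first card
  //          card <= ((uintptr_t)end >> card_shift);            // last card
  //          card++) {
  //       byte_map_base[card] = 0;                              // dirty it
  //     }
  //   }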
  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array's end address
  //   end_to       - destination array's end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_32_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_32_bytes_forward(Register end_from, Register end_to,
                             Register qword_count, Register to,
                             Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (UseUnalignedLoadStores) {
      __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
      __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
      __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
    } else {
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
    }
    __ BIND(L_copy_32_bytes);
    __ addptr(qword_count, 4);
    __ jcc(Assembler::lessEqual, L_loop);
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
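
  // Loop-structure sketch (added): qword_count starts negative and counts up
  // toward zero, so end_from/end_to stay fixed while the effective address
  // end + 8*qword_count walks forward. In C terms, assuming the callers'
  // register setup:
  //
  //   while ((qword_count += 4) <= 0) {      // 4 qwords = 32 bytes per trip
  //     memcpy(end_to   + 8*qword_count - 24,
  //            end_from + 8*qword_count - 24, 32);
  //   }
  //   qword_count -= 4;                      // undo overshoot
  //   if (qword_count < 0) goto L_copy_8_bytes;  // trailing qwords remain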
  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array's address
  //   dest         - destination array's address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_32_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_32_bytes_backward(Register from, Register dest,
                              Register qword_count, Register to,
                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (UseUnalignedLoadStores) {
      __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
      __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
      __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
      __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
    } else {
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);
    }
    __ BIND(L_copy_32_bytes);
    __ subptr(qword_count, 4);
    __ jcc(Assembler::greaterEqual, L_loop);
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    disjoint_byte_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    byte_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3);   // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    disjoint_short_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    short_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    array_overlap_test(disjoint_short_copy_entry, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
1725 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
1726 __ align(CodeEntryAlignment);
1727 StubCodeMark mark(this, "StubRoutines", name);
1728 address start = __ pc();
1730 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1731 const Register from = rdi; // source array address
1732 const Register to = rsi; // destination array address
1733 const Register count = rdx; // elements count
1734 const Register dword_count = rcx;
1735 const Register qword_count = count;
1736 const Register end_from = from; // source array end address
1737 const Register end_to = to; // destination array end address
1738 const Register saved_to = r11; // saved destination array address
1739 // End pointers are inclusive, and if count is not zero they point
1740 // to the last unit copied: end_to[0] := end_from[0]
1742 __ enter(); // required for proper stackwalking of RuntimeStub frame
1743 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1745 (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();
1747 if (is_oop) {
1748 // no registers are destroyed by this call
1749 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1750 }
1752 BLOCK_COMMENT("Entry:");
1753 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1755 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1756 // r9 and r10 may be used to save non-volatile registers
1758 if (is_oop) {
1759 __ movq(saved_to, to);
1760 }
1762 // 'from', 'to' and 'count' are now valid
1763 __ movptr(dword_count, count);
1764 __ shrptr(count, 1); // count => qword_count
1766 // Copy from low to high addresses. Use 'to' as scratch.
1767 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1768 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1769 __ negptr(qword_count);
1770 __ jmp(L_copy_32_bytes);
1772 // Copy trailing qwords
1773 __ BIND(L_copy_8_bytes);
1774 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1775 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1776 __ increment(qword_count);
1777 __ jcc(Assembler::notZero, L_copy_8_bytes);
1779 // Check for and copy trailing dword
1780 __ BIND(L_copy_4_bytes);
1781 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1782 __ jccb(Assembler::zero, L_exit);
1783 __ movl(rax, Address(end_from, 8));
1784 __ movl(Address(end_to, 8), rax);
1786 __ BIND(L_exit);
1787 if (is_oop) {
1788 __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1789 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1790 }
1791 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1792 restore_arg_regs();
1793 __ xorptr(rax, rax); // return 0
1794 __ leave(); // required for proper stackwalking of RuntimeStub frame
1795 __ ret(0);
1797 // Copy in 32-byte chunks
1798 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1799 __ jmp(L_copy_4_bytes);
1801 return start;
1802 }
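// Editorial sketch (not part of the original source): the disjoint stubs
// above use inclusive end pointers plus a negated count, so a single
// increment drives both the source and destination addresses; roughly,
// in C:
//
//   jlong* end_from = (jlong*)from + qword_count - 1;  // inclusive ends
//   jlong* end_to   = (jlong*)to   + qword_count - 1;
//   for (ptrdiff_t i = 1 - (ptrdiff_t)qword_count; i <= 0; i++)
//     end_to[i] = end_from[i];                         // low to high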
1804 // Arguments:
1805 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1806 // ignored
1807 // is_oop - true => oop array, so generate store check code
1808 // name - stub name string
1809 //
1810 // Inputs:
1811 // c_rarg0 - source array address
1812 // c_rarg1 - destination array address
1813 // c_rarg2 - element count, treated as ssize_t, can be zero
1814 //
1815 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1816 // the hardware handle it. The two dwords within qwords that span
1817 // cache line boundaries will still be loaded and stored atomically.
1818 //
1819 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
1820 __ align(CodeEntryAlignment);
1821 StubCodeMark mark(this, "StubRoutines", name);
1822 address start = __ pc();
1824 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1825 const Register from = rdi; // source array address
1826 const Register to = rsi; // destination array address
1827 const Register count = rdx; // elements count
1828 const Register dword_count = rcx;
1829 const Register qword_count = count;
1831 __ enter(); // required for proper stackwalking of RuntimeStub frame
1832 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1834 if (is_oop) {
1835 // no registers are destroyed by this call
1836 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1837 }
1839 (is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
1840 BLOCK_COMMENT("Entry:");
1841 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1843 array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
1844 Address::times_4);
1845 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1846 // r9 and r10 may be used to save non-volatile registers
1848 assert_clean_int(count, rax); // Make sure 'count' is clean int.
1849 // 'from', 'to' and 'count' are now valid
1850 __ movptr(dword_count, count);
1851 __ shrptr(count, 1); // count => qword_count
1853 // Copy from high to low addresses.
1855 // Check for and copy trailing dword
1856 __ testl(dword_count, 1);
1857 __ jcc(Assembler::zero, L_copy_32_bytes);
1858 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1859 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1860 __ jmp(L_copy_32_bytes);
1862 // Copy trailing qwords
1863 __ BIND(L_copy_8_bytes);
1864 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1865 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1866 __ decrement(qword_count);
1867 __ jcc(Assembler::notZero, L_copy_8_bytes);
1869 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1870 if (is_oop) {
1871 __ jmp(L_exit);
1872 }
1873 restore_arg_regs();
1874 __ xorptr(rax, rax); // return 0
1875 __ leave(); // required for proper stackwalking of RuntimeStub frame
1876 __ ret(0);
1878 // Copy in 32-byte chunks
1879 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1881 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1882 __ bind(L_exit);
1883 if (is_oop) {
1884 Register end_to = rdx;
1885 __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
1886 gen_write_ref_array_post_barrier(to, end_to, rax);
1887 }
1888 restore_arg_regs();
1889 __ xorptr(rax, rax); // return 0
1890 __ leave(); // required for proper stackwalking of RuntimeStub frame
1891 __ ret(0);
1893 return start;
1894 }
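// Editorial note (not part of the original source): array_overlap_test,
// defined earlier in this file, dispatches to the disjoint (forward) entry
// whenever a backward copy is unnecessary; in effect (assuming unsigned
// pointer arithmetic):
//
//   if ((uintptr_t)(to - from) >= (uintptr_t)count << log2_elem_size)
//     goto disjoint_entry;  // forward copy cannot clobber unread source
//   // otherwise fall through to the backward copy above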
1896 // Arguments:
1897 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1898 // ignored
1899 // is_oop - true => oop array, so generate store check code
1900 // name - stub name string
1901 //
1902 // Inputs:
1903 // c_rarg0 - source array address
1904 // c_rarg1 - destination array address
1905 // c_rarg2 - element count, treated as ssize_t, can be zero
1906 //
1907 // Side Effects:
1908 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
1909 // no-overlap entry point used by generate_conjoint_long_oop_copy().
1910 //
1911 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
1912 __ align(CodeEntryAlignment);
1913 StubCodeMark mark(this, "StubRoutines", name);
1914 address start = __ pc();
1916 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1917 const Register from = rdi; // source array address
1918 const Register to = rsi; // destination array address
1919 const Register qword_count = rdx; // elements count
1920 const Register end_from = from; // source array end address
1921 const Register end_to = rcx; // destination array end address
1922 const Register saved_to = to;
1923 // End pointers are inclusive, and if count is not zero they point
1924 // to the last unit copied: end_to[0] := end_from[0]
1926 __ enter(); // required for proper stackwalking of RuntimeStub frame
1927 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
1928 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1930 if (is_oop) {
1931 disjoint_oop_copy_entry = __ pc();
1932 // no registers are destroyed by this call
1933 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1934 } else {
1935 disjoint_long_copy_entry = __ pc();
1936 }
1937 BLOCK_COMMENT("Entry:");
1938 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1940 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1941 // r9 and r10 may be used to save non-volatile registers
1943 // 'from', 'to' and 'qword_count' are now valid
1945 // Copy from low to high addresses. Use 'to' as scratch.
1946 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1947 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1948 __ negptr(qword_count);
1949 __ jmp(L_copy_32_bytes);
1951 // Copy trailing qwords
1952 __ BIND(L_copy_8_bytes);
1953 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1954 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1955 __ increment(qword_count);
1956 __ jcc(Assembler::notZero, L_copy_8_bytes);
1958 if (is_oop) {
1959 __ jmp(L_exit);
1960 } else {
1961 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1962 restore_arg_regs();
1963 __ xorptr(rax, rax); // return 0
1964 __ leave(); // required for proper stackwalking of RuntimeStub frame
1965 __ ret(0);
1966 }
1968 // Copy in 32-byte chunks
1969 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1971 if (is_oop) {
1972 __ BIND(L_exit);
1973 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1974 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1975 } else {
1976 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1977 }
1978 restore_arg_regs();
1979 __ xorptr(rax, rax); // return 0
1980 __ leave(); // required for proper stackwalking of RuntimeStub frame
1981 __ ret(0);
1983 return start;
1984 }
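// Editorial note (not part of the original source): copy_32_bytes_forward
// and copy_32_bytes_backward, defined earlier in this file, hold the shared
// bulk loop; each iteration moves 32 bytes (four qwords), and any remaining
// one to three qwords are finished by the L_copy_8_bytes loops above.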
1986 // Arguments:
1987 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1988 // ignored
1989 // is_oop - true => oop array, so generate store check code
1990 // name - stub name string
1991 //
1992 // Inputs:
1993 // c_rarg0 - source array address
1994 // c_rarg1 - destination array address
1995 // c_rarg2 - element count, treated as ssize_t, can be zero
1996 //
1997 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
1998 __ align(CodeEntryAlignment);
1999 StubCodeMark mark(this, "StubRoutines", name);
2000 address start = __ pc();
2002 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
2003 const Register from = rdi; // source array address
2004 const Register to = rsi; // destination array address
2005 const Register qword_count = rdx; // elements count
2006 const Register saved_count = rcx;
2008 __ enter(); // required for proper stackwalking of RuntimeStub frame
2009 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2011 address disjoint_copy_entry = NULL;
2012 if (is_oop) {
2013 assert(!UseCompressedOops, "shouldn't be called for compressed oops");
2014 disjoint_copy_entry = disjoint_oop_copy_entry;
2015 oop_copy_entry = __ pc();
2016 array_overlap_test(disjoint_oop_copy_entry, Address::times_8);
2017 } else {
2018 disjoint_copy_entry = disjoint_long_copy_entry;
2019 long_copy_entry = __ pc();
2020 array_overlap_test(disjoint_long_copy_entry, Address::times_8);
2021 }
2022 BLOCK_COMMENT("Entry:");
2023 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2025 array_overlap_test(disjoint_copy_entry, Address::times_8);
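// (Editorial note, not part of the original source: control reaches this
// point only by falling through the overlap test emitted in the if/else
// above, so this second test over the same operands always falls through
// as well; it is redundant but harmless.)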
2026 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2027 // r9 and r10 may be used to save non-volatile registers
2029 // 'from', 'to' and 'qword_count' are now valid
2031 if (is_oop) {
2032 // Save to and count for store barrier
2033 __ movptr(saved_count, qword_count);
2034 // No registers are destroyed by this call
2035 gen_write_ref_array_pre_barrier(to, saved_count);
2036 }
2038 __ jmp(L_copy_32_bytes);
2040 // Copy trailing qwords
2041 __ BIND(L_copy_8_bytes);
2042 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2043 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2044 __ decrement(qword_count);
2045 __ jcc(Assembler::notZero, L_copy_8_bytes);
2047 if (is_oop) {
2048 __ jmp(L_exit);
2049 } else {
2050 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2051 restore_arg_regs();
2052 __ xorptr(rax, rax); // return 0
2053 __ leave(); // required for proper stackwalking of RuntimeStub frame
2054 __ ret(0);
2055 }
2057 // Copy in 32-byte chunks
2058 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2060 if (is_oop) {
2061 __ BIND(L_exit);
2062 __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
2063 gen_write_ref_array_post_barrier(to, rcx, rax);
2064 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2065 } else {
2066 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2067 }
2068 restore_arg_regs();
2069 __ xorptr(rax, rax); // return 0
2070 __ leave(); // required for proper stackwalking of RuntimeStub frame
2071 __ ret(0);
2073 return start;
2074 }
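// Editorial sketch (not part of the original source; assumes the usual
// HotSpot card table with 512-byte cards): for oop arrays the post barrier
// dirties every card spanned by the inclusive range [to, end_to], roughly:
//
//   for (size_t i = (uintptr_t)to >> 9; i <= (uintptr_t)end_to >> 9; i++)
//     card_table[i] = dirty_card;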
2077 // Helper for generating a dynamic type check.
2078 // Smashes no registers.
2079 void generate_type_check(Register sub_klass,
2080 Register super_check_offset,
2081 Register super_klass,
2082 Label& L_success) {
2083 assert_different_registers(sub_klass, super_check_offset, super_klass);
2085 BLOCK_COMMENT("type_check:");
2087 Label L_miss;
2089 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
2090 super_check_offset);
2091 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
2093 // Fall through on failure!
2094 __ BIND(L_miss);
2095 }
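// Editorial sketch (not part of the original source): the fast/slow path
// pair above implements roughly the standard HotSpot subtype check:
//
//   if (sub_klass == super_klass) goto L_success;      // trivial exact hit
//   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
//     goto L_success;                                  // display/cache hit
//   // slow path: linear scan of sub_klass's secondary supers;
//   // found => L_success, otherwise fall through to L_miss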
2097 //
2098 // Generate checkcasting array copy stub
2099 //
2100 // Input:
2101 // c_rarg0 - source array address
2102 // c_rarg1 - destination array address
2103 // c_rarg2 - element count, treated as ssize_t, can be zero
2104 // c_rarg3 - size_t ckoff (super_check_offset)
2105 // not Win64
2106 // c_rarg4 - oop ckval (super_klass)
2107 // Win64
2108 // rsp+40 - oop ckval (super_klass)
2109 //
2110 // Output:
2111 // rax == 0 - success
2112 // rax == -1^K - failure, where K is partial transfer count
2113 //
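// Editorial worked example (not part of the original source): "-1^K" is
// bitwise XOR, i.e. rax == ~K. If a type check fails after K == 3 elements
// have been stored, rax == -1^3 == -4, and the caller recovers the partial
// transfer count as K == ~rax == 3; rax == 0 still means complete success.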
2114 address generate_checkcast_copy(const char *name) {
2116 Label L_load_element, L_store_element, L_do_card_marks, L_done;
2118 // Input registers (after setup_arg_regs)
2119 const Register from = rdi; // source array address
2120 const Register to = rsi; // destination array address
2121 const Register length = rdx; // elements count
2122 const Register ckoff = rcx; // super_check_offset
2123 const Register ckval = r8; // super_klass
2125 // Registers used as temps (r13, r14 are save-on-entry)
2126 const Register end_from = from; // source array end address
2127 const Register end_to = r13; // destination array end address
2128 const Register count = rdx; // -(count_remaining)
2129 const Register r14_length = r14; // saved copy of length
2130 // End pointers are inclusive, and if length is not zero they point
2131 // to the last unit copied: end_to[0] := end_from[0]
2133 const Register rax_oop = rax; // actual oop copied
2134 const Register r11_klass = r11; // oop._klass
2136 //---------------------------------------------------------------
2137 // Assembler stub will be used for this call to arraycopy
2138 // if the two arrays are subtypes of Object[] but the
2139 // destination array type is not equal to or a supertype
2140 // of the source type. Each element must be separately
2141 // checked.
2143 __ align(CodeEntryAlignment);
2144 StubCodeMark mark(this, "StubRoutines", name);
2145 address start = __ pc();
2147 __ enter(); // required for proper stackwalking of RuntimeStub frame
2149 checkcast_copy_entry = __ pc();
2150 BLOCK_COMMENT("Entry:");
2152 #ifdef ASSERT
2153 // caller guarantees that the arrays really are different
2154 // otherwise, we would have to make conjoint checks
2155 { Label L;
2156 array_overlap_test(L, TIMES_OOP);
2157 __ stop("checkcast_copy within a single array");
2158 __ bind(L);
2159 }
2160 #endif //ASSERT
2162 // allocate spill slots for r13, r14
2163 enum {
2164 saved_r13_offset,
2165 saved_r14_offset,
2166 saved_rbp_offset,
2167 saved_rip_offset,
2168 saved_rarg0_offset
2169 };
2170 __ subptr(rsp, saved_rbp_offset * wordSize);
2171 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2172 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2173 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2174 // ckoff => rcx, ckval => r8
2175 // r9 and r10 may be used to save non-volatile registers
2176 #ifdef _WIN64
2177 // last argument (#4) is on stack on Win64
2178 const int ckval_offset = saved_rarg0_offset + 4;
2179 __ movptr(ckval, Address(rsp, ckval_offset * wordSize));
2180 #endif
2182 // check that int operands are properly extended to size_t
2183 assert_clean_int(length, rax);
2184 assert_clean_int(ckoff, rax);
2186 #ifdef ASSERT
2187 BLOCK_COMMENT("assert consistent ckoff/ckval");
2188 // The ckoff and ckval must be mutually consistent,
2189 // even though caller generates both.
2190 { Label L;
2191 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2192 Klass::super_check_offset_offset_in_bytes());
2193 __ cmpl(ckoff, Address(ckval, sco_offset));
2194 __ jcc(Assembler::equal, L);
2195 __ stop("super_check_offset inconsistent");
2196 __ bind(L);
2197 }
2198 #endif //ASSERT
2200 // Loop-invariant addresses. They are exclusive end pointers.
2201 Address end_from_addr(from, length, TIMES_OOP, 0);
2202 Address end_to_addr(to, length, TIMES_OOP, 0);
2203 // Loop-variant addresses. They assume post-incremented count < 0.
2204 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2205 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2207 gen_write_ref_array_pre_barrier(to, count);
2209 // Copy from low to high addresses, indexed from the end of each array.
2210 __ lea(end_from, end_from_addr);
2211 __ lea(end_to, end_to_addr);
2212 __ movptr(r14_length, length); // save a copy of the length
2213 assert(length == count, ""); // else fix next line:
2214 __ negptr(count); // negate and test the length
2215 __ jcc(Assembler::notZero, L_load_element);
2217 // Empty array: Nothing to do.
2218 __ xorptr(rax, rax); // return 0 on (trivial) success
2219 __ jmp(L_done);
2221 // ======== begin loop ========
2222 // (Loop is rotated; its entry is L_load_element.)
2223 // Loop control:
2224 // for (count = -count; count != 0; count++)
2225 // Base pointers src, dst are biased by 8*(count-1), to the last element.
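// Editorial sketch (not part of the original source) of the rotated loop
// below, with is_subtype standing in for generate_type_check:
//
//   for (count = -count; count != 0; count++) {
//     oop x = end_from[count];                  // L_load_element
//     if (x != NULL && !is_subtype(x))          // NULLs always store OK
//       goto report_partial_copy;               // error exit below the loop
//     end_to[count] = x;                        // L_store_element
//   }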
2226 __ align(OptoLoopAlignment);
2228 __ BIND(L_store_element);
2229 __ store_heap_oop(to_element_addr, rax_oop); // store the oop
2230 __ increment(count); // increment the count toward zero
2231 __ jcc(Assembler::zero, L_do_card_marks);
2233 // ======== loop entry is here ========
2234 __ BIND(L_load_element);
2235 __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2236 __ testptr(rax_oop, rax_oop);
2237 __ jcc(Assembler::zero, L_store_element);
2239 __ load_klass(r11_klass, rax_oop);// query the object klass
2240 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2241 // ======== end loop ========
2243 // It was a real error; we must depend on the caller to finish the job.
2244 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2245 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2246 // and report their number to the caller.
2247 assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2248 __ lea(end_to, to_element_addr);
2249 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2250 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2251 __ movptr(rax, r14_length); // original oops
2252 __ addptr(rax, count); // K = (original - remaining) oops
2253 __ notptr(rax); // report (-1^K) to caller
2254 __ jmp(L_done);
2256 // Come here on success only.
2257 __ BIND(L_do_card_marks);
2258 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2259 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2260 __ xorptr(rax, rax); // return 0 on success
2262 // Common exit point (success or failure).
2263 __ BIND(L_done);
2264 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2265 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2266 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2267 restore_arg_regs();
2268 __ leave(); // required for proper stackwalking of RuntimeStub frame
2269 __ ret(0);
2271 return start;
2272 }
2274 //
2275 // Generate 'unsafe' array copy stub
2276 // Though just as safe as the other stubs, it takes an unscaled
2277 // size_t argument instead of an element count.
2278 //
2279 // Input:
2280 // c_rarg0 - source array address
2281 // c_rarg1 - destination array address
2282 // c_rarg2 - byte count, treated as ssize_t, can be zero
2283 //
2284 // Examines the alignment of the operands and dispatches
2285 // to a long, int, short, or byte copy loop.
2286 //
2287 address generate_unsafe_copy(const char *name) {
2289 Label L_long_aligned, L_int_aligned, L_short_aligned;
2291 // Input registers (before setup_arg_regs)
2292 const Register from = c_rarg0; // source array address
2293 const Register to = c_rarg1; // destination array address
2294 const Register size = c_rarg2; // byte count (size_t)
2296 // Register used as a temp
2297 const Register bits = rax; // test copy of low bits
2299 __ align(CodeEntryAlignment);
2300 StubCodeMark mark(this, "StubRoutines", name);
2301 address start = __ pc();
2303 __ enter(); // required for proper stackwalking of RuntimeStub frame
2305 // bump this on entry, not on exit:
2306 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2308 __ mov(bits, from);
2309 __ orptr(bits, to);
2310 __ orptr(bits, size);
2312 __ testb(bits, BytesPerLong-1);
2313 __ jccb(Assembler::zero, L_long_aligned);
2315 __ testb(bits, BytesPerInt-1);
2316 __ jccb(Assembler::zero, L_int_aligned);
2318 __ testb(bits, BytesPerShort-1);
2319 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2321 __ BIND(L_short_aligned);
2322 __ shrptr(size, LogBytesPerShort); // size => short_count
2323 __ jump(RuntimeAddress(short_copy_entry));
2325 __ BIND(L_int_aligned);
2326 __ shrptr(size, LogBytesPerInt); // size => int_count
2327 __ jump(RuntimeAddress(int_copy_entry));
2329 __ BIND(L_long_aligned);
2330 __ shrptr(size, LogBytesPerLong); // size => qword_count
2331 __ jump(RuntimeAddress(long_copy_entry));
2333 return start;
2334 }
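// Editorial sketch (not part of the original source): OR-ing 'from', 'to'
// and 'size' into one value means a single low-bits test proves that all
// three share the tested alignment:
//
//   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)size;
//   if ((bits & 7) == 0) goto long_copy;   // everything 8-byte aligned
//   if ((bits & 3) == 0) goto int_copy;    // everything 4-byte aligned
//   if ((bits & 1) == 0) goto short_copy;  // everything 2-byte aligned
//   goto byte_copy;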
2336 // Perform range checks on the proposed arraycopy.
2337 // Kills temp, but nothing else.
2338 // Also, clean the sign bits of src_pos and dst_pos.
2339 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2340 Register src_pos, // source position (c_rarg1)
2341 Register dst, // destination array oop (c_rarg2)
2342 Register dst_pos, // destination position (c_rarg3)
2343 Register length,
2344 Register temp,
2345 Label& L_failed) {
2346 BLOCK_COMMENT("arraycopy_range_checks:");
2348 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2349 __ movl(temp, length);
2350 __ addl(temp, src_pos); // src_pos + length
2351 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2352 __ jcc(Assembler::above, L_failed);
2354 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2355 __ movl(temp, length);
2356 __ addl(temp, dst_pos); // dst_pos + length
2357 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2358 __ jcc(Assembler::above, L_failed);
2360 // Have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2361 // Sign-extending moves suffice since both values are known to be non-negative.
2362 __ movslq(src_pos, src_pos);
2363 __ movslq(dst_pos, dst_pos);
2365 BLOCK_COMMENT("arraycopy_range_checks done");
2366 }
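// Editorial note (not part of the original source): the unsigned 'above'
// comparisons also catch 32-bit overflow of pos + length. For example,
// src_pos == 2 and length == 0x7FFFFFFF wrap to a negative jint, which
// compares as a huge unsigned value and so always exceeds the array length.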
2368 //
2369 // Generate generic array copy stubs
2370 //
2371 // Input:
2372 // c_rarg0 - src oop
2373 // c_rarg1 - src_pos (32-bits)
2374 // c_rarg2 - dst oop
2375 // c_rarg3 - dst_pos (32-bits)
2376 // not Win64
2377 // c_rarg4 - element count (32-bits)
2378 // Win64
2379 // rsp+40 - element count (32-bits)
2380 //
2381 // Output:
2382 // rax == 0 - success
2383 // rax == -1^K - failure, where K is partial transfer count
2384 //
2385 address generate_generic_copy(const char *name) {
2387 Label L_failed, L_failed_0, L_objArray;
2388 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2390 // Input registers
2391 const Register src = c_rarg0; // source array oop
2392 const Register src_pos = c_rarg1; // source position
2393 const Register dst = c_rarg2; // destination array oop
2394 const Register dst_pos = c_rarg3; // destination position
2395 // elements count is on stack on Win64
2396 #ifdef _WIN64
2397 #define C_RARG4 Address(rsp, 6 * wordSize)
2398 #else
2399 #define C_RARG4 c_rarg4
2400 #endif
2402 { int modulus = CodeEntryAlignment;
2403 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2404 int advance = target - (__ offset() % modulus);
2405 if (advance < 0) advance += modulus;
2406 if (advance > 0) __ nop(advance);
2407 }
2408 StubCodeMark mark(this, "StubRoutines", name);
2410 // Short-hop target to L_failed. Makes for denser prologue code.
2411 __ BIND(L_failed_0);
2412 __ jmp(L_failed);
2413 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2415 __ align(CodeEntryAlignment);
2416 address start = __ pc();
2418 __ enter(); // required for proper stackwalking of RuntimeStub frame
2420 // bump this on entry, not on exit:
2421 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2423 //-----------------------------------------------------------------------
2424 // Assembler stub will be used for this call to arraycopy
2425 // if the following conditions are met:
2426 //
2427 // (1) src and dst must not be null.
2428 // (2) src_pos must not be negative.
2429 // (3) dst_pos must not be negative.
2430 // (4) length must not be negative.
2431 // (5) src klass and dst klass should be the same and not NULL.
2432 // (6) src and dst should be arrays.
2433 // (7) src_pos + length must not exceed length of src.
2434 // (8) dst_pos + length must not exceed length of dst.
2435 //
2437 // if (src == NULL) return -1;
2438 __ testptr(src, src); // src oop
2439 size_t j1off = __ offset();
2440 __ jccb(Assembler::zero, L_failed_0);
2442 // if (src_pos < 0) return -1;
2443 __ testl(src_pos, src_pos); // src_pos (32-bits)
2444 __ jccb(Assembler::negative, L_failed_0);
2446 // if (dst == NULL) return -1;
2447 __ testptr(dst, dst); // dst oop
2448 __ jccb(Assembler::zero, L_failed_0);
2450 // if (dst_pos < 0) return -1;
2451 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2452 size_t j4off = __ offset();
2453 __ jccb(Assembler::negative, L_failed_0);
2455 // The first four tests are very dense code,
2456 // but not quite dense enough to put four
2457 // jumps in a 16-byte instruction fetch buffer.
2458 // That's good, because some branch predictors
2459 // do not like jumps so close together.
2460 // Make sure of this.
2461 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2463 // registers used as temp
2464 const Register r11_length = r11; // elements count to copy
2465 const Register r10_src_klass = r10; // array klass
2466 const Register r9_dst_klass = r9; // dest array klass
2468 // if (length < 0) return -1;
2469 __ movl(r11_length, C_RARG4); // length (elements count, 32-bits value)
2470 __ testl(r11_length, r11_length);
2471 __ jccb(Assembler::negative, L_failed_0);
2473 __ load_klass(r10_src_klass, src);
2474 #ifdef ASSERT
2475 // assert(src->klass() != NULL);
2476 BLOCK_COMMENT("assert klasses not null");
2477 { Label L1, L2;
2478 __ testptr(r10_src_klass, r10_src_klass);
2479 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2480 __ bind(L1);
2481 __ stop("broken null klass");
2482 __ bind(L2);
2483 __ load_klass(r9_dst_klass, dst);
2484 __ cmpq(r9_dst_klass, 0);
2485 __ jcc(Assembler::equal, L1); // this would be broken also
2486 BLOCK_COMMENT("assert done");
2487 }
2488 #endif
2490 // Load layout helper (32-bits)
2491 //
2492 //  |array_tag|     | header_size | element_type |     |log2_element_size|
2493 // 32        30    24            16              8     2                 0
2494 //
2495 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2496 //
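// Editorial worked example (not part of the original source; the header
// size varies with the build): for a jint[] array the packed value is
// roughly
//
//   lh == (0x3 << 30)                  // array_tag: typeArray
//       | (header_size_in_bytes << 16) // arrayOopDesc::base_offset_in_bytes
//       | (T_INT << 8)                 // element_type
//       | 2;                           // log2_element_size of a 4-byte jint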
2498 int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2499 Klass::layout_helper_offset_in_bytes();
2501 const Register rax_lh = rax; // layout helper
2503 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2505 // Handle objArrays completely differently...
2506 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2507 __ cmpl(rax_lh, objArray_lh);
2508 __ jcc(Assembler::equal, L_objArray);
2510 // if (src->klass() != dst->klass()) return -1;
2511 __ load_klass(r9_dst_klass, dst);
2512 __ cmpq(r10_src_klass, r9_dst_klass);
2513 __ jcc(Assembler::notEqual, L_failed);
2515 // if (!src->is_Array()) return -1;
2516 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2517 __ jcc(Assembler::greaterEqual, L_failed);
2519 // At this point, it is known to be a typeArray (array_tag 0x3).
2520 #ifdef ASSERT
2521 { Label L;
2522 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2523 __ jcc(Assembler::greaterEqual, L);
2524 __ stop("must be a primitive array");
2525 __ bind(L);
2526 }
2527 #endif
2529 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2530 r10, L_failed);
2532 // typeArrayKlass
2533 //
2534 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2535 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2536 //
2538 const Register r10_offset = r10; // array offset
2539 const Register rax_elsize = rax_lh; // element size
2541 __ movl(r10_offset, rax_lh);
2542 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2543 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
2544 __ addptr(src, r10_offset); // src array offset
2545 __ addptr(dst, r10_offset); // dst array offset
2546 BLOCK_COMMENT("choose copy loop based on element size");
2547 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2549 // next registers should be set before the jump to corresponding stub
2550 const Register from = c_rarg0; // source array address
2551 const Register to = c_rarg1; // destination array address
2552 const Register count = c_rarg2; // elements count
2554 // 'from', 'to' and 'count' must be set in this order, since they
2555 // alias the incoming 'src', 'src_pos' and 'dst' registers.
2557 __ BIND(L_copy_bytes);
2558 __ cmpl(rax_elsize, 0);
2559 __ jccb(Assembler::notEqual, L_copy_shorts);
2560 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2561 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2562 __ movl2ptr(count, r11_length); // length
2563 __ jump(RuntimeAddress(byte_copy_entry));
2565 __ BIND(L_copy_shorts);
2566 __ cmpl(rax_elsize, LogBytesPerShort);
2567 __ jccb(Assembler::notEqual, L_copy_ints);
2568 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2569 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2570 __ movl2ptr(count, r11_length); // length
2571 __ jump(RuntimeAddress(short_copy_entry));
2573 __ BIND(L_copy_ints);
2574 __ cmpl(rax_elsize, LogBytesPerInt);
2575 __ jccb(Assembler::notEqual, L_copy_longs);
2576 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2577 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2578 __ movl2ptr(count, r11_length); // length
2579 __ jump(RuntimeAddress(int_copy_entry));
2581 __ BIND(L_copy_longs);
2582 #ifdef ASSERT
2583 { Label L;
2584 __ cmpl(rax_elsize, LogBytesPerLong);
2585 __ jcc(Assembler::equal, L);
2586 __ stop("must be long copy, but elsize is wrong");
2587 __ bind(L);
2588 }
2589 #endif
2590 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2591 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2592 __ movl2ptr(count, r11_length); // length
2593 __ jump(RuntimeAddress(long_copy_entry));
2595 // objArrayKlass
2596 __ BIND(L_objArray);
2597 // live at this point: r10_src_klass, src[_pos], dst[_pos]
2599 Label L_plain_copy, L_checkcast_copy;
2600 // test array classes for subtyping
2601 __ load_klass(r9_dst_klass, dst);
2602 __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
2603 __ jcc(Assembler::notEqual, L_checkcast_copy);
2605 // Identically typed arrays can be copied without element-wise checks.
2606 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2607 r10, L_failed);
2609 __ lea(from, Address(src, src_pos, TIMES_OOP,
2610 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2611 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2612 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2613 __ movl2ptr(count, r11_length); // length
2614 __ BIND(L_plain_copy);
2615 __ jump(RuntimeAddress(oop_copy_entry));
2617 __ BIND(L_checkcast_copy);
2618 // live at this point: r10_src_klass, !r11_length
2619 {
2620 // assert(r11_length == C_RARG4); // will reload from here
2621 Register r11_dst_klass = r11;
2622 __ load_klass(r11_dst_klass, dst);
2624 // Before looking at dst.length, make sure dst is also an objArray.
2625 __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
2626 __ jcc(Assembler::notEqual, L_failed);
2628 // It is safe to examine both src.length and dst.length.
2629 #ifndef _WIN64
2630 arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
2631 rax, L_failed);
2632 #else
2633 __ movl(r11_length, C_RARG4); // reload
2634 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2635 rax, L_failed);
2636 __ load_klass(r11_dst_klass, dst); // reload
2637 #endif
2639 // Marshal the base address arguments now, freeing registers.
2640 __ lea(from, Address(src, src_pos, TIMES_OOP,
2641 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2642 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2643 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2644 __ movl(count, C_RARG4); // length (reloaded)
2645 Register sco_temp = c_rarg3; // this register is free now
2646 assert_different_registers(from, to, count, sco_temp,
2647 r11_dst_klass, r10_src_klass);
2648 assert_clean_int(count, sco_temp);
2650 // Generate the type check.
2651 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2652 Klass::super_check_offset_offset_in_bytes());
2653 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2654 assert_clean_int(sco_temp, rax);
2655 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2657 // Fetch destination element klass from the objArrayKlass header.
2658 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2659 objArrayKlass::element_klass_offset_in_bytes());
2660 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2661 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2662 assert_clean_int(sco_temp, rax);
2664 // the checkcast_copy loop needs two extra arguments:
2665 assert(c_rarg3 == sco_temp, "#3 already in place");
2666 __ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass
2667 __ jump(RuntimeAddress(checkcast_copy_entry));
2668 }
2670 __ BIND(L_failed);
2671 __ xorptr(rax, rax);
2672 __ notptr(rax); // return -1
2673 __ leave(); // required for proper stackwalking of RuntimeStub frame
2674 __ ret(0);
2676 return start;
2677 }
2679 #undef length_arg
2681 void generate_arraycopy_stubs() {
2682 // Call the conjoint generation methods immediately after
2683 // the disjoint ones so that short branches from the former
2684 // to the latter can be generated.
2685 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2686 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2688 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2689 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
2691 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
2692 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");
2694 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
2695 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");
2698 if (UseCompressedOops) {
2699 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
2700 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
2701 } else {
2702 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
2703 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
2704 }
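// Editorial note (not part of the original source): with compressed oops a
// heap reference is a 32-bit narrowOop, so the oop stubs can reuse the int
// copy loops; with full-width 64-bit oops they reuse the long copy loops.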
2706 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2707 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
2708 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
2710 // We don't generate specialized code for HeapWord-aligned source
2711 // arrays, so just use the code we've already generated
2712 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2713 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2715 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2716 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2718 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2719 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2721 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2722 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2724 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2725 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2726 }
2728 void generate_math_stubs() {
2729 {
2730 StubCodeMark mark(this, "StubRoutines", "log");
2731 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2733 __ subq(rsp, 8);
2734 __ movdbl(Address(rsp, 0), xmm0);
2735 __ fld_d(Address(rsp, 0));
2736 __ flog();
2737 __ fstp_d(Address(rsp, 0));
2738 __ movdbl(xmm0, Address(rsp, 0));
2739 __ addq(rsp, 8);
2740 __ ret(0);
2741 }
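// Editorial note (not part of the original source): each math stub in this
// method has the same shape because flog/flog10/fsin/fcos/ftan exist only
// in the x87 instruction set while the argument arrives in xmm0: spill
// xmm0 to eight stack bytes (movdbl), load it onto the x87 stack (fld_d),
// run the x87 instruction, then store (fstp_d) and reload xmm0 with the
// result.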
2742 {
2743 StubCodeMark mark(this, "StubRoutines", "log10");
2744 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2746 __ subq(rsp, 8);
2747 __ movdbl(Address(rsp, 0), xmm0);
2748 __ fld_d(Address(rsp, 0));
2749 __ flog10();
2750 __ fstp_d(Address(rsp, 0));
2751 __ movdbl(xmm0, Address(rsp, 0));
2752 __ addq(rsp, 8);
2753 __ ret(0);
2754 }
2755 {
2756 StubCodeMark mark(this, "StubRoutines", "sin");
2757 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2759 __ subq(rsp, 8);
2760 __ movdbl(Address(rsp, 0), xmm0);
2761 __ fld_d(Address(rsp, 0));
2762 __ trigfunc('s');
2763 __ fstp_d(Address(rsp, 0));
2764 __ movdbl(xmm0, Address(rsp, 0));
2765 __ addq(rsp, 8);
2766 __ ret(0);
2767 }
2768 {
2769 StubCodeMark mark(this, "StubRoutines", "cos");
2770 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2772 __ subq(rsp, 8);
2773 __ movdbl(Address(rsp, 0), xmm0);
2774 __ fld_d(Address(rsp, 0));
2775 __ trigfunc('c');
2776 __ fstp_d(Address(rsp, 0));
2777 __ movdbl(xmm0, Address(rsp, 0));
2778 __ addq(rsp, 8);
2779 __ ret(0);
2780 }
2781 {
2782 StubCodeMark mark(this, "StubRoutines", "tan");
2783 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2785 __ subq(rsp, 8);
2786 __ movdbl(Address(rsp, 0), xmm0);
2787 __ fld_d(Address(rsp, 0));
2788 __ trigfunc('t');
2789 __ fstp_d(Address(rsp, 0));
2790 __ movdbl(xmm0, Address(rsp, 0));
2791 __ addq(rsp, 8);
2792 __ ret(0);
2793 }
2795 // The intrinsic versions of these seem to return the same values as
2796 // the strict versions.
2797 StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
2798 StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
2799 }
2801 #undef __
2802 #define __ masm->
2804 // Continuation point for throwing of implicit exceptions that are
2805 // not handled in the current activation. Fabricates an exception
2806 // oop and initiates normal exception dispatching in this
2807 // frame. Since we need to preserve callee-saved values (currently
2808 // only for C2, but done for C1 as well) we need a callee-saved oop
2809 // map and therefore have to make these stubs into RuntimeStubs
2810 // rather than BufferBlobs. If the compiler needs all registers to
2811 // be preserved between the fault point and the exception handler
2812 // then it must assume responsibility for that in
2813 // AbstractCompiler::continuation_for_implicit_null_exception or
2814 // continuation_for_implicit_division_by_zero_exception. All other
2815 // implicit exceptions (e.g., NullPointerException or
2816 // AbstractMethodError on entry) are either at call sites or
2817 // otherwise assume that stack unwinding will be initiated, so
2818 // caller saved registers were assumed volatile in the compiler.
2819 address generate_throw_exception(const char* name,
2820 address runtime_entry,
2821 bool restore_saved_exception_pc) {
2822 // Information about frame layout at time of blocking runtime call.
2823 // Note that we only have to preserve callee-saved registers since
2824 // the compilers are responsible for supplying a continuation point
2825 // if they expect all registers to be preserved.
2826 enum layout {
2827 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
2828 rbp_off2,
2829 return_off,
2830 return_off2,
2831 framesize // inclusive of return address
2832 };
2834 int insts_size = 512;
2835 int locs_size = 64;
2837 CodeBuffer code(name, insts_size, locs_size);
2838 OopMapSet* oop_maps = new OopMapSet();
2839 MacroAssembler* masm = new MacroAssembler(&code);
2841 address start = __ pc();
2843 // This is an inlined and slightly modified version of call_VM
2844 // which has the ability to fetch the return PC out of
2845 // thread-local storage and also sets up last_Java_sp slightly
2846 // differently than the real call_VM
2847 if (restore_saved_exception_pc) {
2848 __ movptr(rax,
2849 Address(r15_thread,
2850 in_bytes(JavaThread::saved_exception_pc_offset())));
2851 __ push(rax);
2852 }
2854 __ enter(); // required for proper stackwalking of RuntimeStub frame
2856 assert(is_even(framesize/2), "sp not 16-byte aligned");
2858 // return address and rbp are already in place
2859 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
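// (Editorial note, not part of the original source: framesize counts
// 32-bit slots; rbp and the return address already occupy four of them,
// so (framesize - 4) << LogBytesPerInt allocates exactly the
// arg_reg_save_area_bytes declared in the layout enum above.)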
2861 int frame_complete = __ pc() - start;
2863 // Set up last_Java_sp and last_Java_fp
2864 __ set_last_Java_frame(rsp, rbp, NULL);
2866 // Call runtime
2867 __ movptr(c_rarg0, r15_thread);
2868 BLOCK_COMMENT("call runtime_entry");
2869 __ call(RuntimeAddress(runtime_entry));
2871 // Generate oop map
2872 OopMap* map = new OopMap(framesize, 0);
2874 oop_maps->add_gc_map(__ pc() - start, map);
2876 __ reset_last_Java_frame(true, false);
2878 __ leave(); // required for proper stackwalking of RuntimeStub frame
2880 // check for pending exceptions
2881 #ifdef ASSERT
2882 Label L;
2883 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
2884 (int32_t) NULL_WORD);
2885 __ jcc(Assembler::notEqual, L);
2886 __ should_not_reach_here();
2887 __ bind(L);
2888 #endif // ASSERT
2889 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2892 // codeBlob framesize is in words (not VMRegImpl::slot_size)
2893 RuntimeStub* stub =
2894 RuntimeStub::new_runtime_stub(name,
2895 &code,
2896 frame_complete,
2897 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2898 oop_maps, false);
2899 return stub->entry_point();
2900 }
2902 // Initialization
2903 void generate_initial() {
2904 // Generates all stubs and initializes the entry points
2906 // This platform-specific stub is needed by generate_call_stub()
2907 StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
2909 // Entry points that exist on all platforms. Note: this is code
2910 // that could be shared among different platforms; however, the
2911 // benefit seems to be smaller than the disadvantage of having a
2912 // much more complicated generator structure. See also the comment
2913 // in stubRoutines.hpp.
2915 StubRoutines::_forward_exception_entry = generate_forward_exception();
2917 StubRoutines::_call_stub_entry =
2918 generate_call_stub(StubRoutines::_call_stub_return_address);
2920 // is referenced by megamorphic call
2921 StubRoutines::_catch_exception_entry = generate_catch_exception();
2923 // atomic calls
2924 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2925 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
2926 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
2927 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2928 StubRoutines::_atomic_add_entry = generate_atomic_add();
2929 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
2930 StubRoutines::_fence_entry = generate_orderaccess_fence();
2932 StubRoutines::_handler_for_unsafe_access_entry =
2933 generate_handler_for_unsafe_access();
2935 // platform dependent
2936 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
2938 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
2939 }
2941 void generate_all() {
2942 // Generates all stubs and initializes the entry points
2944 // These entry points require SharedInfo::stack0 to be set up in
2945 // non-core builds and need to be relocatable, so they each
2946 // fabricate a RuntimeStub internally.
2947 StubRoutines::_throw_AbstractMethodError_entry =
2948 generate_throw_exception("AbstractMethodError throw_exception",
2949 CAST_FROM_FN_PTR(address,
2950 SharedRuntime::
2951 throw_AbstractMethodError),
2952 false);
2954 StubRoutines::_throw_IncompatibleClassChangeError_entry =
2955 generate_throw_exception("IncompatibleClassChangeError throw_exception",
2956 CAST_FROM_FN_PTR(address,
2957 SharedRuntime::
2958 throw_IncompatibleClassChangeError),
2959 false);
2961 StubRoutines::_throw_ArithmeticException_entry =
2962 generate_throw_exception("ArithmeticException throw_exception",
2963 CAST_FROM_FN_PTR(address,
2964 SharedRuntime::
2965 throw_ArithmeticException),
2966 true);
2968 StubRoutines::_throw_NullPointerException_entry =
2969 generate_throw_exception("NullPointerException throw_exception",
2970 CAST_FROM_FN_PTR(address,
2971 SharedRuntime::
2972 throw_NullPointerException),
2973 true);
2975 StubRoutines::_throw_NullPointerException_at_call_entry =
2976 generate_throw_exception("NullPointerException at call throw_exception",
2977 CAST_FROM_FN_PTR(address,
2978 SharedRuntime::
2979 throw_NullPointerException_at_call),
2980 false);
2982 StubRoutines::_throw_StackOverflowError_entry =
2983 generate_throw_exception("StackOverflowError throw_exception",
2984 CAST_FROM_FN_PTR(address,
2985 SharedRuntime::
2986 throw_StackOverflowError),
2987 false);
2989 // entry points that are platform specific
2990 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
2991 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
2992 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
2993 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
2995 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
2996 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
2997 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
2998 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
3000 // support for verify_oop (must happen after universe_init)
3001 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
3003 // arraycopy stubs used by compilers
3004 generate_arraycopy_stubs();
3006 generate_math_stubs();
3007 }
3009 public:
3010 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3011 if (all) {
3012 generate_all();
3013 } else {
3014 generate_initial();
3015 }
3016 }
3017 }; // end class declaration
3019 address StubGenerator::disjoint_byte_copy_entry = NULL;
3020 address StubGenerator::disjoint_short_copy_entry = NULL;
3021 address StubGenerator::disjoint_int_copy_entry = NULL;
3022 address StubGenerator::disjoint_long_copy_entry = NULL;
3023 address StubGenerator::disjoint_oop_copy_entry = NULL;
3025 address StubGenerator::byte_copy_entry = NULL;
3026 address StubGenerator::short_copy_entry = NULL;
3027 address StubGenerator::int_copy_entry = NULL;
3028 address StubGenerator::long_copy_entry = NULL;
3029 address StubGenerator::oop_copy_entry = NULL;
3031 address StubGenerator::checkcast_copy_entry = NULL;
3033 void StubGenerator_generate(CodeBuffer* code, bool all) {
3034 StubGenerator g(code, all);
3035 }