src/cpu/x86/vm/stubGenerator_x86_64.cpp

changeset 739:dc7f315e41f7
parent    684:910a4cb98e9e
child     797:f8199438385b
comparing 738:fa4d1d240383 with 739:dc7f315e41f7
29 // For a more detailed description of the stub routine structure 29 // For a more detailed description of the stub routine structure
30 // see the comment in stubRoutines.hpp 30 // see the comment in stubRoutines.hpp
31 31
32 #define __ _masm-> 32 #define __ _masm->
33 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) 33 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
34 #define a__ ((Assembler*)_masm)->
34 35
35 #ifdef PRODUCT 36 #ifdef PRODUCT
36 #define BLOCK_COMMENT(str) /* nothing */ 37 #define BLOCK_COMMENT(str) /* nothing */
37 #else 38 #else
38 #define BLOCK_COMMENT(str) __ block_comment(str) 39 #define BLOCK_COMMENT(str) __ block_comment(str)
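The `__` macro is the stub generator's idiom for making MacroAssembler calls read like an assembly listing, and the newly added `a__` variant casts down to the raw Assembler for cases where a MacroAssembler override must be bypassed. A minimal sketch of the idiom, using a stand-in class with a hypothetical movptr(const char*, const char*) signature (the real MacroAssembler is HotSpot's):

    // Illustrative only: shows how "__ movptr(...)" expands.
    #include <cstdio>

    struct MacroAssembler {
      void movptr(const char* dst, const char* src) {  // hypothetical signature
        std::printf("mov %s, %s\n", dst, src);
      }
    };

    #define __ _masm->

    void generate(MacroAssembler* _masm) {
      __ movptr("rax", "rbx");   // expands to _masm->movptr("rax", "rbx")
    }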
208 const Address r12_save(rbp, r12_off * wordSize); 209 const Address r12_save(rbp, r12_off * wordSize);
209 const Address rbx_save(rbp, rbx_off * wordSize); 210 const Address rbx_save(rbp, rbx_off * wordSize);
210 211
211 // stub code 212 // stub code
212 __ enter(); 213 __ enter();
213 __ subq(rsp, -rsp_after_call_off * wordSize); 214 __ subptr(rsp, -rsp_after_call_off * wordSize);
214 215
215 // save register parameters 216 // save register parameters
216 #ifndef _WIN64 217 #ifndef _WIN64
217 __ movq(parameters, c_rarg5); // parameters 218 __ movptr(parameters, c_rarg5); // parameters
218 __ movq(entry_point, c_rarg4); // entry_point 219 __ movptr(entry_point, c_rarg4); // entry_point
219 #endif 220 #endif
220 221
221 __ movq(method, c_rarg3); // method 222 __ movptr(method, c_rarg3); // method
222 __ movl(result_type, c_rarg2); // result type 223 __ movl(result_type, c_rarg2); // result type
223 __ movq(result, c_rarg1); // result 224 __ movptr(result, c_rarg1); // result
224 __ movq(call_wrapper, c_rarg0); // call wrapper 225 __ movptr(call_wrapper, c_rarg0); // call wrapper
225 226
226 // save regs belonging to calling function 227 // save regs belonging to calling function
227 __ movq(rbx_save, rbx); 228 __ movptr(rbx_save, rbx);
228 __ movq(r12_save, r12); 229 __ movptr(r12_save, r12);
229 __ movq(r13_save, r13); 230 __ movptr(r13_save, r13);
230 __ movq(r14_save, r14); 231 __ movptr(r14_save, r14);
231 __ movq(r15_save, r15); 232 __ movptr(r15_save, r15);
232 233
233 #ifdef _WIN64 234 #ifdef _WIN64
234 const Address rdi_save(rbp, rdi_off * wordSize); 235 const Address rdi_save(rbp, rdi_off * wordSize);
235 const Address rsi_save(rbp, rsi_off * wordSize); 236 const Address rsi_save(rbp, rsi_off * wordSize);
236 237
237 __ movq(rsi_save, rsi); 238 __ movptr(rsi_save, rsi);
238 __ movq(rdi_save, rdi); 239 __ movptr(rdi_save, rdi);
239 #else 240 #else
240 const Address mxcsr_save(rbp, mxcsr_off * wordSize); 241 const Address mxcsr_save(rbp, mxcsr_off * wordSize);
241 { 242 {
242 Label skip_ldmx; 243 Label skip_ldmx;
243 __ stmxcsr(mxcsr_save); 244 __ stmxcsr(mxcsr_save);
244 __ movl(rax, mxcsr_save); 245 __ movl(rax, mxcsr_save);
245 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 246 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
246 ExternalAddress mxcsr_std(StubRoutines::amd64::mxcsr_std()); 247 ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
247 __ cmp32(rax, mxcsr_std); 248 __ cmp32(rax, mxcsr_std);
248 __ jcc(Assembler::equal, skip_ldmx); 249 __ jcc(Assembler::equal, skip_ldmx);
249 __ ldmxcsr(mxcsr_std); 250 __ ldmxcsr(mxcsr_std);
250 __ bind(skip_ldmx); 251 __ bind(skip_ldmx);
251 } 252 }
252 #endif 253 #endif
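On entry the stub verifies that the caller left MXCSR in the standard state Java expects (round-to-nearest, all exceptions masked), comparing only the control and mask bits and reloading StubRoutines::x86::mxcsr_std() on mismatch. A rough C++ analogue, assuming 0x1F80 for the standard word and 0xFFC0 for the control/mask selector (the actual MXCSR_MASK constant is defined elsewhere in this file and may differ):

    #include <xmmintrin.h>

    void check_and_restore_mxcsr() {
      const unsigned int std_csr = 0x1F80;   // assumed standard control word
      const unsigned int mask    = 0xFFC0;   // assumed control/mask bits
      if ((_mm_getcsr() & mask) != std_csr)
        _mm_setcsr(std_csr);                 // what the stub's ldmxcsr does
    }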
253 254
254 // Load up thread register 255 // Load up thread register
255 __ movq(r15_thread, thread); 256 __ movptr(r15_thread, thread);
256 __ reinit_heapbase(); 257 __ reinit_heapbase();
257 258
258 #ifdef ASSERT 259 #ifdef ASSERT
259 // make sure we have no pending exceptions 260 // make sure we have no pending exceptions
260 { 261 {
261 Label L; 262 Label L;
262 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); 263 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
263 __ jcc(Assembler::equal, L); 264 __ jcc(Assembler::equal, L);
264 __ stop("StubRoutines::call_stub: entered with pending exception"); 265 __ stop("StubRoutines::call_stub: entered with pending exception");
265 __ bind(L); 266 __ bind(L);
266 } 267 }
267 #endif 268 #endif
272 __ movl(c_rarg3, parameter_size); 273 __ movl(c_rarg3, parameter_size);
273 __ testl(c_rarg3, c_rarg3); 274 __ testl(c_rarg3, c_rarg3);
274 __ jcc(Assembler::zero, parameters_done); 275 __ jcc(Assembler::zero, parameters_done);
275 276
276 Label loop; 277 Label loop;
277 __ movq(c_rarg2, parameters); // parameter pointer 278 __ movptr(c_rarg2, parameters); // parameter pointer
278 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 279 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
279 __ BIND(loop); 280 __ BIND(loop);
280 if (TaggedStackInterpreter) { 281 if (TaggedStackInterpreter) {
281 __ movq(rax, Address(c_rarg2, 0)); // get tag 282 __ movl(rax, Address(c_rarg2, 0)); // get tag
282 __ addq(c_rarg2, wordSize); // advance to next tag 283 __ addptr(c_rarg2, wordSize); // advance to next tag
283 __ pushq(rax); // pass tag 284 __ push(rax); // pass tag
284 } 285 }
285 __ movq(rax, Address(c_rarg2, 0)); // get parameter 286 __ movptr(rax, Address(c_rarg2, 0));// get parameter
286 __ addq(c_rarg2, wordSize); // advance to next parameter 287 __ addptr(c_rarg2, wordSize); // advance to next parameter
287 __ decrementl(c_rarg1); // decrement counter 288 __ decrementl(c_rarg1); // decrement counter
288 __ pushq(rax); // pass parameter 289 __ push(rax); // pass parameter
289 __ jcc(Assembler::notZero, loop); 290 __ jcc(Assembler::notZero, loop);
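The loop above copies the Java parameter block onto the native stack; with TaggedStackInterpreter enabled each slot is preceded by a tag word, so two pushes happen per parameter. The same logic in plain C++ (a sketch; 'stack' stands in for pushes on the real machine stack):

    #include <cstdint>
    #include <vector>

    void copy_parameters(const intptr_t* block, int count, bool tagged,
                         std::vector<intptr_t>& stack) {
      while (count-- > 0) {
        if (tagged)
          stack.push_back(*block++);   // tag word precedes the value
        stack.push_back(*block++);     // the parameter itself
      }
    }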
290 291
291 // call Java function 292 // call Java function
292 __ BIND(parameters_done); 293 __ BIND(parameters_done);
293 __ movq(rbx, method); // get methodOop 294 __ movptr(rbx, method); // get methodOop
294 __ movq(c_rarg1, entry_point); // get entry_point 295 __ movptr(c_rarg1, entry_point); // get entry_point
295 __ movq(r13, rsp); // set sender sp 296 __ mov(r13, rsp); // set sender sp
296 BLOCK_COMMENT("call Java function"); 297 BLOCK_COMMENT("call Java function");
297 __ call(c_rarg1); 298 __ call(c_rarg1);
298 299
299 BLOCK_COMMENT("call_stub_return_address:"); 300 BLOCK_COMMENT("call_stub_return_address:");
300 return_address = __ pc(); 301 return_address = __ pc();
301 302
302 // store result depending on type (everything that is not 303 // store result depending on type (everything that is not
303 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 304 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
304 __ movq(c_rarg0, result); 305 __ movptr(c_rarg0, result);
305 Label is_long, is_float, is_double, exit; 306 Label is_long, is_float, is_double, exit;
306 __ movl(c_rarg1, result_type); 307 __ movl(c_rarg1, result_type);
307 __ cmpl(c_rarg1, T_OBJECT); 308 __ cmpl(c_rarg1, T_OBJECT);
308 __ jcc(Assembler::equal, is_long); 309 __ jcc(Assembler::equal, is_long);
309 __ cmpl(c_rarg1, T_LONG); 310 __ cmpl(c_rarg1, T_LONG);
317 __ movl(Address(c_rarg0, 0), rax); 318 __ movl(Address(c_rarg0, 0), rax);
318 319
319 __ BIND(exit); 320 __ BIND(exit);
320 321
321 // pop parameters 322 // pop parameters
322 __ leaq(rsp, rsp_after_call); 323 __ lea(rsp, rsp_after_call);
323 324
324 #ifdef ASSERT 325 #ifdef ASSERT
325 // verify that threads correspond 326 // verify that threads correspond
326 { 327 {
327 Label L, S; 328 Label L, S;
328 __ cmpq(r15_thread, thread); 329 __ cmpptr(r15_thread, thread);
329 __ jcc(Assembler::notEqual, S); 330 __ jcc(Assembler::notEqual, S);
330 __ get_thread(rbx); 331 __ get_thread(rbx);
331 __ cmpq(r15_thread, rbx); 332 __ cmpptr(r15_thread, rbx);
332 __ jcc(Assembler::equal, L); 333 __ jcc(Assembler::equal, L);
333 __ bind(S); 334 __ bind(S);
334 __ jcc(Assembler::equal, L); 335 __ jcc(Assembler::equal, L);
335 __ stop("StubRoutines::call_stub: threads must correspond"); 336 __ stop("StubRoutines::call_stub: threads must correspond");
336 __ bind(L); 337 __ bind(L);
337 } 338 }
338 #endif 339 #endif
339 340
340 // restore regs belonging to calling function 341 // restore regs belonging to calling function
341 __ movq(r15, r15_save); 342 __ movptr(r15, r15_save);
342 __ movq(r14, r14_save); 343 __ movptr(r14, r14_save);
343 __ movq(r13, r13_save); 344 __ movptr(r13, r13_save);
344 __ movq(r12, r12_save); 345 __ movptr(r12, r12_save);
345 __ movq(rbx, rbx_save); 346 __ movptr(rbx, rbx_save);
346 347
347 #ifdef _WIN64 348 #ifdef _WIN64
348 __ movq(rdi, rdi_save); 349 __ movptr(rdi, rdi_save);
349 __ movq(rsi, rsi_save); 350 __ movptr(rsi, rsi_save);
350 #else 351 #else
351 __ ldmxcsr(mxcsr_save); 352 __ ldmxcsr(mxcsr_save);
352 #endif 353 #endif
353 354
354 // restore rsp 355 // restore rsp
355 __ addq(rsp, -rsp_after_call_off * wordSize); 356 __ addptr(rsp, -rsp_after_call_off * wordSize);
356 357
357 // return 358 // return
358 __ popq(rbp); 359 __ pop(rbp);
359 __ ret(0); 360 __ ret(0);
360 361
361 // handle return types different from T_INT 362 // handle return types different from T_INT
362 __ BIND(is_long); 363 __ BIND(is_long);
363 __ movq(Address(c_rarg0, 0), rax); 364 __ movq(Address(c_rarg0, 0), rax);
396 397
397 #ifdef ASSERT 398 #ifdef ASSERT
398 // verify that threads correspond 399 // verify that threads correspond
399 { 400 {
400 Label L, S; 401 Label L, S;
401 __ cmpq(r15_thread, thread); 402 __ cmpptr(r15_thread, thread);
402 __ jcc(Assembler::notEqual, S); 403 __ jcc(Assembler::notEqual, S);
403 __ get_thread(rbx); 404 __ get_thread(rbx);
404 __ cmpq(r15_thread, rbx); 405 __ cmpptr(r15_thread, rbx);
405 __ jcc(Assembler::equal, L); 406 __ jcc(Assembler::equal, L);
406 __ bind(S); 407 __ bind(S);
407 __ stop("StubRoutines::catch_exception: threads must correspond"); 408 __ stop("StubRoutines::catch_exception: threads must correspond");
408 __ bind(L); 409 __ bind(L);
409 } 410 }
410 #endif 411 #endif
411 412
412 // set pending exception 413 // set pending exception
413 __ verify_oop(rax); 414 __ verify_oop(rax);
414 415
415 __ movq(Address(r15_thread, Thread::pending_exception_offset()), rax); 416 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
416 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 417 __ lea(rscratch1, ExternalAddress((address)__FILE__));
417 __ movq(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 418 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
418 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 419 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
419 420
420 // complete return to VM 421 // complete return to VM
421 assert(StubRoutines::_call_stub_return_address != NULL, 422 assert(StubRoutines::_call_stub_return_address != NULL,
422 "_call_stub_return_address must have been generated before"); 423 "_call_stub_return_address must have been generated before");
451 452
452 #ifdef ASSERT 453 #ifdef ASSERT
453 // make sure this code is only executed if there is a pending exception 454 // make sure this code is only executed if there is a pending exception
454 { 455 {
455 Label L; 456 Label L;
456 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL); 457 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
457 __ jcc(Assembler::notEqual, L); 458 __ jcc(Assembler::notEqual, L);
458 __ stop("StubRoutines::forward exception: no pending exception (1)"); 459 __ stop("StubRoutines::forward exception: no pending exception (1)");
459 __ bind(L); 460 __ bind(L);
460 } 461 }
461 #endif 462 #endif
462 463
463 // compute exception handler into rbx 464 // compute exception handler into rbx
464 __ movq(c_rarg0, Address(rsp, 0)); 465 __ movptr(c_rarg0, Address(rsp, 0));
465 BLOCK_COMMENT("call exception_handler_for_return_address"); 466 BLOCK_COMMENT("call exception_handler_for_return_address");
466 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 467 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
467 SharedRuntime::exception_handler_for_return_address), 468 SharedRuntime::exception_handler_for_return_address),
468 c_rarg0); 469 c_rarg0);
469 __ movq(rbx, rax); 470 __ mov(rbx, rax);
470 471
471 // setup rax & rdx, remove return address & clear pending exception 472 // setup rax & rdx, remove return address & clear pending exception
472 __ popq(rdx); 473 __ pop(rdx);
473 __ movq(rax, Address(r15_thread, Thread::pending_exception_offset())); 474 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
474 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD); 475 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
475 476
476 #ifdef ASSERT 477 #ifdef ASSERT
477 // make sure exception is set 478 // make sure exception is set
478 { 479 {
479 Label L; 480 Label L;
480 __ testq(rax, rax); 481 __ testptr(rax, rax);
481 __ jcc(Assembler::notEqual, L); 482 __ jcc(Assembler::notEqual, L);
482 __ stop("StubRoutines::forward exception: no pending exception (2)"); 483 __ stop("StubRoutines::forward exception: no pending exception (2)");
483 __ bind(L); 484 __ bind(L);
484 } 485 }
485 #endif 486 #endif
523 // *dest <- ex, return (orig *dest) 524 // *dest <- ex, return (orig *dest)
524 address generate_atomic_xchg_ptr() { 525 address generate_atomic_xchg_ptr() {
525 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr"); 526 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
526 address start = __ pc(); 527 address start = __ pc();
527 528
528 __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow 529 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
529 __ xchgq(rax, Address(c_rarg1, 0)); // automatic LOCK 530 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
530 __ ret(0); 531 __ ret(0);
531 532
532 return start; 533 return start;
533 } 534 }
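No explicit LOCK prefix is needed above because x86 locks xchg with a memory operand implicitly. The stub's contract, restated with std::atomic:

    #include <atomic>
    #include <cstdint>

    // Store the new value, return the old one, atomically.
    intptr_t atomic_xchg_ptr(intptr_t exchange_value,
                             std::atomic<intptr_t>* dest) {
      return dest->exchange(exchange_value);
    }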
534 535
617 // return *dest; 618 // return *dest;
618 address generate_atomic_add_ptr() { 619 address generate_atomic_add_ptr() {
619 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); 620 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
620 address start = __ pc(); 621 address start = __ pc();
621 622
622 __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow 623 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
623 if ( os::is_MP() ) __ lock(); 624 if ( os::is_MP() ) __ lock();
624 __ xaddl(Address(c_rarg1, 0), c_rarg0); 625 __ xaddptr(Address(c_rarg1, 0), c_rarg0);
625 __ addl(rax, c_rarg0); 626 __ addptr(rax, c_rarg0);
626 __ ret(0); 627 __ ret(0);
627 628
628 return start; 629 return start;
629 } 630 }
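Two things are visible in this hunk: the accesses widen from 32-bit (xaddl/addl) to pointer width (xaddptr/addptr), and the extra add into rax exists because LOCK XADD leaves the old value in the source register while the stub must return the new one. The returned value, restated with std::atomic:

    #include <atomic>
    #include <cstdint>

    // fetch_add returns the old value; the stub returns old + increment.
    intptr_t atomic_add_ptr(intptr_t add_value,
                            std::atomic<intptr_t>* dest) {
      return dest->fetch_add(add_value) + add_value;
    }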
630 631
653 const Address old_fp(rbp, 0); 654 const Address old_fp(rbp, 0);
654 const Address older_fp(rax, 0); 655 const Address older_fp(rax, 0);
655 address start = __ pc(); 656 address start = __ pc();
656 657
657 __ enter(); 658 __ enter();
658 __ movq(rax, old_fp); // callers fp 659 __ movptr(rax, old_fp); // callers fp
659 __ movq(rax, older_fp); // the frame for ps() 660 __ movptr(rax, older_fp); // the frame for ps()
660 __ popq(rbp); 661 __ pop(rbp);
661 __ ret(0); 662 __ ret(0);
662 663
663 return start; 664 return start;
664 } 665 }
665 666
676 677
677 const Address mxcsr_save(rsp, 0); 678 const Address mxcsr_save(rsp, 0);
678 679
679 if (CheckJNICalls) { 680 if (CheckJNICalls) {
680 Label ok_ret; 681 Label ok_ret;
681 __ pushq(rax); 682 __ push(rax);
682 __ subq(rsp, wordSize); // allocate a temp location 683 __ subptr(rsp, wordSize); // allocate a temp location
683 __ stmxcsr(mxcsr_save); 684 __ stmxcsr(mxcsr_save);
684 __ movl(rax, mxcsr_save); 685 __ movl(rax, mxcsr_save);
685 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 686 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
686 __ cmpl(rax, *(int *)(StubRoutines::amd64::mxcsr_std())); 687 __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
687 __ jcc(Assembler::equal, ok_ret); 688 __ jcc(Assembler::equal, ok_ret);
688 689
689 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); 690 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
690 691
691 __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std())); 692 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
692 693
693 __ bind(ok_ret); 694 __ bind(ok_ret);
694 __ addq(rsp, wordSize); 695 __ addptr(rsp, wordSize);
695 __ popq(rax); 696 __ pop(rax);
696 } 697 }
697 698
698 __ ret(0); 699 __ ret(0);
699 700
700 return start; 701 return start;
706 707
707 address start = __ pc(); 708 address start = __ pc();
708 709
709 Label L; 710 Label L;
710 711
711 __ pushq(rax); 712 __ push(rax);
712 __ pushq(c_rarg3); 713 __ push(c_rarg3);
713 __ pushq(c_rarg2); 714 __ push(c_rarg2);
714 __ pushq(c_rarg1); 715 __ push(c_rarg1);
715 716
716 __ movl(rax, 0x7f800000); 717 __ movl(rax, 0x7f800000);
717 __ xorl(c_rarg3, c_rarg3); 718 __ xorl(c_rarg3, c_rarg3);
718 __ movl(c_rarg2, inout); 719 __ movl(c_rarg2, inout);
719 __ movl(c_rarg1, c_rarg2); 720 __ movl(c_rarg1, c_rarg2);
724 __ movl(c_rarg3, 0x80000000); 725 __ movl(c_rarg3, 0x80000000);
725 __ movl(rax, 0x7fffffff); 726 __ movl(rax, 0x7fffffff);
726 __ cmovl(Assembler::positive, c_rarg3, rax); 727 __ cmovl(Assembler::positive, c_rarg3, rax);
727 728
728 __ bind(L); 729 __ bind(L);
729 __ movq(inout, c_rarg3); 730 __ movptr(inout, c_rarg3);
730 731
731 __ popq(c_rarg1); 732 __ pop(c_rarg1);
732 __ popq(c_rarg2); 733 __ pop(c_rarg2);
733 __ popq(c_rarg3); 734 __ pop(c_rarg3);
734 __ popq(rax); 735 __ pop(rax);
735 736
736 __ ret(0); 737 __ ret(0);
737 738
738 return start; 739 return start;
739 } 740 }
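This stub, and the f2l/d2i/d2l fixup stubs that follow, back up cvttss2si/cvttsd2si: those instructions produce the single pattern 0x80000000 (or 0x8000000000000000) for NaN and out-of-range inputs, and the fixup distinguishes the cases to give Java's saturating semantics: NaN becomes 0, negative overflow the minimum, positive overflow the maximum. A sketch of the overall conversion rule (the stub itself only runs on the special-pattern result):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    int32_t f2i(float x) {
      if (std::isnan(x)) return 0;                                  // NaN -> 0
      if (x >= static_cast<float>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();                 // saturate up
      if (x <= static_cast<float>(std::numeric_limits<int32_t>::min()))
        return std::numeric_limits<int32_t>::min();                 // saturate down
      return static_cast<int32_t>(x);
    }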
743 Address inout(rsp, 5 * wordSize); // return address + 4 saves 744 Address inout(rsp, 5 * wordSize); // return address + 4 saves
744 address start = __ pc(); 745 address start = __ pc();
745 746
746 Label L; 747 Label L;
747 748
748 __ pushq(rax); 749 __ push(rax);
749 __ pushq(c_rarg3); 750 __ push(c_rarg3);
750 __ pushq(c_rarg2); 751 __ push(c_rarg2);
751 __ pushq(c_rarg1); 752 __ push(c_rarg1);
752 753
753 __ movl(rax, 0x7f800000); 754 __ movl(rax, 0x7f800000);
754 __ xorl(c_rarg3, c_rarg3); 755 __ xorl(c_rarg3, c_rarg3);
755 __ movl(c_rarg2, inout); 756 __ movl(c_rarg2, inout);
756 __ movl(c_rarg1, c_rarg2); 757 __ movl(c_rarg1, c_rarg2);
758 __ cmpl(rax, c_rarg1); // NaN? -> 0 759 __ cmpl(rax, c_rarg1); // NaN? -> 0
759 __ jcc(Assembler::negative, L); 760 __ jcc(Assembler::negative, L);
760 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong 761 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
761 __ mov64(c_rarg3, 0x8000000000000000); 762 __ mov64(c_rarg3, 0x8000000000000000);
762 __ mov64(rax, 0x7fffffffffffffff); 763 __ mov64(rax, 0x7fffffffffffffff);
763 __ cmovq(Assembler::positive, c_rarg3, rax); 764 __ cmov(Assembler::positive, c_rarg3, rax);
764 765
765 __ bind(L); 766 __ bind(L);
766 __ movq(inout, c_rarg3); 767 __ movptr(inout, c_rarg3);
767 768
768 __ popq(c_rarg1); 769 __ pop(c_rarg1);
769 __ popq(c_rarg2); 770 __ pop(c_rarg2);
770 __ popq(c_rarg3); 771 __ pop(c_rarg3);
771 __ popq(rax); 772 __ pop(rax);
772 773
773 __ ret(0); 774 __ ret(0);
774 775
775 return start; 776 return start;
776 } 777 }
781 782
782 address start = __ pc(); 783 address start = __ pc();
783 784
784 Label L; 785 Label L;
785 786
786 __ pushq(rax); 787 __ push(rax);
787 __ pushq(c_rarg3); 788 __ push(c_rarg3);
788 __ pushq(c_rarg2); 789 __ push(c_rarg2);
789 __ pushq(c_rarg1); 790 __ push(c_rarg1);
790 __ pushq(c_rarg0); 791 __ push(c_rarg0);
791 792
792 __ movl(rax, 0x7ff00000); 793 __ movl(rax, 0x7ff00000);
793 __ movq(c_rarg2, inout); 794 __ movq(c_rarg2, inout);
794 __ movl(c_rarg3, c_rarg2); 795 __ movl(c_rarg3, c_rarg2);
795 __ movq(c_rarg1, c_rarg2); 796 __ mov(c_rarg1, c_rarg2);
796 __ movq(c_rarg0, c_rarg2); 797 __ mov(c_rarg0, c_rarg2);
797 __ negl(c_rarg3); 798 __ negl(c_rarg3);
798 __ shrq(c_rarg1, 0x20); 799 __ shrptr(c_rarg1, 0x20);
799 __ orl(c_rarg3, c_rarg2); 800 __ orl(c_rarg3, c_rarg2);
800 __ andl(c_rarg1, 0x7fffffff); 801 __ andl(c_rarg1, 0x7fffffff);
801 __ xorl(c_rarg2, c_rarg2); 802 __ xorl(c_rarg2, c_rarg2);
802 __ shrl(c_rarg3, 0x1f); 803 __ shrl(c_rarg3, 0x1f);
803 __ orl(c_rarg1, c_rarg3); 804 __ orl(c_rarg1, c_rarg3);
804 __ cmpl(rax, c_rarg1); 805 __ cmpl(rax, c_rarg1);
805 __ jcc(Assembler::negative, L); // NaN -> 0 806 __ jcc(Assembler::negative, L); // NaN -> 0
806 __ testq(c_rarg0, c_rarg0); // signed ? min_jint : max_jint 807 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
807 __ movl(c_rarg2, 0x80000000); 808 __ movl(c_rarg2, 0x80000000);
808 __ movl(rax, 0x7fffffff); 809 __ movl(rax, 0x7fffffff);
809 __ cmovl(Assembler::positive, c_rarg2, rax); 810 __ cmov(Assembler::positive, c_rarg2, rax);
810 811
811 __ bind(L); 812 __ bind(L);
812 __ movq(inout, c_rarg2); 813 __ movptr(inout, c_rarg2);
813 814
814 __ popq(c_rarg0); 815 __ pop(c_rarg0);
815 __ popq(c_rarg1); 816 __ pop(c_rarg1);
816 __ popq(c_rarg2); 817 __ pop(c_rarg2);
817 __ popq(c_rarg3); 818 __ pop(c_rarg3);
818 __ popq(rax); 819 __ pop(rax);
819 820
820 __ ret(0); 821 __ ret(0);
821 822
822 return start; 823 return start;
823 } 824 }
828 829
829 address start = __ pc(); 830 address start = __ pc();
830 831
831 Label L; 832 Label L;
832 833
833 __ pushq(rax); 834 __ push(rax);
834 __ pushq(c_rarg3); 835 __ push(c_rarg3);
835 __ pushq(c_rarg2); 836 __ push(c_rarg2);
836 __ pushq(c_rarg1); 837 __ push(c_rarg1);
837 __ pushq(c_rarg0); 838 __ push(c_rarg0);
838 839
839 __ movl(rax, 0x7ff00000); 840 __ movl(rax, 0x7ff00000);
840 __ movq(c_rarg2, inout); 841 __ movq(c_rarg2, inout);
841 __ movl(c_rarg3, c_rarg2); 842 __ movl(c_rarg3, c_rarg2);
842 __ movq(c_rarg1, c_rarg2); 843 __ mov(c_rarg1, c_rarg2);
843 __ movq(c_rarg0, c_rarg2); 844 __ mov(c_rarg0, c_rarg2);
844 __ negl(c_rarg3); 845 __ negl(c_rarg3);
845 __ shrq(c_rarg1, 0x20); 846 __ shrptr(c_rarg1, 0x20);
846 __ orl(c_rarg3, c_rarg2); 847 __ orl(c_rarg3, c_rarg2);
847 __ andl(c_rarg1, 0x7fffffff); 848 __ andl(c_rarg1, 0x7fffffff);
848 __ xorl(c_rarg2, c_rarg2); 849 __ xorl(c_rarg2, c_rarg2);
849 __ shrl(c_rarg3, 0x1f); 850 __ shrl(c_rarg3, 0x1f);
850 __ orl(c_rarg1, c_rarg3); 851 __ orl(c_rarg1, c_rarg3);
856 __ cmovq(Assembler::positive, c_rarg2, rax); 857 __ cmovq(Assembler::positive, c_rarg2, rax);
857 858
858 __ bind(L); 859 __ bind(L);
859 __ movq(inout, c_rarg2); 860 __ movq(inout, c_rarg2);
860 861
861 __ popq(c_rarg0); 862 __ pop(c_rarg0);
862 __ popq(c_rarg1); 863 __ pop(c_rarg1);
863 __ popq(c_rarg2); 864 __ pop(c_rarg2);
864 __ popq(c_rarg3); 865 __ pop(c_rarg3);
865 __ popq(rax); 866 __ pop(rax);
866 867
867 __ ret(0); 868 __ ret(0);
868 869
869 return start; 870 return start;
870 } 871 }
887 // SIGBUS/OBJERR.) 888 // SIGBUS/OBJERR.)
888 address generate_handler_for_unsafe_access() { 889 address generate_handler_for_unsafe_access() {
889 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 890 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
890 address start = __ pc(); 891 address start = __ pc();
891 892
892 __ pushq(0); // hole for return address-to-be 893 __ push(0); // hole for return address-to-be
893 __ pushaq(); // push registers 894 __ pusha(); // push registers
894 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 895 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
895 896
896 __ subq(rsp, frame::arg_reg_save_area_bytes); 897 __ subptr(rsp, frame::arg_reg_save_area_bytes);
897 BLOCK_COMMENT("call handle_unsafe_access"); 898 BLOCK_COMMENT("call handle_unsafe_access");
898 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 899 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
899 __ addq(rsp, frame::arg_reg_save_area_bytes); 900 __ addptr(rsp, frame::arg_reg_save_area_bytes);
900 901
901 __ movq(next_pc, rax); // stuff next address 902 __ movptr(next_pc, rax); // stuff next address
902 __ popaq(); 903 __ popa();
903 __ ret(0); // jump to next address 904 __ ret(0); // jump to next address
904 905
905 return start; 906 return start;
906 } 907 }
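The handler runs after a fault in an Unsafe access: it reserves a return-address slot, saves every register, asks handle_unsafe_access for the continuation pc, writes that pc into the reserved slot, and rets into it. The control-flow shape, simulated with a function pointer standing in for the patched return slot (illustrative only; fake_handle_unsafe_access is a stand-in for the real VM entry):

    #include <cstdio>

    void continuation() { std::puts("resumed past the faulting access"); }

    typedef void (*pc_t)();
    pc_t fake_handle_unsafe_access() { return &continuation; }  // stand-in

    int main() {
      pc_t next_pc = fake_handle_unsafe_access();  // stub stores this in the slot
      next_pc();                                   // the final ret lands here
    }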
907 908
924 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 925 StubCodeMark mark(this, "StubRoutines", "verify_oop");
925 address start = __ pc(); 926 address start = __ pc();
926 927
927 Label exit, error; 928 Label exit, error;
928 929
929 __ pushfq(); 930 __ pushf();
930 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 931 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
931 932
932 __ pushq(r12); 933 __ push(r12);
933 934
934 // save c_rarg2 and c_rarg3 935 // save c_rarg2 and c_rarg3
935 __ pushq(c_rarg2); 936 __ push(c_rarg2);
936 __ pushq(c_rarg3); 937 __ push(c_rarg3);
937 938
938 enum { 939 enum {
939 // After previous pushes. 940 // After previous pushes.
940 oop_to_verify = 6 * wordSize, 941 oop_to_verify = 6 * wordSize,
941 saved_rax = 7 * wordSize, 942 saved_rax = 7 * wordSize,
944 return_addr = 16 * wordSize, 945 return_addr = 16 * wordSize,
945 error_msg = 17 * wordSize 946 error_msg = 17 * wordSize
946 }; 947 };
947 948
948 // get object 949 // get object
949 __ movq(rax, Address(rsp, oop_to_verify)); 950 __ movptr(rax, Address(rsp, oop_to_verify));
950 951
951 // make sure object is 'reasonable' 952 // make sure object is 'reasonable'
952 __ testq(rax, rax); 953 __ testptr(rax, rax);
953 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK 954 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
954 // Check if the oop is in the right area of memory 955 // Check if the oop is in the right area of memory
955 __ movq(c_rarg2, rax); 956 __ movptr(c_rarg2, rax);
956 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask()); 957 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask());
957 __ andq(c_rarg2, c_rarg3); 958 __ andptr(c_rarg2, c_rarg3);
958 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits()); 959 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits());
959 __ cmpq(c_rarg2, c_rarg3); 960 __ cmpptr(c_rarg2, c_rarg3);
960 __ jcc(Assembler::notZero, error); 961 __ jcc(Assembler::notZero, error);
961 962
962 // set r12 to heapbase for load_klass() 963 // set r12 to heapbase for load_klass()
963 __ reinit_heapbase(); 964 __ reinit_heapbase();
964 965
965 // make sure klass is 'reasonable' 966 // make sure klass is 'reasonable'
966 __ load_klass(rax, rax); // get klass 967 __ load_klass(rax, rax); // get klass
967 __ testq(rax, rax); 968 __ testptr(rax, rax);
968 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 969 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
969 // Check if the klass is in the right area of memory 970 // Check if the klass is in the right area of memory
970 __ movq(c_rarg2, rax); 971 __ mov(c_rarg2, rax);
971 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask()); 972 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
972 __ andq(c_rarg2, c_rarg3); 973 __ andptr(c_rarg2, c_rarg3);
973 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits()); 974 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
974 __ cmpq(c_rarg2, c_rarg3); 975 __ cmpptr(c_rarg2, c_rarg3);
975 __ jcc(Assembler::notZero, error); 976 __ jcc(Assembler::notZero, error);
976 977
977 // make sure klass' klass is 'reasonable' 978 // make sure klass' klass is 'reasonable'
978 __ load_klass(rax, rax); 979 __ load_klass(rax, rax);
979 __ testq(rax, rax); 980 __ testptr(rax, rax);
980 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken 981 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
981 // Check if the klass' klass is in the right area of memory 982 // Check if the klass' klass is in the right area of memory
982 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask()); 983 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
983 __ andq(rax, c_rarg3); 984 __ andptr(rax, c_rarg3);
984 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits()); 985 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
985 __ cmpq(rax, c_rarg3); 986 __ cmpptr(rax, c_rarg3);
986 __ jcc(Assembler::notZero, error); 987 __ jcc(Assembler::notZero, error);
987 988
988 // return if everything seems ok 989 // return if everything seems ok
989 __ bind(exit); 990 __ bind(exit);
990 __ movq(rax, Address(rsp, saved_rax)); // get saved rax back 991 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
991 __ popq(c_rarg3); // restore c_rarg3 992 __ pop(c_rarg3); // restore c_rarg3
992 __ popq(c_rarg2); // restore c_rarg2 993 __ pop(c_rarg2); // restore c_rarg2
993 __ popq(r12); // restore r12 994 __ pop(r12); // restore r12
994 __ popfq(); // restore flags 995 __ popf(); // restore flags
995 __ ret(3 * wordSize); // pop caller saved stuff 996 __ ret(3 * wordSize); // pop caller saved stuff
996 997
997 // handle errors 998 // handle errors
998 __ bind(error); 999 __ bind(error);
999 __ movq(rax, Address(rsp, saved_rax)); // get saved rax back 1000 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1000 __ popq(c_rarg3); // get saved c_rarg3 back 1001 __ pop(c_rarg3); // get saved c_rarg3 back
1001 __ popq(c_rarg2); // get saved c_rarg2 back 1002 __ pop(c_rarg2); // get saved c_rarg2 back
1002 __ popq(r12); // get saved r12 back 1003 __ pop(r12); // get saved r12 back
1003 __ popfq(); // get saved flags off stack -- 1004 __ popf(); // get saved flags off stack --
1004 // will be ignored 1005 // will be ignored
1005 1006
1006 __ pushaq(); // push registers 1007 __ pusha(); // push registers
1007 // (rip is already 1008 // (rip is already
1008 // already pushed) 1009 // already pushed)
1009 // debug(char* msg, int64_t pc, int64_t regs[]) 1010 // debug(char* msg, int64_t pc, int64_t regs[])
1010 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and 1011 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
1011 // pushed all the registers, so now the stack looks like: 1012 // pushed all the registers, so now the stack looks like:
1014 // * [tos + 17] error message (char*) 1015 // * [tos + 17] error message (char*)
1015 // * [tos + 18] object to verify (oop) 1016 // * [tos + 18] object to verify (oop)
1016 // * [tos + 19] saved rax - saved by caller and bashed 1017 // * [tos + 19] saved rax - saved by caller and bashed
1017 // * = popped on exit 1018 // * = popped on exit
1018 1019
1019 __ movq(c_rarg0, Address(rsp, error_msg)); // pass address of error message 1020 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
1020 __ movq(c_rarg1, Address(rsp, return_addr)); // pass return address 1021 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address
1021 __ movq(c_rarg2, rsp); // pass address of regs on stack 1022 __ movq(c_rarg2, rsp); // pass address of regs on stack
1022 __ movq(r12, rsp); // remember rsp 1023 __ mov(r12, rsp); // remember rsp
1023 __ subq(rsp, frame::arg_reg_save_area_bytes);// windows 1024 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1024 __ andq(rsp, -16); // align stack as required by ABI 1025 __ andptr(rsp, -16); // align stack as required by ABI
1025 BLOCK_COMMENT("call MacroAssembler::debug"); 1026 BLOCK_COMMENT("call MacroAssembler::debug");
1026 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug))); 1027 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1027 __ movq(rsp, r12); // restore rsp 1028 __ mov(rsp, r12); // restore rsp
1028 __ popaq(); // pop registers (includes r12) 1029 __ popa(); // pop registers (includes r12)
1029 __ ret(3 * wordSize); // pop caller saved stuff 1030 __ ret(3 * wordSize); // pop caller saved stuff
1030 1031
1031 return start; 1032 return start;
1032 } 1033 }
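verify_oop applies the same pattern check three times: to the oop, to its klass, and to the klass' klass. NULL passes only for the oop itself; a NULL klass is reported as broken. The check, restated in C++ (mask and bits come from Universe::verify_oop_mask()/bits() and the klass variants):

    #include <cstdint>

    bool in_expected_region(uintptr_t value, uintptr_t mask, uintptr_t bits) {
      return (value & mask) == bits;   // value lies where such objects may live
    }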
1033 1034
1034 static address disjoint_byte_copy_entry; 1035 static address disjoint_byte_copy_entry;
1086 const Register from = c_rarg0; 1087 const Register from = c_rarg0;
1087 const Register to = c_rarg1; 1088 const Register to = c_rarg1;
1088 const Register count = c_rarg2; 1089 const Register count = c_rarg2;
1089 const Register end_from = rax; 1090 const Register end_from = rax;
1090 1091
1091 __ cmpq(to, from); 1092 __ cmpptr(to, from);
1092 __ leaq(end_from, Address(from, count, sf, 0)); 1093 __ lea(end_from, Address(from, count, sf, 0));
1093 if (NOLp == NULL) { 1094 if (NOLp == NULL) {
1094 ExternalAddress no_overlap(no_overlap_target); 1095 ExternalAddress no_overlap(no_overlap_target);
1095 __ jump_cc(Assembler::belowEqual, no_overlap); 1096 __ jump_cc(Assembler::belowEqual, no_overlap);
1096 __ cmpq(to, end_from); 1097 __ cmpptr(to, end_from);
1097 __ jump_cc(Assembler::aboveEqual, no_overlap); 1098 __ jump_cc(Assembler::aboveEqual, no_overlap);
1098 } else { 1099 } else {
1099 __ jcc(Assembler::belowEqual, (*NOLp)); 1100 __ jcc(Assembler::belowEqual, (*NOLp));
1100 __ cmpq(to, end_from); 1101 __ cmpptr(to, end_from);
1101 __ jcc(Assembler::aboveEqual, (*NOLp)); 1102 __ jcc(Assembler::aboveEqual, (*NOLp));
1102 } 1103 }
1103 } 1104 }
1104 1105
1105 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1106 // Shuffle first three arg regs on Windows into Linux/Solaris locations.
1119 assert(nargs == 3 || nargs == 4, "else fix"); 1120 assert(nargs == 3 || nargs == 4, "else fix");
1120 #ifdef _WIN64 1121 #ifdef _WIN64
1121 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1122 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1122 "unexpected argument registers"); 1123 "unexpected argument registers");
1123 if (nargs >= 4) 1124 if (nargs >= 4)
1124 __ movq(rax, r9); // r9 is also saved_rdi 1125 __ mov(rax, r9); // r9 is also saved_rdi
1125 __ movq(saved_rdi, rdi); 1126 __ movptr(saved_rdi, rdi);
1126 __ movq(saved_rsi, rsi); 1127 __ movptr(saved_rsi, rsi);
1127 __ movq(rdi, rcx); // c_rarg0 1128 __ mov(rdi, rcx); // c_rarg0
1128 __ movq(rsi, rdx); // c_rarg1 1129 __ mov(rsi, rdx); // c_rarg1
1129 __ movq(rdx, r8); // c_rarg2 1130 __ mov(rdx, r8); // c_rarg2
1130 if (nargs >= 4) 1131 if (nargs >= 4)
1131 __ movq(rcx, rax); // c_rarg3 (via rax) 1132 __ mov(rcx, rax); // c_rarg3 (via rax)
1132 #else 1133 #else
1133 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1134 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1134 "unexpected argument registers"); 1135 "unexpected argument registers");
1135 #endif 1136 #endif
1136 } 1137 }
1137 1138
1138 void restore_arg_regs() { 1139 void restore_arg_regs() {
1139 const Register saved_rdi = r9; 1140 const Register saved_rdi = r9;
1140 const Register saved_rsi = r10; 1141 const Register saved_rsi = r10;
1141 #ifdef _WIN64 1142 #ifdef _WIN64
1142 __ movq(rdi, saved_rdi); 1143 __ movptr(rdi, saved_rdi);
1143 __ movq(rsi, saved_rsi); 1144 __ movptr(rsi, saved_rsi);
1144 #endif 1145 #endif
1145 } 1146 }
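setup_arg_regs exists because the copy stub bodies are written against the SysV argument registers while Win64 delivers arguments in rcx/rdx/r8/r9; rdi and rsi are callee-saved on Windows, hence the save and restore, and r9 is staged through rax because r9 doubles as the rdi save register. The mapping, as data (illustrative only):

    #include <cstdio>

    int main() {
      // Win64 incoming register -> SysV register the stub bodies expect
      const char* map[][2] = {
        {"rcx", "rdi"},   // c_rarg0
        {"rdx", "rsi"},   // c_rarg1
        {"r8",  "rdx"},   // c_rarg2
        {"r9",  "rcx"},   // c_rarg3, staged through rax
      };
      for (auto& m : map)
        std::printf("mov %s, %s\n", m[1], m[0]);
      return 0;
    }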
1146 1147
1147 // Generate code for an array write pre barrier 1148 // Generate code for an array write pre barrier
1148 // 1149 //
1158 BarrierSet* bs = Universe::heap()->barrier_set(); 1159 BarrierSet* bs = Universe::heap()->barrier_set();
1159 switch (bs->kind()) { 1160 switch (bs->kind()) {
1160 case BarrierSet::G1SATBCT: 1161 case BarrierSet::G1SATBCT:
1161 case BarrierSet::G1SATBCTLogging: 1162 case BarrierSet::G1SATBCTLogging:
1162 { 1163 {
1163 __ pushaq(); // push registers 1164 __ pusha(); // push registers
1164 __ movq(c_rarg0, addr); 1165 __ movptr(c_rarg0, addr);
1165 __ movq(c_rarg1, count); 1166 __ movptr(c_rarg1, count);
1166 __ call(RuntimeAddress(BarrierSet::static_write_ref_array_pre)); 1167 __ call(RuntimeAddress(BarrierSet::static_write_ref_array_pre));
1167 __ popaq(); 1168 __ popa();
1168 } 1169 }
1169 break; 1170 break;
1170 case BarrierSet::CardTableModRef: 1171 case BarrierSet::CardTableModRef:
1171 case BarrierSet::CardTableExtension: 1172 case BarrierSet::CardTableExtension:
1172 case BarrierSet::ModRef: 1173 case BarrierSet::ModRef:
1195 #if 0 // G1 - only 1196 #if 0 // G1 - only
1196 case BarrierSet::G1SATBCT: 1197 case BarrierSet::G1SATBCT:
1197 case BarrierSet::G1SATBCTLogging: 1198 case BarrierSet::G1SATBCTLogging:
1198 1199
1199 { 1200 {
1200 __ pushaq(); // push registers (overkill) 1201 __ pusha(); // push registers (overkill)
1201 // must compute element count unless barrier set interface is changed (other platforms supply count) 1202 // must compute element count unless barrier set interface is changed (other platforms supply count)
1202 assert_different_registers(start, end, scratch); 1203 assert_different_registers(start, end, scratch);
1203 __ leaq(scratch, Address(end, wordSize)); 1204 __ lea(scratch, Address(end, wordSize));
1204 __ subq(scratch, start); 1205 __ subptr(scratch, start);
1205 __ shrq(scratch, LogBytesPerWord); 1206 __ shrptr(scratch, LogBytesPerWord);
1206 __ movq(c_rarg0, start); 1207 __ mov(c_rarg0, start);
1207 __ movq(c_rarg1, scratch); 1208 __ mov(c_rarg1, scratch);
1208 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post))); 1209 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
1209 __ popaq(); 1210 __ popa();
1210 } 1211 }
1211 break; 1212 break;
1212 #endif // 0 G1 - only 1213 #endif // 0 G1 - only
1213 case BarrierSet::CardTableModRef: 1214 case BarrierSet::CardTableModRef:
1214 case BarrierSet::CardTableExtension: 1215 case BarrierSet::CardTableExtension:
1216 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 1217 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1217 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1218 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1218 1219
1219 Label L_loop; 1220 Label L_loop;
1220 1221
1221 __ shrq(start, CardTableModRefBS::card_shift); 1222 __ shrptr(start, CardTableModRefBS::card_shift);
1222 __ shrq(end, CardTableModRefBS::card_shift); 1223 __ shrptr(end, CardTableModRefBS::card_shift);
1223 __ subq(end, start); // number of bytes to copy 1224 __ subptr(end, start); // number of bytes to copy
1224 1225
1225 intptr_t disp = (intptr_t) ct->byte_map_base; 1226 intptr_t disp = (intptr_t) ct->byte_map_base;
1226 if (__ is_simm32(disp)) { 1227 if (__ is_simm32(disp)) {
1227 Address cardtable(noreg, noreg, Address::no_scale, disp); 1228 Address cardtable(noreg, noreg, Address::no_scale, disp);
1228 __ lea(scratch, cardtable); 1229 __ lea(scratch, cardtable);
1229 } else { 1230 } else {
1230 ExternalAddress cardtable((address)disp); 1231 ExternalAddress cardtable((address)disp);
1231 __ lea(scratch, cardtable); 1232 __ lea(scratch, cardtable);
1232 } 1233 }
1233 1234
1234 const Register count = end; // 'end' register contains bytes count now 1235 const Register count = end; // 'end' register contains bytes count now
1235 __ addq(start, scratch); 1236 __ addptr(start, scratch);
1236 __ BIND(L_loop); 1237 __ BIND(L_loop);
1237 __ movb(Address(start, count, Address::times_1), 0); 1238 __ movb(Address(start, count, Address::times_1), 0);
1238 __ decrementq(count); 1239 __ decrement(count);
1239 __ jcc(Assembler::greaterEqual, L_loop); 1240 __ jcc(Assembler::greaterEqual, L_loop);
1240 } 1241 }
1241 } 1242 }
1242 } 1243 }
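The CardTableModRef case dirties one byte in the card table for every card the copied oop range touches: both bounds are shifted right by card_shift and the loop stores 0 (HotSpot's dirty value) into each card between them, inclusive. The same loop in C++, assuming start and end delimit the last words of the covered range as the stub's callers arrange:

    #include <cstdint>

    void post_barrier(uintptr_t start, uintptr_t end,
                      volatile uint8_t* byte_map_base, unsigned card_shift) {
      for (uintptr_t c = start >> card_shift; c <= (end >> card_shift); ++c)
        byte_map_base[c] = 0;   // 0 == dirty card
    }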
1243 1244
1265 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1266 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
1266 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1267 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
1267 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1268 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
1268 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1269 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
1269 __ BIND(L_copy_32_bytes); 1270 __ BIND(L_copy_32_bytes);
1270 __ addq(qword_count, 4); 1271 __ addptr(qword_count, 4);
1271 __ jcc(Assembler::lessEqual, L_loop); 1272 __ jcc(Assembler::lessEqual, L_loop);
1272 __ subq(qword_count, 4); 1273 __ subptr(qword_count, 4);
1273 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1274 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
1274 } 1275 }
1275 1276
1276 1277
1277 // Copy big chunks backward 1278 // Copy big chunks backward
1298 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1299 __ movq(to, Address(from, qword_count, Address::times_8, 8));
1299 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1300 __ movq(Address(dest, qword_count, Address::times_8, 8), to);
1300 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1301 __ movq(to, Address(from, qword_count, Address::times_8, 0));
1301 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1302 __ movq(Address(dest, qword_count, Address::times_8, 0), to);
1302 __ BIND(L_copy_32_bytes); 1303 __ BIND(L_copy_32_bytes);
1303 __ subq(qword_count, 4); 1304 __ subptr(qword_count, 4);
1304 __ jcc(Assembler::greaterEqual, L_loop); 1305 __ jcc(Assembler::greaterEqual, L_loop);
1305 __ addq(qword_count, 4); 1306 __ addptr(qword_count, 4);
1306 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1307 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
1307 } 1308 }
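Both 32-byte copy loops drive a single induction variable off the flags of the add/sub: the forward variant biases end_from/end_to toward the end of the data and runs a negative qword index up to zero, while the backward variant counts a positive index down. The forward shape, per qword (a sketch; the real loop moves four qwords per iteration):

    #include <cstddef>
    #include <cstdint>

    void copy_qwords_forward(const int64_t* from, int64_t* to,
                             ptrdiff_t qwords) {
      const int64_t* end_from = from + qwords;  // bias bases past the data
      int64_t*       end_to   = to   + qwords;
      for (ptrdiff_t i = -qwords; i != 0; ++i)  // negative index rises to zero
        end_to[i] = end_from[i];
    }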
1308 1309
1309 1310
1310 // Arguments: 1311 // Arguments:
1352 1353
1353 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1354 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1354 // r9 and r10 may be used to save non-volatile registers 1355 // r9 and r10 may be used to save non-volatile registers
1355 1356
1356 // 'from', 'to' and 'count' are now valid 1357 // 'from', 'to' and 'count' are now valid
1357 __ movq(byte_count, count); 1358 __ movptr(byte_count, count);
1358 __ shrq(count, 3); // count => qword_count 1359 __ shrptr(count, 3); // count => qword_count
1359 1360
1360 // Copy from low to high addresses. Use 'to' as scratch. 1361 // Copy from low to high addresses. Use 'to' as scratch.
1361 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); 1362 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1362 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); 1363 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1363 __ negq(qword_count); // make the count negative 1364 __ negptr(qword_count); // make the count negative
1364 __ jmp(L_copy_32_bytes); 1365 __ jmp(L_copy_32_bytes);
1365 1366
1366 // Copy trailing qwords 1367 // Copy trailing qwords
1367 __ BIND(L_copy_8_bytes); 1368 __ BIND(L_copy_8_bytes);
1368 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1369 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1369 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1370 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1370 __ incrementq(qword_count); 1371 __ increment(qword_count);
1371 __ jcc(Assembler::notZero, L_copy_8_bytes); 1372 __ jcc(Assembler::notZero, L_copy_8_bytes);
1372 1373
1373 // Check for and copy trailing dword 1374 // Check for and copy trailing dword
1374 __ BIND(L_copy_4_bytes); 1375 __ BIND(L_copy_4_bytes);
1375 __ testq(byte_count, 4); 1376 __ testl(byte_count, 4);
1376 __ jccb(Assembler::zero, L_copy_2_bytes); 1377 __ jccb(Assembler::zero, L_copy_2_bytes);
1377 __ movl(rax, Address(end_from, 8)); 1378 __ movl(rax, Address(end_from, 8));
1378 __ movl(Address(end_to, 8), rax); 1379 __ movl(Address(end_to, 8), rax);
1379 1380
1380 __ addq(end_from, 4); 1381 __ addptr(end_from, 4);
1381 __ addq(end_to, 4); 1382 __ addptr(end_to, 4);
1382 1383
1383 // Check for and copy trailing word 1384 // Check for and copy trailing word
1384 __ BIND(L_copy_2_bytes); 1385 __ BIND(L_copy_2_bytes);
1385 __ testq(byte_count, 2); 1386 __ testl(byte_count, 2);
1386 __ jccb(Assembler::zero, L_copy_byte); 1387 __ jccb(Assembler::zero, L_copy_byte);
1387 __ movw(rax, Address(end_from, 8)); 1388 __ movw(rax, Address(end_from, 8));
1388 __ movw(Address(end_to, 8), rax); 1389 __ movw(Address(end_to, 8), rax);
1389 1390
1390 __ addq(end_from, 2); 1391 __ addptr(end_from, 2);
1391 __ addq(end_to, 2); 1392 __ addptr(end_to, 2);
1392 1393
1393 // Check for and copy trailing byte 1394 // Check for and copy trailing byte
1394 __ BIND(L_copy_byte); 1395 __ BIND(L_copy_byte);
1395 __ testq(byte_count, 1); 1396 __ testl(byte_count, 1);
1396 __ jccb(Assembler::zero, L_exit); 1397 __ jccb(Assembler::zero, L_exit);
1397 __ movb(rax, Address(end_from, 8)); 1398 __ movb(rax, Address(end_from, 8));
1398 __ movb(Address(end_to, 8), rax); 1399 __ movb(Address(end_to, 8), rax);
1399 1400
1400 __ BIND(L_exit); 1401 __ BIND(L_exit);
1401 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); 1402 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1402 restore_arg_regs(); 1403 restore_arg_regs();
1403 __ xorq(rax, rax); // return 0 1404 __ xorptr(rax, rax); // return 0
1404 __ leave(); // required for proper stackwalking of RuntimeStub frame 1405 __ leave(); // required for proper stackwalking of RuntimeStub frame
1405 __ ret(0); 1406 __ ret(0);
1406 1407
1407 // Copy in 32-bytes chunks 1408 // Copy in 32-bytes chunks
1408 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1409 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1448 array_overlap_test(disjoint_byte_copy_entry, Address::times_1); 1449 array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
1449 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1450 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1450 // r9 and r10 may be used to save non-volatile registers 1451 // r9 and r10 may be used to save non-volatile registers
1451 1452
1452 // 'from', 'to' and 'count' are now valid 1453 // 'from', 'to' and 'count' are now valid
1453 __ movq(byte_count, count); 1454 __ movptr(byte_count, count);
1454 __ shrq(count, 3); // count => qword_count 1455 __ shrptr(count, 3); // count => qword_count
1455 1456
1456 // Copy from high to low addresses. 1457 // Copy from high to low addresses.
1457 1458
1458 // Check for and copy trailing byte 1459 // Check for and copy trailing byte
1459 __ testq(byte_count, 1); 1460 __ testl(byte_count, 1);
1460 __ jcc(Assembler::zero, L_copy_2_bytes); 1461 __ jcc(Assembler::zero, L_copy_2_bytes);
1461 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); 1462 __ movb(rax, Address(from, byte_count, Address::times_1, -1));
1462 __ movb(Address(to, byte_count, Address::times_1, -1), rax); 1463 __ movb(Address(to, byte_count, Address::times_1, -1), rax);
1463 __ decrementq(byte_count); // Adjust for possible trailing word 1464 __ decrement(byte_count); // Adjust for possible trailing word
1464 1465
1465 // Check for and copy trailing word 1466 // Check for and copy trailing word
1466 __ BIND(L_copy_2_bytes); 1467 __ BIND(L_copy_2_bytes);
1467 __ testq(byte_count, 2); 1468 __ testl(byte_count, 2);
1468 __ jcc(Assembler::zero, L_copy_4_bytes); 1469 __ jcc(Assembler::zero, L_copy_4_bytes);
1469 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); 1470 __ movw(rax, Address(from, byte_count, Address::times_1, -2));
1470 __ movw(Address(to, byte_count, Address::times_1, -2), rax); 1471 __ movw(Address(to, byte_count, Address::times_1, -2), rax);
1471 1472
1472 // Check for and copy trailing dword 1473 // Check for and copy trailing dword
1473 __ BIND(L_copy_4_bytes); 1474 __ BIND(L_copy_4_bytes);
1474 __ testq(byte_count, 4); 1475 __ testl(byte_count, 4);
1475 __ jcc(Assembler::zero, L_copy_32_bytes); 1476 __ jcc(Assembler::zero, L_copy_32_bytes);
1476 __ movl(rax, Address(from, qword_count, Address::times_8)); 1477 __ movl(rax, Address(from, qword_count, Address::times_8));
1477 __ movl(Address(to, qword_count, Address::times_8), rax); 1478 __ movl(Address(to, qword_count, Address::times_8), rax);
1478 __ jmp(L_copy_32_bytes); 1479 __ jmp(L_copy_32_bytes);
1479 1480
1480 // Copy trailing qwords 1481 // Copy trailing qwords
1481 __ BIND(L_copy_8_bytes); 1482 __ BIND(L_copy_8_bytes);
1482 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1483 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1483 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1484 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1484 __ decrementq(qword_count); 1485 __ decrement(qword_count);
1485 __ jcc(Assembler::notZero, L_copy_8_bytes); 1486 __ jcc(Assembler::notZero, L_copy_8_bytes);
1486 1487
1487 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); 1488 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1488 restore_arg_regs(); 1489 restore_arg_regs();
1489 __ xorq(rax, rax); // return 0 1490 __ xorptr(rax, rax); // return 0
1490 __ leave(); // required for proper stackwalking of RuntimeStub frame 1491 __ leave(); // required for proper stackwalking of RuntimeStub frame
1491 __ ret(0); 1492 __ ret(0);
1492 1493
1493 // Copy in 32-bytes chunks 1494 // Copy in 32-bytes chunks
1494 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1495 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1495 1496
1496 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); 1497 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1497 restore_arg_regs(); 1498 restore_arg_regs();
1498 __ xorq(rax, rax); // return 0 1499 __ xorptr(rax, rax); // return 0
1499 __ leave(); // required for proper stackwalking of RuntimeStub frame 1500 __ leave(); // required for proper stackwalking of RuntimeStub frame
1500 __ ret(0); 1501 __ ret(0);
1501 1502
1502 return start; 1503 return start;
1503 } 1504 }
1546 1547
1547 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1548 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1548 // r9 and r10 may be used to save non-volatile registers 1549 // r9 and r10 may be used to save non-volatile registers
1549 1550
1550 // 'from', 'to' and 'count' are now valid 1551 // 'from', 'to' and 'count' are now valid
1551 __ movq(word_count, count); 1552 __ movptr(word_count, count);
1552 __ shrq(count, 2); // count => qword_count 1553 __ shrptr(count, 2); // count => qword_count
1553 1554
1554 // Copy from low to high addresses. Use 'to' as scratch. 1555 // Copy from low to high addresses. Use 'to' as scratch.
1555 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); 1556 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1556 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); 1557 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1557 __ negq(qword_count); 1558 __ negptr(qword_count);
1558 __ jmp(L_copy_32_bytes); 1559 __ jmp(L_copy_32_bytes);
1559 1560
1560 // Copy trailing qwords 1561 // Copy trailing qwords
1561 __ BIND(L_copy_8_bytes); 1562 __ BIND(L_copy_8_bytes);
1562 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1563 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1563 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1564 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1564 __ incrementq(qword_count); 1565 __ increment(qword_count);
1565 __ jcc(Assembler::notZero, L_copy_8_bytes); 1566 __ jcc(Assembler::notZero, L_copy_8_bytes);
1566 1567
1567 // Original 'dest' is trashed, so we can't use it as a 1568 // Original 'dest' is trashed, so we can't use it as a
1568 // base register for a possible trailing word copy 1569 // base register for a possible trailing word copy
1569 1570
1570 // Check for and copy trailing dword 1571 // Check for and copy trailing dword
1571 __ BIND(L_copy_4_bytes); 1572 __ BIND(L_copy_4_bytes);
1572 __ testq(word_count, 2); 1573 __ testl(word_count, 2);
1573 __ jccb(Assembler::zero, L_copy_2_bytes); 1574 __ jccb(Assembler::zero, L_copy_2_bytes);
1574 __ movl(rax, Address(end_from, 8)); 1575 __ movl(rax, Address(end_from, 8));
1575 __ movl(Address(end_to, 8), rax); 1576 __ movl(Address(end_to, 8), rax);
1576 1577
1577 __ addq(end_from, 4); 1578 __ addptr(end_from, 4);
1578 __ addq(end_to, 4); 1579 __ addptr(end_to, 4);
1579 1580
1580 // Check for and copy trailing word 1581 // Check for and copy trailing word
1581 __ BIND(L_copy_2_bytes); 1582 __ BIND(L_copy_2_bytes);
1582 __ testq(word_count, 1); 1583 __ testl(word_count, 1);
1583 __ jccb(Assembler::zero, L_exit); 1584 __ jccb(Assembler::zero, L_exit);
1584 __ movw(rax, Address(end_from, 8)); 1585 __ movw(rax, Address(end_from, 8));
1585 __ movw(Address(end_to, 8), rax); 1586 __ movw(Address(end_to, 8), rax);
1586 1587
1587 __ BIND(L_exit); 1588 __ BIND(L_exit);
1588 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); 1589 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1589 restore_arg_regs(); 1590 restore_arg_regs();
1590 __ xorq(rax, rax); // return 0 1591 __ xorptr(rax, rax); // return 0
1591 __ leave(); // required for proper stackwalking of RuntimeStub frame 1592 __ leave(); // required for proper stackwalking of RuntimeStub frame
1592 __ ret(0); 1593 __ ret(0);
1593 1594
1594 // Copy in 32-bytes chunks 1595 // Copy in 32-bytes chunks
1595 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1596 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1635 array_overlap_test(disjoint_short_copy_entry, Address::times_2); 1636 array_overlap_test(disjoint_short_copy_entry, Address::times_2);
1636 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1637 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1637 // r9 and r10 may be used to save non-volatile registers 1638 // r9 and r10 may be used to save non-volatile registers
1638 1639
1639 // 'from', 'to' and 'count' are now valid 1640 // 'from', 'to' and 'count' are now valid
1640 __ movq(word_count, count); 1641 __ movptr(word_count, count);
1641 __ shrq(count, 2); // count => qword_count 1642 __ shrptr(count, 2); // count => qword_count
1642 1643
1643 // Copy from high to low addresses. Use 'to' as scratch. 1644 // Copy from high to low addresses. Use 'to' as scratch.
1644 1645
1645 // Check for and copy trailing word 1646 // Check for and copy trailing word
1646 __ testq(word_count, 1); 1647 __ testl(word_count, 1);
1647 __ jccb(Assembler::zero, L_copy_4_bytes); 1648 __ jccb(Assembler::zero, L_copy_4_bytes);
1648 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1649 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1649 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1650 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1650 1651
1651 // Check for and copy trailing dword 1652 // Check for and copy trailing dword
1652 __ BIND(L_copy_4_bytes); 1653 __ BIND(L_copy_4_bytes);
1653 __ testq(word_count, 2); 1654 __ testl(word_count, 2);
1654 __ jcc(Assembler::zero, L_copy_32_bytes); 1655 __ jcc(Assembler::zero, L_copy_32_bytes);
1655 __ movl(rax, Address(from, qword_count, Address::times_8)); 1656 __ movl(rax, Address(from, qword_count, Address::times_8));
1656 __ movl(Address(to, qword_count, Address::times_8), rax); 1657 __ movl(Address(to, qword_count, Address::times_8), rax);
1657 __ jmp(L_copy_32_bytes); 1658 __ jmp(L_copy_32_bytes);
1658 1659
1659 // Copy trailing qwords 1660 // Copy trailing qwords
1660 __ BIND(L_copy_8_bytes); 1661 __ BIND(L_copy_8_bytes);
1661 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1662 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1662 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1663 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1663 __ decrementq(qword_count); 1664 __ decrement(qword_count);
1664 __ jcc(Assembler::notZero, L_copy_8_bytes); 1665 __ jcc(Assembler::notZero, L_copy_8_bytes);
1665 1666
1666 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); 1667 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1667 restore_arg_regs(); 1668 restore_arg_regs();
1668 __ xorq(rax, rax); // return 0 1669 __ xorptr(rax, rax); // return 0
1669 __ leave(); // required for proper stackwalking of RuntimeStub frame 1670 __ leave(); // required for proper stackwalking of RuntimeStub frame
1670 __ ret(0); 1671 __ ret(0);
1671 1672
1672 // Copy in 32-bytes chunks 1673 // Copy in 32-bytes chunks
1673 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1674 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1674 1675
1675 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); 1676 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1676 restore_arg_regs(); 1677 restore_arg_regs();
1677 __ xorq(rax, rax); // return 0 1678 __ xorptr(rax, rax); // return 0
1678 __ leave(); // required for proper stackwalking of RuntimeStub frame 1679 __ leave(); // required for proper stackwalking of RuntimeStub frame
1679 __ ret(0); 1680 __ ret(0);
1680 1681
1681 return start; 1682 return start;
1682 } 1683 }
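For reference, a minimal C++ sketch of the tail-splitting scheme used by the conjoint short copy above, assuming hypothetical plain-pointer arguments rather than the stub's register conventions; the 32-byte unrolled chunk loop (copy_32_bytes_backward) is collapsed into the plain qword loop here:

#include <cstddef>
#include <cstring>

// Backward (high-to-low) copy of 'word_count' 16-bit elements, mirroring the
// stub: one trailing word if the count is odd, one trailing dword if bit 1 is
// set, then whole qwords. memmove keeps each piece overlap-safe.
static void conjoint_jshort_copy_sketch(const char* from, char* to,
                                        size_t word_count) {
  size_t qword_count = word_count >> 2;                 // count => qword_count
  if (word_count & 1)                                   // trailing word
    std::memmove(to + 2*word_count - 2, from + 2*word_count - 2, 2);
  if (word_count & 2)                                   // trailing dword
    std::memmove(to + 8*qword_count, from + 8*qword_count, 4);
  while (qword_count != 0) {                            // qwords, descending
    --qword_count;
    std::memmove(to + 8*qword_count, from + 8*qword_count, 8);
  }
}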
1736 if (is_oop) { 1737 if (is_oop) {
1737 __ movq(saved_to, to); 1738 __ movq(saved_to, to);
1738 } 1739 }
1739 1740
1740 // 'from', 'to' and 'count' are now valid 1741 // 'from', 'to' and 'count' are now valid
1741 __ movq(dword_count, count); 1742 __ movptr(dword_count, count);
1742 __ shrq(count, 1); // count => qword_count 1743 __ shrptr(count, 1); // count => qword_count
1743 1744
1744 // Copy from low to high addresses. Use 'to' as scratch. 1745 // Copy from low to high addresses. Use 'to' as scratch.
1745 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); 1746 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1746 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); 1747 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1747 __ negq(qword_count); 1748 __ negptr(qword_count);
1748 __ jmp(L_copy_32_bytes); 1749 __ jmp(L_copy_32_bytes);
1749 1750
1750 // Copy trailing qwords 1751 // Copy trailing qwords
1751 __ BIND(L_copy_8_bytes); 1752 __ BIND(L_copy_8_bytes);
1752 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1753 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1753 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1754 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1754 __ incrementq(qword_count); 1755 __ increment(qword_count);
1755 __ jcc(Assembler::notZero, L_copy_8_bytes); 1756 __ jcc(Assembler::notZero, L_copy_8_bytes);
1756 1757
1757 // Check for and copy trailing dword 1758 // Check for and copy trailing dword
1758 __ BIND(L_copy_4_bytes); 1759 __ BIND(L_copy_4_bytes);
1759 __ testq(dword_count, 1); // Only byte test since the value is 0 or 1 1760 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1760 __ jccb(Assembler::zero, L_exit); 1761 __ jccb(Assembler::zero, L_exit);
1761 __ movl(rax, Address(end_from, 8)); 1762 __ movl(rax, Address(end_from, 8));
1762 __ movl(Address(end_to, 8), rax); 1763 __ movl(Address(end_to, 8), rax);
1763 1764
1764 __ BIND(L_exit); 1765 __ BIND(L_exit);
1765 if (is_oop) { 1766 if (is_oop) {
1766 __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4)); 1767 __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1767 gen_write_ref_array_post_barrier(saved_to, end_to, rax); 1768 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1768 } 1769 }
1769 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); 1770 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1770 restore_arg_regs(); 1771 restore_arg_regs();
1771 __ xorq(rax, rax); // return 0 1772 __ xorptr(rax, rax); // return 0
1772 __ leave(); // required for proper stackwalking of RuntimeStub frame 1773 __ leave(); // required for proper stackwalking of RuntimeStub frame
1773 __ ret(0); 1774 __ ret(0);
1774 1775
1775 // Copy 32-byte chunks 1776 // Copy 32-byte chunks
1776 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1777 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1823 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1824 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1824 // r9 and r10 may be used to save non-volatile registers 1825 // r9 and r10 may be used to save non-volatile registers
1825 1826
1826 assert_clean_int(count, rax); // Make sure 'count' is clean int. 1827 assert_clean_int(count, rax); // Make sure 'count' is clean int.
1827 // 'from', 'to' and 'count' are now valid 1828 // 'from', 'to' and 'count' are now valid
1828 __ movq(dword_count, count); 1829 __ movptr(dword_count, count);
1829 __ shrq(count, 1); // count => qword_count 1830 __ shrptr(count, 1); // count => qword_count
1830 1831
1831 // Copy from high to low addresses. Use 'to' as scratch. 1832 // Copy from high to low addresses. Use 'to' as scratch.
1832 1833
1833 // Check for and copy trailing dword 1834 // Check for and copy trailing dword
1834 __ testq(dword_count, 1); 1835 __ testl(dword_count, 1);
1835 __ jcc(Assembler::zero, L_copy_32_bytes); 1836 __ jcc(Assembler::zero, L_copy_32_bytes);
1836 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 1837 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1837 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 1838 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1838 __ jmp(L_copy_32_bytes); 1839 __ jmp(L_copy_32_bytes);
1839 1840
1840 // Copy trailing qwords 1841 // Copy trailing qwords
1841 __ BIND(L_copy_8_bytes); 1842 __ BIND(L_copy_8_bytes);
1842 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1843 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1843 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1844 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1844 __ decrementq(qword_count); 1845 __ decrement(qword_count);
1845 __ jcc(Assembler::notZero, L_copy_8_bytes); 1846 __ jcc(Assembler::notZero, L_copy_8_bytes);
1846 1847
1847 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); 1848 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1848 if (is_oop) { 1849 if (is_oop) {
1849 __ jmp(L_exit); 1850 __ jmp(L_exit);
1850 } 1851 }
1851 restore_arg_regs(); 1852 restore_arg_regs();
1852 __ xorq(rax, rax); // return 0 1853 __ xorptr(rax, rax); // return 0
1853 __ leave(); // required for proper stackwalking of RuntimeStub frame 1854 __ leave(); // required for proper stackwalking of RuntimeStub frame
1854 __ ret(0); 1855 __ ret(0);
1855 1856
1856 // Copy in 32-byte chunks 1857 // Copy in 32-byte chunks
1857 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1858 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1862 Register end_to = rdx; 1863 Register end_to = rdx;
1863 __ leaq(end_to, Address(to, dword_count, Address::times_4, -4)); 1864 __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
1864 gen_write_ref_array_post_barrier(to, end_to, rax); 1865 gen_write_ref_array_post_barrier(to, end_to, rax);
1865 } 1866 }
1866 restore_arg_regs(); 1867 restore_arg_regs();
1867 __ xorq(rax, rax); // return 0 1868 __ xorptr(rax, rax); // return 0
1868 __ leave(); // required for proper stackwalking of RuntimeStub frame 1869 __ leave(); // required for proper stackwalking of RuntimeStub frame
1869 __ ret(0); 1870 __ ret(0);
1870 1871
1871 return start; 1872 return start;
1872 } 1873 }
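The conjoint stubs in this file open with array_overlap_test (visible at the entry of the short-copy stub above), which branches to the corresponding disjoint stub whenever a forward copy is safe. A sketch of the predicate being tested, with assumed plain arguments (the stub's exact instruction sequence is elided from this diff):

#include <cstddef>
#include <cstdint>

// Forward copying is destructive only when 'to' starts inside the source
// range. The unsigned compare also covers to < from, where (t - f) wraps.
static bool needs_backward_copy(const void* from, const void* to,
                                size_t count, size_t elem_size) {
  uintptr_t f = (uintptr_t)from, t = (uintptr_t)to;
  return (t - f) < count * elem_size;
}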
1919 // r9 and r10 may be used to save non-volatile registers 1920 // r9 and r10 may be used to save non-volatile registers
1920 1921
1921 // 'from', 'to' and 'qword_count' are now valid 1922 // 'from', 'to' and 'qword_count' are now valid
1922 1923
1923 // Copy from low to high addresses. Use 'to' as scratch. 1924 // Copy from low to high addresses. Use 'to' as scratch.
1924 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); 1925 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1925 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); 1926 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1926 __ negq(qword_count); 1927 __ negptr(qword_count);
1927 __ jmp(L_copy_32_bytes); 1928 __ jmp(L_copy_32_bytes);
1928 1929
1929 // Copy trailing qwords 1930 // Copy trailing qwords
1930 __ BIND(L_copy_8_bytes); 1931 __ BIND(L_copy_8_bytes);
1931 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1932 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1932 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1933 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1933 __ incrementq(qword_count); 1934 __ increment(qword_count);
1934 __ jcc(Assembler::notZero, L_copy_8_bytes); 1935 __ jcc(Assembler::notZero, L_copy_8_bytes);
1935 1936
1936 if (is_oop) { 1937 if (is_oop) {
1937 __ jmp(L_exit); 1938 __ jmp(L_exit);
1938 } else { 1939 } else {
1939 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); 1940 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1940 restore_arg_regs(); 1941 restore_arg_regs();
1941 __ xorq(rax, rax); // return 0 1942 __ xorptr(rax, rax); // return 0
1942 __ leave(); // required for proper stackwalking of RuntimeStub frame 1943 __ leave(); // required for proper stackwalking of RuntimeStub frame
1943 __ ret(0); 1944 __ ret(0);
1944 } 1945 }
1945 1946
1946 // Copy 64-byte chunks 1947 // Copy 64-byte chunks
1952 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); 1953 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1953 } else { 1954 } else {
1954 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); 1955 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1955 } 1956 }
1956 restore_arg_regs(); 1957 restore_arg_regs();
1957 __ xorq(rax, rax); // return 0 1958 __ xorptr(rax, rax); // return 0
1958 __ leave(); // required for proper stackwalking of RuntimeStub frame 1959 __ leave(); // required for proper stackwalking of RuntimeStub frame
1959 __ ret(0); 1960 __ ret(0);
1960 1961
1961 return start; 1962 return start;
1962 } 1963 }
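The forward loops above bias end_from/end_to to the last element and run a negative index up toward zero, so the increment doubles as the loop test (jcc notZero). A sketch with hypothetical plain-pointer arguments, omitting the 32-byte chunk loop:

#include <cstddef>
#include <cstdint>

static void disjoint_jlong_copy_sketch(const uint64_t* from, uint64_t* to,
                                       size_t qword_count) {
  if (qword_count == 0) return;
  const uint64_t* end_from = from + (qword_count - 1);    // lea(end_from, ...)
  uint64_t*       end_to   = to   + (qword_count - 1);    // lea(end_to, ...)
  for (intptr_t i = -(intptr_t)qword_count; i != 0; ++i)  // negptr; jcc notZero
    end_to[i + 1] = end_from[i + 1];  // Address(end_*, qword_count, times_8, 8)
}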
2006 2007
2007 // 'from', 'to' and 'qword_count' are now valid 2008 // 'from', 'to' and 'qword_count' are now valid
2008 2009
2009 if (is_oop) { 2010 if (is_oop) {
2010 // Save to and count for store barrier 2011 // Save to and count for store barrier
2011 __ movq(saved_count, qword_count); 2012 __ movptr(saved_count, qword_count);
2012 // No registers are destroyed by this call 2013 // No registers are destroyed by this call
2013 gen_write_ref_array_pre_barrier(to, saved_count); 2014 gen_write_ref_array_pre_barrier(to, saved_count);
2014 } 2015 }
2015 2016
2016 __ jmp(L_copy_32_bytes); 2017 __ jmp(L_copy_32_bytes);
2017 2018
2018 // Copy trailing qwords 2019 // Copy trailing qwords
2019 __ BIND(L_copy_8_bytes); 2020 __ BIND(L_copy_8_bytes);
2020 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2021 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2021 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2022 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2022 __ decrementq(qword_count); 2023 __ decrement(qword_count);
2023 __ jcc(Assembler::notZero, L_copy_8_bytes); 2024 __ jcc(Assembler::notZero, L_copy_8_bytes);
2024 2025
2025 if (is_oop) { 2026 if (is_oop) {
2026 __ jmp(L_exit); 2027 __ jmp(L_exit);
2027 } else { 2028 } else {
2028 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); 2029 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2029 restore_arg_regs(); 2030 restore_arg_regs();
2030 __ xorq(rax, rax); // return 0 2031 __ xorptr(rax, rax); // return 0
2031 __ leave(); // required for proper stackwalking of RuntimeStub frame 2032 __ leave(); // required for proper stackwalking of RuntimeStub frame
2032 __ ret(0); 2033 __ ret(0);
2033 } 2034 }
2034 2035
2035 // Copy in 32-byte chunks 2036 // Copy in 32-byte chunks
2036 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 2037 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2037 2038
2038 if (is_oop) { 2039 if (is_oop) {
2039 __ BIND(L_exit); 2040 __ BIND(L_exit);
2040 __ leaq(rcx, Address(to, saved_count, Address::times_8, -8)); 2041 __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
2041 gen_write_ref_array_post_barrier(to, rcx, rax); 2042 gen_write_ref_array_post_barrier(to, rcx, rax);
2042 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); 2043 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2043 } else { 2044 } else {
2044 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); 2045 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2045 } 2046 }
2046 restore_arg_regs(); 2047 restore_arg_regs();
2047 __ xorq(rax, rax); // return 0 2048 __ xorptr(rax, rax); // return 0
2048 __ leave(); // required for proper stackwalking of RuntimeStub frame 2049 __ leave(); // required for proper stackwalking of RuntimeStub frame
2049 __ ret(0); 2050 __ ret(0);
2050 2051
2051 return start; 2052 return start;
2052 } 2053 }
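Note that gen_write_ref_array_post_barrier receives an inclusive end pointer (hence the -8 biasing of rcx above, and the addptr(end_to, -wordSize) later in this file). A sketch of the card dirtying it stands for, under the assumptions of 512-byte cards and a byte map pre-biased so it can be indexed directly by (address >> shift); both are assumptions about the collector configuration, not guarantees:

#include <cstdint>

static void post_barrier_sketch(volatile uint8_t* biased_card_map,
                                uintptr_t start, uintptr_t end_inclusive) {
  const int card_shift = 9;                 // assumed: 2^9 = 512-byte cards
  for (uintptr_t c = start >> card_shift;
       c <= (end_inclusive >> card_shift); ++c)
    biased_card_map[c] = 0;                 // assumed: 0 marks a dirty card
}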
2071 Klass::secondary_super_cache_offset_in_bytes()); 2072 Klass::secondary_super_cache_offset_in_bytes());
2072 Address secondary_supers_addr(sub_klass, ss_offset); 2073 Address secondary_supers_addr(sub_klass, ss_offset);
2073 Address super_cache_addr( sub_klass, sc_offset); 2074 Address super_cache_addr( sub_klass, sc_offset);
2074 2075
2075 // if the pointers are equal, we are done (e.g., String[] elements) 2076 // if the pointers are equal, we are done (e.g., String[] elements)
2076 __ cmpq(super_klass, sub_klass); 2077 __ cmpptr(super_klass, sub_klass);
2077 __ jcc(Assembler::equal, L_success); 2078 __ jcc(Assembler::equal, L_success);
2078 2079
2079 // check the supertype display: 2080 // check the supertype display:
2080 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 2081 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
2081 __ cmpq(super_klass, super_check_addr); // test the super type 2082 __ cmpptr(super_klass, super_check_addr); // test the super type
2082 __ jcc(Assembler::equal, L_success); 2083 __ jcc(Assembler::equal, L_success);
2083 2084
2084 // if it was a primary super, we can just fail immediately 2085 // if it was a primary super, we can just fail immediately
2085 __ cmpl(super_check_offset, sc_offset); 2086 __ cmpl(super_check_offset, sc_offset);
2086 __ jcc(Assembler::notEqual, L_miss); 2087 __ jcc(Assembler::notEqual, L_miss);
2089 // The repne_scan instruction uses fixed registers, which we must spill. 2090 // The repne_scan instruction uses fixed registers, which we must spill.
2090 // (We need a couple more temps in any case.) 2091 // (We need a couple more temps in any case.)
2091 // This code is rarely used, so simplicity is a virtue here. 2092 // This code is rarely used, so simplicity is a virtue here.
2092 inc_counter_np(SharedRuntime::_partial_subtype_ctr); 2093 inc_counter_np(SharedRuntime::_partial_subtype_ctr);
2093 { 2094 {
2094 __ pushq(rax); 2095 __ push(rax);
2095 __ pushq(rcx); 2096 __ push(rcx);
2096 __ pushq(rdi); 2097 __ push(rdi);
2097 assert_different_registers(sub_klass, super_klass, rax, rcx, rdi); 2098 assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);
2098 2099
2099 __ movq(rdi, secondary_supers_addr); 2100 __ movptr(rdi, secondary_supers_addr);
2100 // Load the array length. 2101 // Load the array length.
2101 __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); 2102 __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
2102 // Skip to start of data. 2103 // Skip to start of data.
2103 __ addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); 2104 __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
2104 // Scan rcx words at [rdi] for occurrence of rax 2105 // Scan rcx words at [rdi] for occurrence of rax
2105 // Set NZ/Z based on last compare 2106 // Set NZ/Z based on last compare
2106 __ movq(rax, super_klass); 2107 __ movptr(rax, super_klass);
2107 if (UseCompressedOops) { 2108 if (UseCompressedOops) {
2108 // Compare against compressed form. Don't need to uncompress because 2109 // Compare against compressed form. Don't need to uncompress because
2109 // it looks like the original rax is restored by the popq below. 2110 // it looks like the original rax is restored by the popq below.
2110 __ encode_heap_oop(rax); 2111 __ encode_heap_oop(rax);
2111 __ repne_scanl(); 2112 __ repne_scanl();
2112 } else { 2113 } else {
2113 __ repne_scanq(); 2114 __ repne_scan();
2114 } 2115 }
2115 2116
2116 // Unspill the temp. registers: 2117 // Unspill the temp. registers:
2117 __ popq(rdi); 2118 __ pop(rdi);
2118 __ popq(rcx); 2119 __ pop(rcx);
2119 __ popq(rax); 2120 __ pop(rax);
2120 2121
2121 __ jcc(Assembler::notEqual, L_miss); 2122 __ jcc(Assembler::notEqual, L_miss);
2122 } 2123 }
2123 2124
2124 // Success. Cache the super we found and proceed in triumph. 2125 // Success. Cache the super we found and proceed in triumph.
2125 __ movq(super_cache_addr, super_klass); // note: rax is dead 2126 __ movptr(super_cache_addr, super_klass); // note: rax is dead
2126 __ jmp(L_success); 2127 __ jmp(L_success);
2127 2128
2128 // Fall through on failure! 2129 // Fall through on failure!
2129 __ BIND(L_miss); 2130 __ BIND(L_miss);
2130 } 2131 }
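In pseudo-C++ the check generated above amounts to the following. The struct layout, field names, and 8-entry display are illustrative assumptions standing in for the real Klass layout; only the control flow mirrors the stub:

#include <cstddef>

// Stand-ins for the Klass fields the stub reads; layout is assumed.
struct KlassSketch {
  KlassSketch*  primary_supers[8];       // the "supertype display"
  KlassSketch*  secondary_super_cache;   // one-element cache of a past hit
  KlassSketch** secondary_supers;        // scanned by repne_scan in the stub
  int           secondary_supers_len;
};

// Mirrors generate_type_check: display probe, then (only when the probed slot
// was the cache) a linear scan of the secondary supers.
static bool check_klass_subtype_sketch(KlassSketch* sub, KlassSketch* super,
                                       size_t super_check_offset) {
  const size_t sc_offset = offsetof(KlassSketch, secondary_super_cache);
  if (sub == super) return true;                  // e.g., String[] elements
  KlassSketch* probe = *(KlassSketch**)((char*)sub + super_check_offset);
  if (probe == super) return true;                // display (or cache) hit
  if (super_check_offset != sc_offset) return false;  // primary miss is final
  for (int i = 0; i < sub->secondary_supers_len; i++) {
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;         // cache the super we found
      return true;
    }
  }
  return false;
}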
2200 saved_r14_offset, 2201 saved_r14_offset,
2201 saved_rbp_offset, 2202 saved_rbp_offset,
2202 saved_rip_offset, 2203 saved_rip_offset,
2203 saved_rarg0_offset 2204 saved_rarg0_offset
2204 }; 2205 };
2205 __ subq(rsp, saved_rbp_offset * wordSize); 2206 __ subptr(rsp, saved_rbp_offset * wordSize);
2206 __ movq(Address(rsp, saved_r13_offset * wordSize), r13); 2207 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2207 __ movq(Address(rsp, saved_r14_offset * wordSize), r14); 2208 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2208 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2209 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2209 // ckoff => rcx, ckval => r8 2210 // ckoff => rcx, ckval => r8
2210 // r9 and r10 may be used to save non-volatile registers 2211 // r9 and r10 may be used to save non-volatile registers
2211 #ifdef _WIN64 2212 #ifdef _WIN64
2212 // last argument (#4) is on stack on Win64 2213 // last argument (#4) is on stack on Win64
2213 const int ckval_offset = saved_rarg0_offset + 4; 2214 const int ckval_offset = saved_rarg0_offset + 4;
2214 __ movq(ckval, Address(rsp, ckval_offset * wordSize)); 2215 __ movptr(ckval, Address(rsp, ckval_offset * wordSize));
2215 #endif 2216 #endif
2216 2217
2217 // check that int operands are properly extended to size_t 2218 // check that int operands are properly extended to size_t
2218 assert_clean_int(length, rax); 2219 assert_clean_int(length, rax);
2219 assert_clean_int(ckoff, rax); 2220 assert_clean_int(ckoff, rax);
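assert_clean_int verifies that a 32-bit operand really arrived sign-extended to 64 bits, i.e. that the upper half of the register is just a copy of bit 31. The predicate, as a sketch:

#include <cstdint>

static bool is_clean_int_sketch(int64_t reg) {
  return reg == (int64_t)(int32_t)reg;   // high 32 bits mirror the sign bit
}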
2240 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2241 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2241 2242
2242 gen_write_ref_array_pre_barrier(to, count); 2243 gen_write_ref_array_pre_barrier(to, count);
2243 2244
2244 // Copy from low to high addresses, indexed from the end of each array. 2245 // Copy from low to high addresses, indexed from the end of each array.
2245 __ leaq(end_from, end_from_addr); 2246 __ lea(end_from, end_from_addr);
2246 __ leaq(end_to, end_to_addr); 2247 __ lea(end_to, end_to_addr);
2247 __ movq(r14_length, length); // save a copy of the length 2248 __ movptr(r14_length, length); // save a copy of the length
2248 assert(length == count, ""); // else fix next line: 2249 assert(length == count, ""); // else fix next line:
2249 __ negq(count); // negate and test the length 2250 __ negptr(count); // negate and test the length
2250 __ jcc(Assembler::notZero, L_load_element); 2251 __ jcc(Assembler::notZero, L_load_element);
2251 2252
2252 // Empty array: Nothing to do. 2253 // Empty array: Nothing to do.
2253 __ xorq(rax, rax); // return 0 on (trivial) success 2254 __ xorptr(rax, rax); // return 0 on (trivial) success
2254 __ jmp(L_done); 2255 __ jmp(L_done);
2255 2256
2256 // ======== begin loop ======== 2257 // ======== begin loop ========
2257 // (Loop is rotated; its entry is L_load_element.) 2258 // (Loop is rotated; its entry is L_load_element.)
2258 // Loop control: 2259 // Loop control:
2259 //   for (count = -count; count != 0; count++) 2260 //   for (count = -count; count != 0; count++)
2260 // Base pointers src, dst are biased by 8*(count-1), to the last element. 2261 // Base pointers src, dst are biased by 8*(count-1), to the last element.
2261 __ align(16); 2262 __ align(16);
2262 2263
2263 __ BIND(L_store_element); 2264 __ BIND(L_store_element);
2264 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2265 __ store_heap_oop(to_element_addr, rax_oop); // store the oop
2265 __ incrementq(count); // increment the count toward zero 2266 __ increment(count); // increment the count toward zero
2266 __ jcc(Assembler::zero, L_do_card_marks); 2267 __ jcc(Assembler::zero, L_do_card_marks);
2267 2268
2268 // ======== loop entry is here ======== 2269 // ======== loop entry is here ========
2269 __ BIND(L_load_element); 2270 __ BIND(L_load_element);
2270 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2271 __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2271 __ testq(rax_oop, rax_oop); 2272 __ testptr(rax_oop, rax_oop);
2272 __ jcc(Assembler::zero, L_store_element); 2273 __ jcc(Assembler::zero, L_store_element);
2273 2274
2274 __ load_klass(r11_klass, rax_oop);// query the object klass 2275 __ load_klass(r11_klass, rax_oop);// query the object klass
2275 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2276 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2276 // ======== end loop ======== 2277 // ======== end loop ========
2277 2278
2278 // It was a real error; we must depend on the caller to finish the job. 2279 // It was a real error; we must depend on the caller to finish the job.
2279 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2280 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2280 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2281 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2281 // and report their number to the caller. 2282 // and report their number to the caller.
2282 assert_different_registers(rax, r14_length, count, to, end_to, rcx); 2283 assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2283 __ leaq(end_to, to_element_addr); 2284 __ lea(end_to, to_element_addr);
2284 gen_write_ref_array_post_barrier(to, end_to, rcx); 2285 gen_write_ref_array_post_barrier(to, end_to, rcx);
2285 __ movq(rax, r14_length); // original oops 2286 __ movptr(rax, r14_length); // original oops
2286 __ addq(rax, count); // K = (original - remaining) oops 2287 __ addptr(rax, count); // K = (original - remaining) oops
2287 __ notq(rax); // report (-1^K) to caller 2288 __ notptr(rax); // report (-1^K) to caller
2288 __ jmp(L_done); 2289 __ jmp(L_done);
2289 2290
2290 // Come here on success only. 2291 // Come here on success only.
2291 __ BIND(L_do_card_marks); 2292 __ BIND(L_do_card_marks);
2292 __ addq(end_to, -wordSize); // make an inclusive end pointer 2293 __ addptr(end_to, -wordSize); // make an inclusive end pointer
2293 gen_write_ref_array_post_barrier(to, end_to, rcx); 2294 gen_write_ref_array_post_barrier(to, end_to, rcx);
2294 __ xorq(rax, rax); // return 0 on success 2295 __ xorptr(rax, rax); // return 0 on success
2295 2296
2296 // Common exit point (success or failure). 2297 // Common exit point (success or failure).
2297 __ BIND(L_done); 2298 __ BIND(L_done);
2298 __ movq(r13, Address(rsp, saved_r13_offset * wordSize)); 2299 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2299 __ movq(r14, Address(rsp, saved_r14_offset * wordSize)); 2300 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2300 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); 2301 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2301 restore_arg_regs(); 2302 restore_arg_regs();
2302 __ leave(); // required for proper stackwalking of RuntimeStub frame 2303 __ leave(); // required for proper stackwalking of RuntimeStub frame
2303 __ ret(0); 2304 __ ret(0);
2304 2305
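On success this stub returns 0; when an element fails the type check it returns the bitwise complement ~K, where K is the number of oops copied before the failure, so the caller can distinguish a partial copy from full success. A sketch of the rax arithmetic above (variable names assumed):

#include <cstdint>

static intptr_t checkcast_result_sketch(intptr_t total_oops    /* r14 */,
                                        intptr_t neg_remaining /* rdx */) {
  intptr_t K = total_oops + neg_remaining;   // (original - remaining) oops
  return ~K;                                 // report (-1 ^ K) to caller
}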
2337 __ enter(); // required for proper stackwalking of RuntimeStub frame 2338 __ enter(); // required for proper stackwalking of RuntimeStub frame
2338 2339
2339 // bump this on entry, not on exit: 2340 // bump this on entry, not on exit:
2340 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2341 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2341 2342
2342 __ movq(bits, from); 2343 __ mov(bits, from);
2343 __ orq(bits, to); 2344 __ orptr(bits, to);
2344 __ orq(bits, size); 2345 __ orptr(bits, size);
2345 2346
2346 __ testb(bits, BytesPerLong-1); 2347 __ testb(bits, BytesPerLong-1);
2347 __ jccb(Assembler::zero, L_long_aligned); 2348 __ jccb(Assembler::zero, L_long_aligned);
2348 2349
2349 __ testb(bits, BytesPerInt-1); 2350 __ testb(bits, BytesPerInt-1);
2350 __ jccb(Assembler::zero, L_int_aligned); 2351 __ jccb(Assembler::zero, L_int_aligned);
2351 2352
2352 __ testb(bits, BytesPerShort-1); 2353 __ testb(bits, BytesPerShort-1);
2353 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2354 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2354 2355
2355 __ BIND(L_short_aligned); 2356 __ BIND(L_short_aligned);
2356 __ shrq(size, LogBytesPerShort); // size => short_count 2357 __ shrptr(size, LogBytesPerShort); // size => short_count
2357 __ jump(RuntimeAddress(short_copy_entry)); 2358 __ jump(RuntimeAddress(short_copy_entry));
2358 2359
2359 __ BIND(L_int_aligned); 2360 __ BIND(L_int_aligned);
2360 __ shrq(size, LogBytesPerInt); // size => int_count 2361 __ shrptr(size, LogBytesPerInt); // size => int_count
2361 __ jump(RuntimeAddress(int_copy_entry)); 2362 __ jump(RuntimeAddress(int_copy_entry));
2362 2363
2363 __ BIND(L_long_aligned); 2364 __ BIND(L_long_aligned);
2364 __ shrq(size, LogBytesPerLong); // size => qword_count 2365 __ shrptr(size, LogBytesPerLong); // size => qword_count
2365 __ jump(RuntimeAddress(long_copy_entry)); 2366 __ jump(RuntimeAddress(long_copy_entry));
2366 2367
2367 return start; 2368 return start;
2368 } 2369 }
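ORing the two addresses and the byte size together lets a single test of the low bits establish the common alignment of all three values, which selects the widest safe element copy. A sketch with assumed plain arguments and a hypothetical result enum:

#include <cstddef>
#include <cstdint>

enum CopyKindSketch { BYTE_COPY, SHORT_COPY, INT_COPY, LONG_COPY };

static CopyKindSketch classify_unsafe_copy(const void* from, void* to,
                                           size_t size) {
  uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)size;
  if ((bits & (8 - 1)) == 0) return LONG_COPY;    // BytesPerLong aligned
  if ((bits & (4 - 1)) == 0) return INT_COPY;     // BytesPerInt aligned
  if ((bits & (2 - 1)) == 0) return SHORT_COPY;   // BytesPerShort aligned
  return BYTE_COPY;
}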
2369 2370
2467 // (7) src_pos + length must not exceed length of src. 2468 // (7) src_pos + length must not exceed length of src.
2468 // (8) dst_pos + length must not exceed length of dst. 2469 // (8) dst_pos + length must not exceed length of dst.
2469 // 2470 //
2470 2471
2471 // if (src == NULL) return -1; 2472 // if (src == NULL) return -1;
2472 __ testq(src, src); // src oop 2473 __ testptr(src, src); // src oop
2473 size_t j1off = __ offset(); 2474 size_t j1off = __ offset();
2474 __ jccb(Assembler::zero, L_failed_0); 2475 __ jccb(Assembler::zero, L_failed_0);
2475 2476
2476 // if (src_pos < 0) return -1; 2477 // if (src_pos < 0) return -1;
2477 __ testl(src_pos, src_pos); // src_pos (32-bits) 2478 __ testl(src_pos, src_pos); // src_pos (32-bits)
2478 __ jccb(Assembler::negative, L_failed_0); 2479 __ jccb(Assembler::negative, L_failed_0);
2479 2480
2480 // if (dst == NULL) return -1; 2481 // if (dst == NULL) return -1;
2481 __ testq(dst, dst); // dst oop 2482 __ testptr(dst, dst); // dst oop
2482 __ jccb(Assembler::zero, L_failed_0); 2483 __ jccb(Assembler::zero, L_failed_0);
2483 2484
2484 // if (dst_pos < 0) return -1; 2485 // if (dst_pos < 0) return -1;
2485 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2486 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2486 size_t j4off = __ offset(); 2487 size_t j4off = __ offset();
2507 __ load_klass(r10_src_klass, src); 2508 __ load_klass(r10_src_klass, src);
2508 #ifdef ASSERT 2509 #ifdef ASSERT
2509 // assert(src->klass() != NULL); 2510 // assert(src->klass() != NULL);
2510 BLOCK_COMMENT("assert klasses not null"); 2511 BLOCK_COMMENT("assert klasses not null");
2511 { Label L1, L2; 2512 { Label L1, L2;
2512 __ testq(r10_src_klass, r10_src_klass); 2513 __ testptr(r10_src_klass, r10_src_klass);
2513 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2514 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2514 __ bind(L1); 2515 __ bind(L1);
2515 __ stop("broken null klass"); 2516 __ stop("broken null klass");
2516 __ bind(L2); 2517 __ bind(L2);
2517 __ load_klass(r9_dst_klass, dst); 2518 __ load_klass(r9_dst_klass, dst);
2572 const Register r10_offset = r10; // array offset 2573 const Register r10_offset = r10; // array offset
2573 const Register rax_elsize = rax_lh; // element size 2574 const Register rax_elsize = rax_lh; // element size
2574 2575
2575 __ movl(r10_offset, rax_lh); 2576 __ movl(r10_offset, rax_lh);
2576 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2577 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2577 __ andq(r10_offset, Klass::_lh_header_size_mask); // array_offset 2578 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
2578 __ addq(src, r10_offset); // src array offset 2579 __ addptr(src, r10_offset); // src array offset
2579 __ addq(dst, r10_offset); // dst array offset 2580 __ addptr(dst, r10_offset); // dst array offset
2580 BLOCK_COMMENT("choose copy loop based on element size"); 2581 BLOCK_COMMENT("choose copy loop based on element size");
2581 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2582 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2582 2583
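The decode just performed unpacks the klass layout helper: the array base offset lives in a byte field above the header-size shift, and the low bits hold log2 of the element size. The shift and mask values below are illustrative assumptions mirroring the Klass::_lh_* constants read above:

static void decode_layout_helper_sketch(int lh,
                                        int* header_size_in_bytes,
                                        int* log2_element_size) {
  const int lh_header_size_shift      = 16;   // assumed value
  const int lh_header_size_mask       = 0xFF; // assumed value
  const int lh_log2_element_size_mask = 0x3F; // assumed value
  *header_size_in_bytes = (lh >> lh_header_size_shift) & lh_header_size_mask;
  *log2_element_size    = lh & lh_log2_element_size_mask;
}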
2583 // next registers should be set before the jump to corresponding stub 2584 // next registers should be set before the jump to corresponding stub
2584 const Register from = c_rarg0; // source array address 2585 const Register from = c_rarg0; // source array address
2589 // since they are the same as 'src', 'src_pos', 'dst'. 2590 // since they are the same as 'src', 'src_pos', 'dst'.
2590 2591
2591 __ BIND(L_copy_bytes); 2592 __ BIND(L_copy_bytes);
2592 __ cmpl(rax_elsize, 0); 2593 __ cmpl(rax_elsize, 0);
2593 __ jccb(Assembler::notEqual, L_copy_shorts); 2594 __ jccb(Assembler::notEqual, L_copy_shorts);
2594 __ leaq(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2595 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2595 __ leaq(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2596 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2596 __ movslq(count, r11_length); // length 2597 __ movl2ptr(count, r11_length); // length
2597 __ jump(RuntimeAddress(byte_copy_entry)); 2598 __ jump(RuntimeAddress(byte_copy_entry));
2598 2599
2599 __ BIND(L_copy_shorts); 2600 __ BIND(L_copy_shorts);
2600 __ cmpl(rax_elsize, LogBytesPerShort); 2601 __ cmpl(rax_elsize, LogBytesPerShort);
2601 __ jccb(Assembler::notEqual, L_copy_ints); 2602 __ jccb(Assembler::notEqual, L_copy_ints);
2602 __ leaq(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2603 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2603 __ leaq(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2604 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2604 __ movslq(count, r11_length); // length 2605 __ movl2ptr(count, r11_length); // length
2605 __ jump(RuntimeAddress(short_copy_entry)); 2606 __ jump(RuntimeAddress(short_copy_entry));
2606 2607
2607 __ BIND(L_copy_ints); 2608 __ BIND(L_copy_ints);
2608 __ cmpl(rax_elsize, LogBytesPerInt); 2609 __ cmpl(rax_elsize, LogBytesPerInt);
2609 __ jccb(Assembler::notEqual, L_copy_longs); 2610 __ jccb(Assembler::notEqual, L_copy_longs);
2610 __ leaq(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2611 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2611 __ leaq(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2612 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2612 __ movslq(count, r11_length); // length 2613 __ movl2ptr(count, r11_length); // length
2613 __ jump(RuntimeAddress(int_copy_entry)); 2614 __ jump(RuntimeAddress(int_copy_entry));
2614 2615
2615 __ BIND(L_copy_longs); 2616 __ BIND(L_copy_longs);
2616 #ifdef ASSERT 2617 #ifdef ASSERT
2617 { Label L; 2618 { Label L;
2618 __ cmpl(rax_elsize, LogBytesPerLong); 2619 __ cmpl(rax_elsize, LogBytesPerLong);
2619 __ jcc(Assembler::equal, L); 2620 __ jcc(Assembler::equal, L);
2620 __ stop("must be long copy, but elsize is wrong"); 2621 __ stop("must be long copy, but elsize is wrong");
2621 __ bind(L); 2622 __ bind(L);
2622 } 2623 }
2623 #endif 2624 #endif
2624 __ leaq(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2625 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2625 __ leaq(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2626 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2626 __ movslq(count, r11_length); // length 2627 __ movl2ptr(count, r11_length); // length
2627 __ jump(RuntimeAddress(long_copy_entry)); 2628 __ jump(RuntimeAddress(long_copy_entry));
2628 2629
2629 // objArrayKlass 2630 // objArrayKlass
2630 __ BIND(L_objArray); 2631 __ BIND(L_objArray);
2631 // live at this point: r10_src_klass, src[_pos], dst[_pos] 2632 // live at this point: r10_src_klass, src[_pos], dst[_pos]
2638 2639
2639 // Identically typed arrays can be copied without element-wise checks. 2640 // Identically typed arrays can be copied without element-wise checks.
2640 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2641 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2641 r10, L_failed); 2642 r10, L_failed);
2642 2643
2643 __ leaq(from, Address(src, src_pos, TIMES_OOP, 2644 __ lea(from, Address(src, src_pos, TIMES_OOP,
2644 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2645 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2645 __ leaq(to, Address(dst, dst_pos, TIMES_OOP, 2646 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2646 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2647 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2647 __ movslq(count, r11_length); // length 2648 __ movl2ptr(count, r11_length); // length
2648 __ BIND(L_plain_copy); 2649 __ BIND(L_plain_copy);
2649 __ jump(RuntimeAddress(oop_copy_entry)); 2650 __ jump(RuntimeAddress(oop_copy_entry));
2650 2651
2651 __ BIND(L_checkcast_copy); 2652 __ BIND(L_checkcast_copy);
2652 // live at this point: r10_src_klass, !r11_length 2653 // live at this point: r10_src_klass, !r11_length
2669 rax, L_failed); 2670 rax, L_failed);
2670 __ load_klass(r11_dst_klass, dst); // reload 2671 __ load_klass(r11_dst_klass, dst); // reload
2671 #endif 2672 #endif
2672 2673
2673 // Marshal the base address arguments now, freeing registers. 2674 // Marshal the base address arguments now, freeing registers.
2674 __ leaq(from, Address(src, src_pos, TIMES_OOP, 2675 __ lea(from, Address(src, src_pos, TIMES_OOP,
2675 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2676 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2676 __ leaq(to, Address(dst, dst_pos, TIMES_OOP, 2677 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2677 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2678 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2678 __ movl(count, C_RARG4); // length (reloaded) 2679 __ movl(count, C_RARG4); // length (reloaded)
2679 Register sco_temp = c_rarg3; // this register is free now 2680 Register sco_temp = c_rarg3; // this register is free now
2680 assert_different_registers(from, to, count, sco_temp, 2681 assert_different_registers(from, to, count, sco_temp,
2681 r11_dst_klass, r10_src_klass); 2682 r11_dst_klass, r10_src_klass);
2689 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2690 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2690 2691
2691 // Fetch destination element klass from the objArrayKlass header. 2692 // Fetch destination element klass from the objArrayKlass header.
2692 int ek_offset = (klassOopDesc::header_size() * HeapWordSize + 2693 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2693 objArrayKlass::element_klass_offset_in_bytes()); 2694 objArrayKlass::element_klass_offset_in_bytes());
2694 __ movq(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2695 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2695 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2696 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2696 assert_clean_int(sco_temp, rax); 2697 assert_clean_int(sco_temp, rax);
2697 2698
2698 // the checkcast_copy loop needs two extra arguments: 2699 // the checkcast_copy loop needs two extra arguments:
2699 assert(c_rarg3 == sco_temp, "#3 already in place"); 2700 assert(c_rarg3 == sco_temp, "#3 already in place");
2700 __ movq(C_RARG4, r11_dst_klass); // dst.klass.element_klass 2701 __ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass
2701 __ jump(RuntimeAddress(checkcast_copy_entry)); 2702 __ jump(RuntimeAddress(checkcast_copy_entry));
2702 } 2703 }
2703 2704
2704 __ BIND(L_failed); 2705 __ BIND(L_failed);
2705 __ xorq(rax, rax); 2706 __ xorptr(rax, rax);
2706 __ notq(rax); // return -1 2707 __ notptr(rax); // return -1
2707 __ leave(); // required for proper stackwalking of RuntimeStub frame 2708 __ leave(); // required for proper stackwalking of RuntimeStub frame
2708 __ ret(0); 2709 __ ret(0);
2709 2710
2710 return start; 2711 return start;
2711 } 2712 }
2804 // This is an inlined and slightly modified version of call_VM 2805 // This is an inlined and slightly modified version of call_VM
2805 // which has the ability to fetch the return PC out of 2806 // which has the ability to fetch the return PC out of
2806 // thread-local storage and also sets up last_Java_sp slightly 2807 // thread-local storage and also sets up last_Java_sp slightly
2807 // differently than the real call_VM 2808 // differently than the real call_VM
2808 if (restore_saved_exception_pc) { 2809 if (restore_saved_exception_pc) {
2809 __ movq(rax, 2810 __ movptr(rax,
2810 Address(r15_thread, 2811 Address(r15_thread,
2811 in_bytes(JavaThread::saved_exception_pc_offset()))); 2812 in_bytes(JavaThread::saved_exception_pc_offset())));
2812 __ pushq(rax); 2813 __ push(rax);
2813 } 2814 }
2814 2815
2815 __ enter(); // required for proper stackwalking of RuntimeStub frame 2816 __ enter(); // required for proper stackwalking of RuntimeStub frame
2816 2817
2817 assert(is_even(framesize/2), "sp not 16-byte aligned"); 2818 assert(is_even(framesize/2), "sp not 16-byte aligned");
2818 2819
2819 // return address and rbp are already in place 2820 // return address and rbp are already in place
2820 __ subq(rsp, (framesize-4) << LogBytesPerInt); // prolog 2821 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
2821 2822
2822 int frame_complete = __ pc() - start; 2823 int frame_complete = __ pc() - start;
2823 2824
2824 // Set up last_Java_sp and last_Java_fp 2825 // Set up last_Java_sp and last_Java_fp
2825 __ set_last_Java_frame(rsp, rbp, NULL); 2826 __ set_last_Java_frame(rsp, rbp, NULL);
2826 2827
2827 // Call runtime 2828 // Call runtime
2828 __ movq(c_rarg0, r15_thread); 2829 __ movptr(c_rarg0, r15_thread);
2829 BLOCK_COMMENT("call runtime_entry"); 2830 BLOCK_COMMENT("call runtime_entry");
2830 __ call(RuntimeAddress(runtime_entry)); 2831 __ call(RuntimeAddress(runtime_entry));
2831 2832
2832 // Generate oop map 2833 // Generate oop map
2833 OopMap* map = new OopMap(framesize, 0); 2834 OopMap* map = new OopMap(framesize, 0);
2839 __ leave(); // required for proper stackwalking of RuntimeStub frame 2840 __ leave(); // required for proper stackwalking of RuntimeStub frame
2840 2841
2841 // check for pending exceptions 2842 // check for pending exceptions
2842 #ifdef ASSERT 2843 #ifdef ASSERT
2843 Label L; 2844 Label L;
2844 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), 2845 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
2845 (int) NULL); 2846 (int32_t) NULL_WORD);
2846 __ jcc(Assembler::notEqual, L); 2847 __ jcc(Assembler::notEqual, L);
2847 __ should_not_reach_here(); 2848 __ should_not_reach_here();
2848 __ bind(L); 2849 __ bind(L);
2849 #endif // ASSERT 2850 #endif // ASSERT
2850 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2851 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2863 // Initialization 2864 // Initialization
2864 void generate_initial() { 2865 void generate_initial() {
2865 // Generates all stubs and initializes the entry points 2866 // Generates all stubs and initializes the entry points
2866 2867
2867 // This platform-specific stub is needed by generate_call_stub() 2868 // This platform-specific stub is needed by generate_call_stub()
2868 StubRoutines::amd64::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80); 2869 StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
2869 2870
2870 // entry points that exist on all platforms. Note: This is code 2871 // entry points that exist on all platforms. Note: This is code
2871 // that could be shared among different platforms - however the 2872 // that could be shared among different platforms - however the
2872 // benefit seems to be smaller than the disadvantage of having a 2873 // benefit seems to be smaller than the disadvantage of having a
2873 // much more complicated generator structure. See also comment in 2874 // much more complicated generator structure. See also comment in
2892 2893
2893 StubRoutines::_handler_for_unsafe_access_entry = 2894 StubRoutines::_handler_for_unsafe_access_entry =
2894 generate_handler_for_unsafe_access(); 2895 generate_handler_for_unsafe_access();
2895 2896
2896 // platform dependent 2897 // platform dependent
2897 StubRoutines::amd64::_get_previous_fp_entry = generate_get_previous_fp(); 2898 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
2898 2899
2899 StubRoutines::amd64::_verify_mxcsr_entry = generate_verify_mxcsr(); 2900 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
2900 } 2901 }
2901 2902
2902 void generate_all() { 2903 void generate_all() {
2903 // Generates all stubs and initializes the entry points 2904 // Generates all stubs and initializes the entry points
2904 2905
2946 SharedRuntime:: 2947 SharedRuntime::
2947 throw_StackOverflowError), 2948 throw_StackOverflowError),
2948 false); 2949 false);
2949 2950
2950 // entry points that are platform specific 2951 // entry points that are platform specific
2951 StubRoutines::amd64::_f2i_fixup = generate_f2i_fixup(); 2952 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
2952 StubRoutines::amd64::_f2l_fixup = generate_f2l_fixup(); 2953 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
2953 StubRoutines::amd64::_d2i_fixup = generate_d2i_fixup(); 2954 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
2954 StubRoutines::amd64::_d2l_fixup = generate_d2l_fixup(); 2955 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
2955 2956
2956 StubRoutines::amd64::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 2957 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
2957 StubRoutines::amd64::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 2958 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
2958 StubRoutines::amd64::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 2959 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
2959 StubRoutines::amd64::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 2960 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
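These 128-bit masks back SSE absolute-value and negate: ANDing with the sign mask clears the sign bit, XORing with the sign flip toggles it. A scalar sketch of the double case:

#include <cstdint>
#include <cstring>

static double fabs_sketch(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= 0x7FFFFFFFFFFFFFFFULL;        // double_sign_mask: clear sign bit
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

static double neg_sketch(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= 0x8000000000000000ULL;        // double_sign_flip: toggle sign bit
  std::memcpy(&x, &bits, sizeof x);
  return x;
}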
2960 2961
2961 // support for verify_oop (must happen after universe_init) 2962 // support for verify_oop (must happen after universe_init)
2962 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 2963 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
2963 2964
2964 // arraycopy stubs used by compilers 2965 // arraycopy stubs used by compilers