Thu, 16 Feb 2012 17:12:49 -0800
7145346: VerifyStackAtCalls is broken
Summary: Replace call_epilog() encoding with macroassembler use. Moved duplicated code to x86.ad. Fixed return_addr() definition.
Reviewed-by: never
duke@435 | 1 | // |
trims@2384 | 2 | // Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | // |
duke@435 | 5 | // This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | // under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | // published by the Free Software Foundation. |
duke@435 | 8 | // |
duke@435 | 9 | // This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | // version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | // accompanied this code). |
duke@435 | 14 | // |
duke@435 | 15 | // You should have received a copy of the GNU General Public License version |
duke@435 | 16 | // 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | // |
trims@1907 | 19 | // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | // or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | // questions. |
duke@435 | 22 | // |
duke@435 | 23 | // |
duke@435 | 24 | |
duke@435 | 25 | // The argument size of each inline directive is ignored by the compiler |
duke@435 | 26 | // and is set to the number of arguments as documentation. |
duke@435 | 27 | |
duke@435 | 28 | // Get the raw thread ID from %fs:0 (amd64 keeps the thread pointer in %fs; the stale %gs wording was the i386 convention) |
duke@435 | 29 | .inline _raw_thread_id,0 |
duke@435 | 30 | movq %fs:0, %rax // thread pointer -> return value |
duke@435 | 31 | .end |
duke@435 | 32 | |
phh@3378 | 33 | // Get current fp (frame pointer of the caller) |
coleenp@907 | 34 | .inline _get_current_fp,0 |
coleenp@907 | 35 | .volatile // keep the optimizer from moving or eliminating this template |
duke@435 | 36 | movq %rbp, %rax // NOTE(review): assumes %rbp is maintained as a frame pointer here — confirm build uses no -fomit-frame-pointer equivalent |
duke@435 | 37 | .end |
duke@435 | 38 | |
phh@3378 | 39 | // Support for os::rdtsc() |
phh@3378 | 40 | .inline _raw_rdtsc,0 |
phh@3378 | 41 | rdtsc // time-stamp counter -> %edx (high 32 bits) : %eax (low 32 bits) |
phh@3378 | 42 | salq $32, %rdx // shift high half into bits 63..32 |
phh@3378 | 43 | orq %rdx, %rax // merge: full 64-bit TSC returned in %rax |
phh@3378 | 44 | .end |
phh@3378 | 45 | |
duke@435 | 46 | // Support for jint Atomic::add(jint add_value, volatile jint* dest) |
jcoomes@1902 | 47 | .inline _Atomic_add,2 |
duke@435 | 48 | movl %edi, %eax // save add_value for return |
duke@435 | 49 | lock |
jcoomes@1902 | 50 | xaddl %edi, (%rsi) // atomically *dest += add_value; old *dest -> %edi |
duke@435 | 51 | addl %edi, %eax // return old + add_value == the new *dest |
duke@435 | 52 | .end |
duke@435 | 53 | |
duke@435 | 54 | // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest) |
jcoomes@1902 | 55 | .inline _Atomic_add_long,2 |
duke@435 | 56 | movq %rdi, %rax // save add_value for return |
duke@435 | 57 | lock |
jcoomes@1902 | 58 | xaddq %rdi, (%rsi) // atomically *dest += add_value; old *dest -> %rdi |
duke@435 | 59 | addq %rdi, %rax // return old + add_value == the new *dest |
duke@435 | 60 | .end |
duke@435 | 61 | |
duke@435 | 62 | // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). |
duke@435 | 63 | .inline _Atomic_xchg,2 |
duke@435 | 64 | xchgl (%rsi), %edi // xchg with a memory operand asserts LOCK implicitly; old *dest -> %edi |
duke@435 | 65 | movl %edi, %eax // return the previous *dest |
duke@435 | 66 | .end |
duke@435 | 67 | |
duke@435 | 68 | // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest). |
duke@435 | 69 | .inline _Atomic_xchg_long,2 |
duke@435 | 70 | xchgq (%rsi), %rdi // xchg with a memory operand asserts LOCK implicitly; old *dest -> %rdi |
duke@435 | 71 | movq %rdi, %rax // return the previous *dest |
duke@435 | 72 | .end |
duke@435 | 73 | |
duke@435 | 74 | // Support for jint Atomic::cmpxchg(jint exchange_value, |
duke@435 | 75 | // volatile jint *dest, |
duke@435 | 76 | // jint compare_value) |
jcoomes@1902 | 77 | .inline _Atomic_cmpxchg,3 |
duke@435 | 78 | movl %edx, %eax // compare_value must be in %eax for cmpxchg |
duke@435 | 79 | lock |
jcoomes@1902 | 80 | cmpxchgl %edi, (%rsi) // if *dest == %eax: *dest = exchange_value; %eax gets old *dest either way (the return value) |
duke@435 | 81 | .end |
duke@435 | 82 | |
duke@435 | 83 | // Support for jlong Atomic::cmpxchg(jlong exchange_value, |
duke@435 | 84 | // volatile jlong* dest, |
duke@435 | 85 | // jlong compare_value) |
jcoomes@1902 | 86 | .inline _Atomic_cmpxchg_long,3 |
duke@435 | 87 | movq %rdx, %rax // compare_value must be in %rax for cmpxchg |
duke@435 | 88 | lock |
jcoomes@1902 | 89 | cmpxchgq %rdi, (%rsi) // if *dest == %rax: *dest = exchange_value; %rax gets old *dest either way (the return value) |
duke@435 | 90 | .end |
duke@435 | 91 | |
duke@435 | 92 | // Support for OrderAccess::acquire() |
duke@435 | 93 | .inline _OrderAccess_acquire,0 |
duke@435 | 94 | movl 0(%rsp), %eax // dummy load from the stack; on x86 a load is not reordered with later loads, so this suffices as an acquire barrier |
duke@435 | 95 | .end |
duke@435 | 96 | |
duke@435 | 97 | // Support for OrderAccess::fence() |
duke@435 | 98 | .inline _OrderAccess_fence,0 |
duke@435 | 99 | lock |
duke@435 | 100 | addl $0, (%rsp) // locked no-op RMW on the top of stack: full store/load fence, traditionally cheaper than mfence |
duke@435 | 101 | .end |
duke@435 | 102 | |
duke@435 | 103 | // Support for u2 Bytes::swap_u2(u2 x) |
duke@435 | 104 | .inline _raw_swap_u2,1 |
duke@435 | 105 | movw %di, %ax // low 16 bits of arg -> %ax |
duke@435 | 106 | rorw $8, %ax // rotate by 8 swaps the two bytes of a 16-bit value |
duke@435 | 107 | .end |
duke@435 | 108 | |
duke@435 | 109 | // Support for u4 Bytes::swap_u4(u4 x) |
duke@435 | 110 | .inline _raw_swap_u4,1 |
duke@435 | 111 | movl %edi, %eax // arg -> return register |
duke@435 | 112 | bswapl %eax // reverse the 4 bytes in place |
duke@435 | 113 | .end |
duke@435 | 114 | |
duke@435 | 115 | // Support for u8 Bytes::swap_u8(u8 x) |
duke@435 | 116 | .inline _raw_swap_u8,1 |
duke@435 | 117 | movq %rdi, %rax // arg -> return register |
duke@435 | 118 | bswapq %rax // reverse the 8 bytes in place |
duke@435 | 119 | .end |
duke@435 | 120 | |
duke@435 | 121 | // Support for void Prefetch::read (address, offset) |
duke@435 | 122 | .inline _Prefetch_read,2 |
duke@435 | 123 | prefetcht0 (%rdi, %rsi, 1) // hint: fetch [address + offset] into all cache levels; no architectural effect |
duke@435 | 124 | .end |
duke@435 | 125 | |
duke@435 | 126 | // Support for void Prefetch::write (address, offset) |
duke@435 | 127 | // We use prefetcht0 because em64t doesn't support prefetchw. |
duke@435 | 128 | // prefetchw is a 3dnow instruction. |
duke@435 | 129 | .inline _Prefetch_write,2 |
duke@435 | 130 | prefetcht0 (%rdi, %rsi, 1) // hint: fetch [address + offset] into all cache levels; no architectural effect |
duke@435 | 131 | .end |