Thu, 16 Feb 2012 17:12:49 -0800
7145346: VerifyStackAtCalls is broken
Summary: Replace call_epilog() encoding with macroassembler use. Moved duplicated code to x86.ad. Fixed return_addr() definition.
Reviewed-by: never
1 //
2 // Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
23 //
// Support for os::setup_fpu(): reload the x87 FPU control word from the
// 16-bit value at the address passed as the single argument, programming
// the precision/rounding/exception-mask bits.
// NOTE(review): header previously said "u8 os::setup_fpu()"; this template
// takes one pointer argument and produces no meaningful result -- confirm
// the declared C++ signature in the os_solaris_x86 headers.
.inline _solaris_raw_setup_fpu,1
movl 0(%esp), %eax // %eax = address of the control-word image
fldcw (%eax) // load FPU control word from memory
.end
// The argument size of each inline directive is ignored by the compiler
// and is set to 0 for compatibility reasons.
// Get the raw thread ID from %gs:0.
// On Solaris x86, %gs addresses the per-thread data area and the first
// word is used as the thread identifier -- NOTE(review): confirm against
// the libthread/ucontext layout for the targeted Solaris release.
.inline _raw_thread_id,0
movl %gs:0, %eax // result returned in %eax
.end
// Get current fp (the frame pointer of the calling frame).
// .volatile keeps the optimizer from deleting or reordering this
// template, which would otherwise look like dead code since it only
// reads a register.
.inline _get_current_fp,0
.volatile
movl %ebp, %eax // %ebp is the frame pointer in this 32-bit ABI
.end
// Support for os::rdtsc().
// rdtsc leaves the 64-bit time-stamp counter in %edx:%eax, which is
// exactly the 32-bit convention for returning a jlong, so no further
// register shuffling is required.
.inline _raw_rdtsc,0
rdtsc
.end
// Support for jint Atomic::add(jint inc, volatile jint* dest)
// An additional bool (os::is_MP()) is passed as the last argument.
// xaddl leaves the *old* value of *dest in %eax; adding the saved
// increment back yields the *new* value, which is the return value.
// The lock prefix is skipped on uniprocessor systems where it is
// unnecessary overhead.
.inline _Atomic_add,3
movl 0(%esp), %eax // inc
movl 4(%esp), %edx // dest
movl %eax, %ecx // save inc; xaddl will overwrite %eax
cmpl $0, 8(%esp) // MP test
jne 1f
xaddl %eax, (%edx) // UP: unlocked exchange-and-add
jmp 2f
1: lock
xaddl %eax, (%edx) // MP: locked exchange-and-add
2: addl %ecx, %eax // %eax = old value + inc = new value
.end
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
// xchg with a memory operand asserts the bus lock implicitly, so no MP
// test or explicit lock prefix is needed.  The previous value of *dest
// is returned in %eax.
.inline _Atomic_xchg,2
movl 0(%esp), %eax // exchange_value
movl 4(%esp), %ecx // dest
xchgl (%ecx), %eax // implicitly locked swap; old value -> %eax
.end
// Support for jint Atomic::cmpxchg(jint exchange_value,
// volatile jint *dest,
// jint compare_value)
// An additional bool (os::is_MP()) is passed as the last argument.
// cmpxchgl compares %eax with *dest: on a match it stores %ecx, otherwise
// it loads the current value into %eax.  Either way %eax ends up holding
// the old value of *dest, which is the return value.
.inline _Atomic_cmpxchg,4
movl 8(%esp), %eax // compare_value
movl 0(%esp), %ecx // exchange_value
movl 4(%esp), %edx // dest
cmp $0, 12(%esp) // MP test
jne 1f
cmpxchgl %ecx, (%edx) // UP: unlocked compare-and-swap
jmp 2f
1: lock
cmpxchgl %ecx, (%edx) // MP: locked compare-and-swap
2:
.end
// Support for jlong Atomic::cmpxchg(jlong exchange_value,
// volatile jlong* dest,
// jlong compare_value)
// An additional bool (os::is_MP()) is passed as the last argument.
// cmpxchg8b compares %edx:%eax with the 8 bytes at (%edi): on a match it
// stores %ecx:%ebx, otherwise it loads the current value into %edx:%eax.
// The old value is returned in %edx:%eax either way.
// The two pushes below lower %esp by 8, so every argument offset is 8
// larger than it would be at template entry.
.inline _Atomic_cmpxchg_long,6
pushl %ebx // callee-saved; needed for exchange_value (low)
pushl %edi // callee-saved; holds dest
movl 20(%esp), %eax // compare_value (low)
movl 24(%esp), %edx // compare_value (high)
movl 16(%esp), %edi // dest
movl 8(%esp), %ebx // exchange_value (low)
movl 12(%esp), %ecx // exchange_value (high)
cmp $0, 28(%esp) // MP test
jne 1f
cmpxchg8b (%edi) // UP: unlocked 64-bit compare-and-swap
jmp 2f
1: lock
cmpxchg8b (%edi) // MP: locked 64-bit compare-and-swap
2: popl %edi
popl %ebx
.end
// Support for jlong Atomic::load and Atomic::store.
// void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
// The x87 fildll/fistpll pair reads and writes all 64 bits in a single
// memory access, giving an atomic jlong copy on 32-bit x86 -- something
// a pair of 32-bit movl instructions could not guarantee.
.inline _Atomic_move_long,2
movl 0(%esp), %eax // src
fildll (%eax) // push 64-bit value onto the x87 stack (one access)
movl 4(%esp), %eax // dest
fistpll (%eax) // pop and store all 64 bits (one access)
.end
// Support for OrderAccess::acquire().
// A dummy load from the stack is sufficient: x86 does not reorder a load
// with later loads/stores in program order, so this read serves as an
// acquire barrier without any fence instruction.  The loaded value is
// discarded.
.inline _OrderAccess_acquire,0
movl 0(%esp), %eax // dummy load; result unused
.end
// Support for OrderAccess::fence().
// A locked read-modify-write of the top-of-stack word is a full memory
// barrier on x86 (serializes all prior loads and stores) and is the
// traditional cheaper alternative to mfence.  Adding 0 leaves the stack
// contents unchanged.
.inline _OrderAccess_fence,0
lock
addl $0, (%esp) // locked no-op RMW = full fence
.end
// Support for u2 Bytes::swap_u2(u2 x)
// Byte-reverse the low 16 bits; the result is returned in %ax (the
// upper half of %eax is irrelevant for a u2 return).
.inline _raw_swap_u2,1
movl 0(%esp), %eax // x
xchgb %al, %ah // swap the two low-order bytes
.end
// Support for u4 Bytes::swap_u4(u4 x)
// bswap reverses all four bytes of the register; result in %eax.
.inline _raw_swap_u4,1
movl 0(%esp), %eax // x
bswap %eax // reverse byte order
.end
// Support for u8 Bytes::swap_u8_base(u4 x, u4 y)
// The jlong arrives as two 32-bit halves.  Swapping a 64-bit value means
// each half is byte-reversed AND the halves trade places, so the result
// (returned in %edx:%eax) is %eax = bswap(y) as the new low word and
// %edx = bswap(x) as the new high word.
.inline _raw_swap_u8,2
movl 4(%esp), %eax // y
movl 0(%esp), %edx // x
bswap %eax // new low word
bswap %edx // new high word
.end