src/os_cpu/linux_x86/vm/linux_x86_64.s

author:      dsimms
date:        Wed, 25 Sep 2013 13:58:13 +0200
changeset:   5781 899ecf76b570
parent:      5400 980532a806a5
child:       6198 55fb97c4c58d
permissions: -rw-r--r--

8023956: Provide a work-around to broken Linux 32 bit "Exec Shield" using CS for NX emulation (crashing with SI_KERNEL)
Summary: Execute some code at a high virtual address value, and keep mapped
Reviewed-by: coleenp, zgu
#
# Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

        # NOTE WELL!  The _Copy functions are called directly
        # from server-compiler-generated code via CallLeafNoFP,
        # which means that they *must* either not use floating
        # point or use it in the same manner as does the server
        # compiler.

        .globl _Copy_arrayof_conjoint_bytes
        .globl _Copy_arrayof_conjoint_jshorts
        .globl _Copy_conjoint_jshorts_atomic
        .globl _Copy_arrayof_conjoint_jints
        .globl _Copy_conjoint_jints_atomic
        .globl _Copy_arrayof_conjoint_jlongs
        .globl _Copy_conjoint_jlongs_atomic

        .text

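        # SpinPause support.  "rep; nop" is the encoding of the x86 PAUSE
        # spin-loop hint, which briefly throttles the core inside
        # spin-wait loops; the constant 1 returned in %rax presumably just
        # tells the C++ caller that a pause was actually performed.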
        .globl SpinPause
        .align 16
        .type  SpinPause,@function
SpinPause:
        rep
        nop
        movq   $1, %rax
        ret

        # Support for void Copy::arrayof_conjoint_bytes(void* from,
        #                                               void* to,
        #                                               size_t count)
        # rdi - from
        # rsi - to
        # rdx - count, treated as ssize_t
        #
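        # Copy strategy (this routine; the ones below follow the same
        # pattern): a forward copy (acb_CopyRight) is chosen when 'to' is
        # at or below 'from' or beyond the end of the source, i.e. when a
        # left-to-right copy cannot clobber unread source bytes; otherwise
        # the destination starts inside the source and the copy runs
        # backward (acb_CopyLeft) from the high addresses.  The main loops
        # move four quadwords per iteration, leftover quadwords go one at
        # a time, and the final 0-7 bytes are handled as dword/word/byte
        # tails selected by the low bits of the byte count saved in %r8.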
        .p2align 4,,15
        .type    _Copy_arrayof_conjoint_bytes,@function
_Copy_arrayof_conjoint_bytes:
        movq     %rdx,%r8             # byte count
        shrq     $3,%rdx              # qword count
        cmpq     %rdi,%rsi
        leaq     -1(%rdi,%r8,1),%rax  # from + bcount*1 - 1
        jbe      acb_CopyRight
        cmpq     %rax,%rsi
        jbe      acb_CopyLeft
acb_CopyRight:
        leaq     -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
        leaq     -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
        negq     %rdx
        jmp      7f
        .p2align 4,,15
1:      movq     8(%rax,%rdx,8),%rsi
        movq     %rsi,8(%rcx,%rdx,8)
        addq     $1,%rdx
        jnz      1b
2:      testq    $4,%r8               # check for trailing dword
        jz       3f
        movl     8(%rax),%esi         # copy trailing dword
        movl     %esi,8(%rcx)
        addq     $4,%rax
        addq     $4,%rcx              # original %rsi is trashed, so we
                                      #  can't use it as a base register
3:      testq    $2,%r8               # check for trailing word
        jz       4f
        movw     8(%rax),%si          # copy trailing word
        movw     %si,8(%rcx)
        addq     $2,%rcx
4:      testq    $1,%r8               # check for trailing byte
        jz       5f
        movb     -1(%rdi,%r8,1),%al   # copy trailing byte
        movb     %al,8(%rcx)
5:      ret
        .p2align 4,,15
6:      movq     -24(%rax,%rdx,8),%rsi
        movq     %rsi,-24(%rcx,%rdx,8)
        movq     -16(%rax,%rdx,8),%rsi
        movq     %rsi,-16(%rcx,%rdx,8)
        movq     -8(%rax,%rdx,8),%rsi
        movq     %rsi,-8(%rcx,%rdx,8)
        movq     (%rax,%rdx,8),%rsi
        movq     %rsi,(%rcx,%rdx,8)
7:      addq     $4,%rdx
        jle      6b
        subq     $4,%rdx
        jl       1b
        jmp      2b
acb_CopyLeft:
        testq    $1,%r8               # check for trailing byte
        jz       1f
        movb     -1(%rdi,%r8,1),%cl   # copy trailing byte
        movb     %cl,-1(%rsi,%r8,1)
        subq     $1,%r8               # adjust for possible trailing word
1:      testq    $2,%r8               # check for trailing word
        jz       2f
        movw     -2(%rdi,%r8,1),%cx   # copy trailing word
        movw     %cx,-2(%rsi,%r8,1)
2:      testq    $4,%r8               # check for trailing dword
        jz       5f
        movl     (%rdi,%rdx,8),%ecx   # copy trailing dword
        movl     %ecx,(%rsi,%rdx,8)
        jmp      5f
        .p2align 4,,15
3:      movq     -8(%rdi,%rdx,8),%rcx
        movq     %rcx,-8(%rsi,%rdx,8)
        subq     $1,%rdx
        jnz      3b
        ret
        .p2align 4,,15
4:      movq     24(%rdi,%rdx,8),%rcx
        movq     %rcx,24(%rsi,%rdx,8)
        movq     16(%rdi,%rdx,8),%rcx
        movq     %rcx,16(%rsi,%rdx,8)
        movq     8(%rdi,%rdx,8),%rcx
        movq     %rcx,8(%rsi,%rdx,8)
        movq     (%rdi,%rdx,8),%rcx
        movq     %rcx,(%rsi,%rdx,8)
5:      subq     $4,%rdx
        jge      4b
        addq     $4,%rdx
        jg       3b
        ret

        # Support for void Copy::arrayof_conjoint_jshorts(void* from,
        #                                                 void* to,
        #                                                 size_t count)
        # Equivalent to
        #   conjoint_jshorts_atomic
        #
        # If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
        # let the hardware handle it.  The two or four words within dwords
        # or qwords that span cache line boundaries will still be loaded
        # and stored atomically.
        #
        # rdi - from
        # rsi - to
        # rdx - count, treated as ssize_t
        #
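        # Same structure as _Copy_arrayof_conjoint_bytes: %r8 holds the
        # jshort count and %rdx the quadword count (count / 4); after the
        # unrolled four-quadword loop the remainder is an optional dword
        # (two jshorts) followed by an optional single jshort, so every
        # element is still moved with one naturally sized access.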
        .p2align 4,,15
        .type    _Copy_arrayof_conjoint_jshorts,@function
        .type    _Copy_conjoint_jshorts_atomic,@function
_Copy_arrayof_conjoint_jshorts:
_Copy_conjoint_jshorts_atomic:
        movq     %rdx,%r8             # word count
        shrq     $2,%rdx              # qword count
        cmpq     %rdi,%rsi
        leaq     -2(%rdi,%r8,2),%rax  # from + wcount*2 - 2
        jbe      acs_CopyRight
        cmpq     %rax,%rsi
        jbe      acs_CopyLeft
acs_CopyRight:
        leaq     -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
        leaq     -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
        negq     %rdx
        jmp      6f
1:      movq     8(%rax,%rdx,8),%rsi
        movq     %rsi,8(%rcx,%rdx,8)
        addq     $1,%rdx
        jnz      1b
2:      testq    $2,%r8               # check for trailing dword
        jz       3f
        movl     8(%rax),%esi         # copy trailing dword
        movl     %esi,8(%rcx)
        addq     $4,%rcx              # original %rsi is trashed, so we
                                      #  can't use it as a base register
3:      testq    $1,%r8               # check for trailing word
        jz       4f
        movw     -2(%rdi,%r8,2),%si   # copy trailing word
        movw     %si,8(%rcx)
4:      ret
        .p2align 4,,15
5:      movq     -24(%rax,%rdx,8),%rsi
        movq     %rsi,-24(%rcx,%rdx,8)
        movq     -16(%rax,%rdx,8),%rsi
        movq     %rsi,-16(%rcx,%rdx,8)
        movq     -8(%rax,%rdx,8),%rsi
        movq     %rsi,-8(%rcx,%rdx,8)
        movq     (%rax,%rdx,8),%rsi
        movq     %rsi,(%rcx,%rdx,8)
6:      addq     $4,%rdx
        jle      5b
        subq     $4,%rdx
        jl       1b
        jmp      2b
acs_CopyLeft:
        testq    $1,%r8               # check for trailing word
        jz       1f
        movw     -2(%rdi,%r8,2),%cx   # copy trailing word
        movw     %cx,-2(%rsi,%r8,2)
1:      testq    $2,%r8               # check for trailing dword
        jz       4f
        movl     (%rdi,%rdx,8),%ecx   # copy trailing dword
        movl     %ecx,(%rsi,%rdx,8)
        jmp      4f
2:      movq     -8(%rdi,%rdx,8),%rcx
        movq     %rcx,-8(%rsi,%rdx,8)
        subq     $1,%rdx
        jnz      2b
        ret
        .p2align 4,,15
3:      movq     24(%rdi,%rdx,8),%rcx
        movq     %rcx,24(%rsi,%rdx,8)
        movq     16(%rdi,%rdx,8),%rcx
        movq     %rcx,16(%rsi,%rdx,8)
        movq     8(%rdi,%rdx,8),%rcx
        movq     %rcx,8(%rsi,%rdx,8)
        movq     (%rdi,%rdx,8),%rcx
        movq     %rcx,(%rsi,%rdx,8)
4:      subq     $4,%rdx
        jge      3b
        addq     $4,%rdx
        jg       2b
        ret

        # Support for void Copy::arrayof_conjoint_jints(jint* from,
        #                                               jint* to,
        #                                               size_t count)
        # Equivalent to
        #   conjoint_jints_atomic
        #
        # If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
        # the hardware handle it.  The two dwords within qwords that span
        # cache line boundaries will still be loaded and stored atomically.
        #
        # rdi - from
        # rsi - to
        # rdx - count, treated as ssize_t
        #
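        # Same structure again: %r8 holds the jint count and %rdx the
        # quadword count (count / 2); the only possible remainder after
        # the unrolled loop is a single trailing dword, copied with one
        # 32-bit load/store.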
        .p2align 4,,15
        .type    _Copy_arrayof_conjoint_jints,@function
        .type    _Copy_conjoint_jints_atomic,@function
_Copy_arrayof_conjoint_jints:
_Copy_conjoint_jints_atomic:
        movq     %rdx,%r8             # dword count
        shrq     %rdx                 # qword count
        cmpq     %rdi,%rsi
        leaq     -4(%rdi,%r8,4),%rax  # from + dcount*4 - 4
        jbe      aci_CopyRight
        cmpq     %rax,%rsi
        jbe      aci_CopyLeft
aci_CopyRight:
        leaq     -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
        leaq     -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
        negq     %rdx
        jmp      5f
        .p2align 4,,15
1:      movq     8(%rax,%rdx,8),%rsi
        movq     %rsi,8(%rcx,%rdx,8)
        addq     $1,%rdx
        jnz      1b
2:      testq    $1,%r8               # check for trailing dword
        jz       3f
        movl     8(%rax),%esi         # copy trailing dword
        movl     %esi,8(%rcx)
3:      ret
        .p2align 4,,15
4:      movq     -24(%rax,%rdx,8),%rsi
        movq     %rsi,-24(%rcx,%rdx,8)
        movq     -16(%rax,%rdx,8),%rsi
        movq     %rsi,-16(%rcx,%rdx,8)
        movq     -8(%rax,%rdx,8),%rsi
        movq     %rsi,-8(%rcx,%rdx,8)
        movq     (%rax,%rdx,8),%rsi
        movq     %rsi,(%rcx,%rdx,8)
5:      addq     $4,%rdx
        jle      4b
        subq     $4,%rdx
        jl       1b
        jmp      2b
aci_CopyLeft:
        testq    $1,%r8               # check for trailing dword
        jz       3f
        movl     -4(%rdi,%r8,4),%ecx  # copy trailing dword
        movl     %ecx,-4(%rsi,%r8,4)
        jmp      3f
1:      movq     -8(%rdi,%rdx,8),%rcx
        movq     %rcx,-8(%rsi,%rdx,8)
        subq     $1,%rdx
        jnz      1b
        ret
        .p2align 4,,15
2:      movq     24(%rdi,%rdx,8),%rcx
        movq     %rcx,24(%rsi,%rdx,8)
        movq     16(%rdi,%rdx,8),%rcx
        movq     %rcx,16(%rsi,%rdx,8)
        movq     8(%rdi,%rdx,8),%rcx
        movq     %rcx,8(%rsi,%rdx,8)
        movq     (%rdi,%rdx,8),%rcx
        movq     %rcx,(%rsi,%rdx,8)
3:      subq     $4,%rdx
        jge      2b
        addq     $4,%rdx
        jg       1b
        ret

        # Support for void Copy::arrayof_conjoint_jlongs(jlong* from,
        #                                                jlong* to,
        #                                                size_t count)
        # Equivalent to
        #   conjoint_jlongs_atomic
        #   arrayof_conjoint_oops
        #   conjoint_oops_atomic
        #
        # rdi - from
        # rsi - to
        # rdx - count, treated as ssize_t
        #
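        # The element size already equals the quadword size, so no tail
        # handling is needed: the count in %rdx is used directly with the
        # same unrolled four-quadword main loop plus a one-quadword
        # remainder loop.  Since (per the comment above) these entry
        # points also serve the oop copy routines, each element is moved
        # with a single 64-bit access, keeping per-element atomicity.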
        .p2align 4,,15
        .type    _Copy_arrayof_conjoint_jlongs,@function
        .type    _Copy_conjoint_jlongs_atomic,@function
_Copy_arrayof_conjoint_jlongs:
_Copy_conjoint_jlongs_atomic:
        cmpq     %rdi,%rsi
        leaq     -8(%rdi,%rdx,8),%rax # from + count*8 - 8
        jbe      acl_CopyRight
        cmpq     %rax,%rsi
        jbe      acl_CopyLeft
acl_CopyRight:
        leaq     -8(%rsi,%rdx,8),%rcx # to + count*8 - 8
        negq     %rdx
        jmp      3f
1:      movq     8(%rax,%rdx,8),%rsi
        movq     %rsi,8(%rcx,%rdx,8)
        addq     $1,%rdx
        jnz      1b
        ret
        .p2align 4,,15
2:      movq     -24(%rax,%rdx,8),%rsi
        movq     %rsi,-24(%rcx,%rdx,8)
        movq     -16(%rax,%rdx,8),%rsi
        movq     %rsi,-16(%rcx,%rdx,8)
        movq     -8(%rax,%rdx,8),%rsi
        movq     %rsi,-8(%rcx,%rdx,8)
        movq     (%rax,%rdx,8),%rsi
        movq     %rsi,(%rcx,%rdx,8)
3:      addq     $4,%rdx
        jle      2b
        subq     $4,%rdx
        jl       1b
        ret
4:      movq     -8(%rdi,%rdx,8),%rcx
        movq     %rcx,-8(%rsi,%rdx,8)
        subq     $1,%rdx
        jnz      4b
        ret
        .p2align 4,,15
5:      movq     24(%rdi,%rdx,8),%rcx
        movq     %rcx,24(%rsi,%rdx,8)
        movq     16(%rdi,%rdx,8),%rcx
        movq     %rcx,16(%rsi,%rdx,8)
        movq     8(%rdi,%rdx,8),%rcx
        movq     %rcx,8(%rsi,%rdx,8)
        movq     (%rdi,%rdx,8),%rcx
        movq     %rcx,(%rsi,%rdx,8)
acl_CopyLeft:
        subq     $4,%rdx
        jge      5b
        addq     $4,%rdx
        jg       4b
        ret
