src/os_cpu/solaris_x86/vm/solaris_x86_64.s

author:      never
date:        Thu, 21 Oct 2010 11:55:10 -0700
changeset:   2262:1e9a9d2e6509
parent:      1907:c18cbe5936b8
child:       5400:980532a806a5
permissions: -rw-r--r--

6970683: improvements to hs_err output
Reviewed-by: kvn, jrose, dholmes, coleenp

duke@435 1 /
trims@1907 2 / Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved.
duke@435 3 / DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 /
duke@435 5 / This code is free software; you can redistribute it and/or modify it
duke@435 6 / under the terms of the GNU General Public License version 2 only, as
duke@435 7 / published by the Free Software Foundation.
duke@435 8 /
duke@435 9 / This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 / ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 / FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 / version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 / accompanied this code).
duke@435 14 /
duke@435 15 / You should have received a copy of the GNU General Public License version
duke@435 16 / 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 / Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 /
trims@1907 19 / Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 / or visit www.oracle.com if you need additional information or have any
trims@1907 21 / questions.
duke@435 22 /
duke@435 23
/ Exported symbols: the fast %fs-based thread accessors and the _Copy
/ stubs defined below.  Solaris as: `/` starts a comment to end of line.
duke@435 24 .globl fs_load
duke@435 25 .globl fs_thread
duke@435 26
duke@435 27 // NOTE WELL! The _Copy functions are called directly
duke@435 28 // from server-compiler-generated code via CallLeafNoFP,
duke@435 29 // which means that they *must* either not use floating
duke@435 30 // point or use it in the same manner as does the server
duke@435 31 // compiler.
duke@435 32
duke@435 33 .globl _Copy_arrayof_conjoint_bytes
duke@435 34 .globl _Copy_conjoint_jshorts_atomic
duke@435 35 .globl _Copy_arrayof_conjoint_jshorts
duke@435 36 .globl _Copy_conjoint_jints_atomic
duke@435 37 .globl _Copy_arrayof_conjoint_jints
duke@435 38 .globl _Copy_conjoint_jlongs_atomic
duke@435 39 .globl _Copy_arrayof_conjoint_jlongs
duke@435 40
/ "ax" = allocatable, executable code section.
duke@435 41 .section .text,"ax"
duke@435 42
duke@435 43 / Fast thread accessors, used by threadLS_solaris_amd64.cpp
/
/ fs_load(off): return the qword at %fs:[off].
/ In:  rdi = byte offset from the %fs segment base
/ Out: rax = loaded qword.  No other registers touched.
duke@435 44 .align 16
duke@435 45 fs_load:
duke@435 46 movq %fs:(%rdi),%rax
duke@435 47 ret
duke@435 48
/ fs_thread(): return the qword at %fs:0 in rax
/ (presumably the current thread's TLS self-pointer; see
/ threadLS_solaris_amd64.cpp for the caller's interpretation).
duke@435 49 .align 16
duke@435 50 fs_thread:
duke@435 51 movq %fs:0x0,%rax
duke@435 52 ret
duke@435 53
duke@435 54 .globl SafeFetch32, Fetch32PFI, Fetch32Resume
duke@435 55 .align 16
duke@435 56 // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
/ Returns *Adr, or ErrValue if the load faults.  ErrValue is preloaded
/ into eax so that if the load at Fetch32PFI traps, execution can simply
/ resume at Fetch32Resume with the fallback value already in place.
/ NOTE(review): the exported PFI/Resume labels are presumably consulted by
/ the platform signal handler to skip the faulting load -- verify against
/ the os_solaris_x86 signal-handling code.
duke@435 57 SafeFetch32:
duke@435 58 movl %esi, %eax
duke@435 59 Fetch32PFI:
duke@435 60 movl (%rdi), %eax
duke@435 61 Fetch32Resume:
duke@435 62 ret
duke@435 63
duke@435 64 .globl SafeFetchN, FetchNPFI, FetchNResume
duke@435 65 .align 16
duke@435 66 // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
/ Pointer-width variant of SafeFetch32: returns *Adr, or ErrValue if the
/ load at FetchNPFI faults and execution is resumed at FetchNResume.
duke@435 67 SafeFetchN:
duke@435 68 movq %rsi, %rax
duke@435 69 FetchNPFI:
duke@435 70 movq (%rdi), %rax
duke@435 71 FetchNResume:
duke@435 72 ret
duke@435 73
/ SpinPause(): spin-wait hint for busy loops.
/ `rep` + `nop` (F3 90) is the encoding of the PAUSE instruction, which
/ reduces power and pipeline penalties inside spin loops and is a plain
/ NOP on CPUs that predate it.  Returns 1 in rax.
duke@435 74 .globl SpinPause
duke@435 75 .align 16
duke@435 76 SpinPause:
duke@435 77 rep
duke@435 78 nop
duke@435 79 movq $1, %rax
duke@435 80 ret
duke@435 81
duke@435 82
duke@435 83 / Support for void Copy::arrayof_conjoint_bytes(void* from,
duke@435 84 / void* to,
duke@435 85 / size_t count)
duke@435 86 / rdi - from
duke@435 87 / rsi - to
duke@435 88 / rdx - count, treated as ssize_t
duke@435 89 /
/ Copies count bytes between possibly overlapping regions, choosing the
/ direction so the destination never clobbers not-yet-copied source:
/ ascending (acb_CopyRight) when to <= from or the regions are disjoint,
/ descending (acb_CopyLeft) when 'to' lies inside [from, from+count).
/ Bulk data moves as qwords; the sub-qword remainder is handled as an
/ optional trailing dword, word, and byte.
duke@435 90 .align 16
duke@435 91 _Copy_arrayof_conjoint_bytes:
duke@435 92 movq %rdx,%r8 / byte count
duke@435 93 shrq $3,%rdx / qword count
duke@435 94 cmpq %rdi,%rsi
duke@435 95 leaq -1(%rdi,%r8,1),%rax / from + bcount*1 - 1
duke@435 96 jbe acb_CopyRight / to <= from: ascending copy is safe
duke@435 97 cmpq %rax,%rsi
duke@435 98 jbe acb_CopyLeft / to inside source range: copy descending
duke@435 99 acb_CopyRight:
/ Ascending: rax/rcx point at the last source/dest qword; rdx counts up
/ from -qcount to 0, so (%rax,%rdx,8) walks the qwords low to high.
duke@435 100 leaq -8(%rdi,%rdx,8),%rax / from + qcount*8 - 8
duke@435 101 leaq -8(%rsi,%rdx,8),%rcx / to + qcount*8 - 8
duke@435 102 negq %rdx
duke@435 103 jmp 7f / enter the unrolled loop at its counter update
duke@435 104 .align 16
/ Residual loop: one qword per iteration for the 1-3 qwords left over
/ after the 4-way unrolled loop below.
duke@435 105 1: movq 8(%rax,%rdx,8),%rsi
duke@435 106 movq %rsi,8(%rcx,%rdx,8)
duke@435 107 addq $1,%rdx
duke@435 108 jnz 1b
duke@435 109 2: testq $4,%r8 / check for trailing dword
duke@435 110 jz 3f
duke@435 111 movl 8(%rax),%esi / copy trailing dword
duke@435 112 movl %esi,8(%rcx)
duke@435 113 addq $4,%rax
duke@435 114 addq $4,%rcx / original %rsi is trashed, so we
duke@435 115 / can't use it as a base register
duke@435 116 3: testq $2,%r8 / check for trailing word
duke@435 117 jz 4f
duke@435 118 movw 8(%rax),%si / copy trailing word
duke@435 119 movw %si,8(%rcx)
duke@435 120 addq $2,%rcx
duke@435 121 4: testq $1,%r8 / check for trailing byte
duke@435 122 jz 5f
duke@435 123 movb -1(%rdi,%r8,1),%al / copy trailing byte
duke@435 124 movb %al,8(%rcx)
duke@435 125 5: ret
duke@435 126 .align 16
/ Main ascending loop, unrolled 4 qwords per iteration.
duke@435 127 6: movq -24(%rax,%rdx,8),%rsi
duke@435 128 movq %rsi,-24(%rcx,%rdx,8)
duke@435 129 movq -16(%rax,%rdx,8),%rsi
duke@435 130 movq %rsi,-16(%rcx,%rdx,8)
duke@435 131 movq -8(%rax,%rdx,8),%rsi
duke@435 132 movq %rsi,-8(%rcx,%rdx,8)
duke@435 133 movq (%rax,%rdx,8),%rsi
duke@435 134 movq %rsi,(%rcx,%rdx,8)
duke@435 135 7: addq $4,%rdx
duke@435 136 jle 6b / >= 4 qwords remain: take the unrolled loop
duke@435 137 subq $4,%rdx
duke@435 138 jl 1b / 1-3 qwords remain: single-qword loop
duke@435 139 jmp 2b / none remain: handle sub-qword tail
duke@435 140 acb_CopyLeft:
/ Descending: copy the sub-qword tail first (highest addresses), then the
/ qwords from high to low so overlapping source bytes survive.
duke@435 141 testq $1,%r8 / check for trailing byte
duke@435 142 jz 1f
duke@435 143 movb -1(%rdi,%r8,1),%cl / copy trailing byte
duke@435 144 movb %cl,-1(%rsi,%r8,1)
duke@435 145 subq $1,%r8 / adjust for possible trailing word
duke@435 146 1: testq $2,%r8 / check for trailing word
duke@435 147 jz 2f
duke@435 148 movw -2(%rdi,%r8,1),%cx / copy trailing word
duke@435 149 movw %cx,-2(%rsi,%r8,1)
duke@435 150 2: testq $4,%r8 / check for trailing dword
duke@435 151 jz 5f
duke@435 152 movl (%rdi,%rdx,8),%ecx / copy trailing dword
duke@435 153 movl %ecx,(%rsi,%rdx,8)
duke@435 154 jmp 5f / enter the qword loops at the counter check
duke@435 155 .align 16
/ Residual descending loop: one qword per iteration, rdx counting down.
duke@435 156 3: movq -8(%rdi,%rdx,8),%rcx
duke@435 157 movq %rcx,-8(%rsi,%rdx,8)
duke@435 158 subq $1,%rdx
duke@435 159 jnz 3b
duke@435 160 ret
duke@435 161 .align 16
/ Main descending loop, unrolled 4 qwords per iteration.
duke@435 162 4: movq 24(%rdi,%rdx,8),%rcx
duke@435 163 movq %rcx,24(%rsi,%rdx,8)
duke@435 164 movq 16(%rdi,%rdx,8),%rcx
duke@435 165 movq %rcx,16(%rsi,%rdx,8)
duke@435 166 movq 8(%rdi,%rdx,8),%rcx
duke@435 167 movq %rcx,8(%rsi,%rdx,8)
duke@435 168 movq (%rdi,%rdx,8),%rcx
duke@435 169 movq %rcx,(%rsi,%rdx,8)
duke@435 170 5: subq $4,%rdx
duke@435 171 jge 4b / >= 4 qwords remain: unrolled loop
duke@435 172 addq $4,%rdx
duke@435 173 jg 3b / 1-3 qwords remain: single-qword loop
duke@435 174 ret
duke@435 175
duke@435 176 / Support for void Copy::arrayof_conjoint_jshorts(void* from,
duke@435 177 / void* to,
duke@435 178 / size_t count)
duke@435 179 / Equivalent to
duke@435 180 / conjoint_jshorts_atomic
duke@435 181 /
duke@435 182 / If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
duke@435 183 / let the hardware handle it. The two or four words within dwords
duke@435 184 / or qwords that span cache line boundaries will still be loaded
duke@435 185 / and stored atomically.
duke@435 186 /
duke@435 187 / rdi - from
duke@435 188 / rsi - to
duke@435 189 / rdx - count, treated as ssize_t
duke@435 190 /
/ Same structure as _Copy_arrayof_conjoint_bytes, but count is in jshorts
/ (2-byte units): bulk qword copy plus an optional trailing dword and word.
duke@435 191 .align 16
duke@435 192 _Copy_arrayof_conjoint_jshorts:
duke@435 193 _Copy_conjoint_jshorts_atomic:
duke@435 194 movq %rdx,%r8 / word count
duke@435 195 shrq $2,%rdx / qword count
duke@435 196 cmpq %rdi,%rsi
duke@435 197 leaq -2(%rdi,%r8,2),%rax / from + wcount*2 - 2
duke@435 198 jbe acs_CopyRight / to <= from: ascending copy is safe
duke@435 199 cmpq %rax,%rsi
duke@435 200 jbe acs_CopyLeft / to inside source range: copy descending
duke@435 201 acs_CopyRight:
/ Ascending: rdx counts up from -qcount to 0 (see byte-copy stub above).
duke@435 202 leaq -8(%rdi,%rdx,8),%rax / from + qcount*8 - 8
duke@435 203 leaq -8(%rsi,%rdx,8),%rcx / to + qcount*8 - 8
duke@435 204 negq %rdx
duke@435 205 jmp 6f / enter the unrolled loop at its counter update
/ Residual loop: one qword per iteration.
duke@435 206 1: movq 8(%rax,%rdx,8),%rsi
duke@435 207 movq %rsi,8(%rcx,%rdx,8)
duke@435 208 addq $1,%rdx
duke@435 209 jnz 1b
duke@435 210 2: testq $2,%r8 / check for trailing dword
duke@435 211 jz 3f
duke@435 212 movl 8(%rax),%esi / copy trailing dword
duke@435 213 movl %esi,8(%rcx)
duke@435 214 addq $4,%rcx / original %rsi is trashed, so we
duke@435 215 / can't use it as a base register
duke@435 216 3: testq $1,%r8 / check for trailing word
duke@435 217 jz 4f
duke@435 218 movw -2(%rdi,%r8,2),%si / copy trailing word
duke@435 219 movw %si,8(%rcx)
duke@435 220 4: ret
duke@435 221 .align 16
/ Main ascending loop, unrolled 4 qwords per iteration.
duke@435 222 5: movq -24(%rax,%rdx,8),%rsi
duke@435 223 movq %rsi,-24(%rcx,%rdx,8)
duke@435 224 movq -16(%rax,%rdx,8),%rsi
duke@435 225 movq %rsi,-16(%rcx,%rdx,8)
duke@435 226 movq -8(%rax,%rdx,8),%rsi
duke@435 227 movq %rsi,-8(%rcx,%rdx,8)
duke@435 228 movq (%rax,%rdx,8),%rsi
duke@435 229 movq %rsi,(%rcx,%rdx,8)
duke@435 230 6: addq $4,%rdx
duke@435 231 jle 5b / >= 4 qwords remain: unrolled loop
duke@435 232 subq $4,%rdx
duke@435 233 jl 1b / 1-3 qwords remain: single-qword loop
duke@435 234 jmp 2b / none remain: handle sub-qword tail
duke@435 235 acs_CopyLeft:
/ Descending: sub-qword tail first, then qwords from high to low.
duke@435 236 testq $1,%r8 / check for trailing word
duke@435 237 jz 1f
duke@435 238 movw -2(%rdi,%r8,2),%cx / copy trailing word
duke@435 239 movw %cx,-2(%rsi,%r8,2)
duke@435 240 1: testq $2,%r8 / check for trailing dword
duke@435 241 jz 4f
duke@435 242 movl (%rdi,%rdx,8),%ecx / copy trailing dword
duke@435 243 movl %ecx,(%rsi,%rdx,8)
duke@435 244 jmp 4f / enter the qword loops at the counter check
/ Residual descending loop: one qword per iteration.
duke@435 245 2: movq -8(%rdi,%rdx,8),%rcx
duke@435 246 movq %rcx,-8(%rsi,%rdx,8)
duke@435 247 subq $1,%rdx
duke@435 248 jnz 2b
duke@435 249 ret
duke@435 250 .align 16
/ Main descending loop, unrolled 4 qwords per iteration.
duke@435 251 3: movq 24(%rdi,%rdx,8),%rcx
duke@435 252 movq %rcx,24(%rsi,%rdx,8)
duke@435 253 movq 16(%rdi,%rdx,8),%rcx
duke@435 254 movq %rcx,16(%rsi,%rdx,8)
duke@435 255 movq 8(%rdi,%rdx,8),%rcx
duke@435 256 movq %rcx,8(%rsi,%rdx,8)
duke@435 257 movq (%rdi,%rdx,8),%rcx
duke@435 258 movq %rcx,(%rsi,%rdx,8)
duke@435 259 4: subq $4,%rdx
duke@435 260 jge 3b / >= 4 qwords remain: unrolled loop
duke@435 261 addq $4,%rdx
duke@435 262 jg 2b / 1-3 qwords remain: single-qword loop
duke@435 263 ret
duke@435 264
duke@435 265 / Support for void Copy::arrayof_conjoint_jints(jint* from,
duke@435 266 / jint* to,
duke@435 267 / size_t count)
duke@435 268 / Equivalent to
duke@435 269 / conjoint_jints_atomic
duke@435 270 /
duke@435 271 / If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
duke@435 272 / the hardware handle it. The two dwords within qwords that span
duke@435 273 / cache line boundaries will still be loaded and stored atomically.
duke@435 274 /
duke@435 275 / rdi - from
duke@435 276 / rsi - to
duke@435 277 / rdx - count, treated as ssize_t
duke@435 278 /
/ Same structure as the byte/jshort stubs, with count in jints (4-byte
/ units): bulk qword copy plus at most one trailing dword.
duke@435 279 .align 16
duke@435 280 _Copy_arrayof_conjoint_jints:
duke@435 281 _Copy_conjoint_jints_atomic:
duke@435 282 movq %rdx,%r8 / dword count
duke@435 283 shrq %rdx / qword count (shift by 1)
duke@435 284 cmpq %rdi,%rsi
duke@435 285 leaq -4(%rdi,%r8,4),%rax / from + dcount*4 - 4
duke@435 286 jbe aci_CopyRight / to <= from: ascending copy is safe
duke@435 287 cmpq %rax,%rsi
duke@435 288 jbe aci_CopyLeft / to inside source range: copy descending
duke@435 289 aci_CopyRight:
/ Ascending: rdx counts up from -qcount to 0 (see byte-copy stub above).
duke@435 290 leaq -8(%rdi,%rdx,8),%rax / from + qcount*8 - 8
duke@435 291 leaq -8(%rsi,%rdx,8),%rcx / to + qcount*8 - 8
duke@435 292 negq %rdx
duke@435 293 jmp 5f / enter the unrolled loop at its counter update
duke@435 294 .align 16
/ Residual loop: one qword per iteration.
duke@435 295 1: movq 8(%rax,%rdx,8),%rsi
duke@435 296 movq %rsi,8(%rcx,%rdx,8)
duke@435 297 addq $1,%rdx
duke@435 298 jnz 1b
duke@435 299 2: testq $1,%r8 / check for trailing dword
duke@435 300 jz 3f
duke@435 301 movl 8(%rax),%esi / copy trailing dword
duke@435 302 movl %esi,8(%rcx)
duke@435 303 3: ret
duke@435 304 .align 16
/ Main ascending loop, unrolled 4 qwords per iteration.
duke@435 305 4: movq -24(%rax,%rdx,8),%rsi
duke@435 306 movq %rsi,-24(%rcx,%rdx,8)
duke@435 307 movq -16(%rax,%rdx,8),%rsi
duke@435 308 movq %rsi,-16(%rcx,%rdx,8)
duke@435 309 movq -8(%rax,%rdx,8),%rsi
duke@435 310 movq %rsi,-8(%rcx,%rdx,8)
duke@435 311 movq (%rax,%rdx,8),%rsi
duke@435 312 movq %rsi,(%rcx,%rdx,8)
duke@435 313 5: addq $4,%rdx
duke@435 314 jle 4b / >= 4 qwords remain: unrolled loop
duke@435 315 subq $4,%rdx
duke@435 316 jl 1b / 1-3 qwords remain: single-qword loop
duke@435 317 jmp 2b / none remain: handle trailing dword
duke@435 318 aci_CopyLeft:
/ Descending: trailing dword first, then qwords from high to low.
duke@435 319 testq $1,%r8 / check for trailing dword
duke@435 320 jz 3f
duke@435 321 movl -4(%rdi,%r8,4),%ecx / copy trailing dword
duke@435 322 movl %ecx,-4(%rsi,%r8,4)
duke@435 323 jmp 3f / enter the qword loops at the counter check
/ Residual descending loop: one qword per iteration.
duke@435 324 1: movq -8(%rdi,%rdx,8),%rcx
duke@435 325 movq %rcx,-8(%rsi,%rdx,8)
duke@435 326 subq $1,%rdx
duke@435 327 jnz 1b
duke@435 328 ret
duke@435 329 .align 16
/ Main descending loop, unrolled 4 qwords per iteration.
duke@435 330 2: movq 24(%rdi,%rdx,8),%rcx
duke@435 331 movq %rcx,24(%rsi,%rdx,8)
duke@435 332 movq 16(%rdi,%rdx,8),%rcx
duke@435 333 movq %rcx,16(%rsi,%rdx,8)
duke@435 334 movq 8(%rdi,%rdx,8),%rcx
duke@435 335 movq %rcx,8(%rsi,%rdx,8)
duke@435 336 movq (%rdi,%rdx,8),%rcx
duke@435 337 movq %rcx,(%rsi,%rdx,8)
duke@435 338 3: subq $4,%rdx
duke@435 339 jge 2b / >= 4 qwords remain: unrolled loop
duke@435 340 addq $4,%rdx
duke@435 341 jg 1b / 1-3 qwords remain: single-qword loop
duke@435 342 ret
duke@435 343
duke@435 344 / Support for void Copy::arrayof_conjoint_jlongs(jlong* from,
duke@435 345 / jlong* to,
duke@435 346 / size_t count)
duke@435 347 / Equivalent to
duke@435 348 / conjoint_jlongs_atomic
duke@435 349 / arrayof_conjoint_oops
duke@435 350 / conjoint_oops_atomic
duke@435 351 /
duke@435 352 / rdi - from
duke@435 353 / rsi - to
duke@435 354 / rdx - count, treated as ssize_t
duke@435 355 /
/ Count is already in qwords, so there is no sub-qword tail: just the
/ overlap-direction check and the (unrolled + residual) qword loops.
duke@435 356 .align 16
duke@435 357 _Copy_arrayof_conjoint_jlongs:
duke@435 358 _Copy_conjoint_jlongs_atomic:
duke@435 359 cmpq %rdi,%rsi
duke@435 360 leaq -8(%rdi,%rdx,8),%rax / from + count*8 - 8
duke@435 361 jbe acl_CopyRight / to <= from: ascending copy is safe
duke@435 362 cmpq %rax,%rsi
duke@435 363 jbe acl_CopyLeft / to inside source range: copy descending
duke@435 364 acl_CopyRight:
/ Ascending: rdx counts up from -count to 0.
duke@435 365 leaq -8(%rsi,%rdx,8),%rcx / to + count*8 - 8
duke@435 366 negq %rdx
duke@435 367 jmp 3f / enter the unrolled loop at its counter update
/ Residual loop: one qword per iteration.
duke@435 368 1: movq 8(%rax,%rdx,8),%rsi
duke@435 369 movq %rsi,8(%rcx,%rdx,8)
duke@435 370 addq $1,%rdx
duke@435 371 jnz 1b
duke@435 372 ret
duke@435 373 .align 16
/ Main ascending loop, unrolled 4 qwords per iteration.
duke@435 374 2: movq -24(%rax,%rdx,8),%rsi
duke@435 375 movq %rsi,-24(%rcx,%rdx,8)
duke@435 376 movq -16(%rax,%rdx,8),%rsi
duke@435 377 movq %rsi,-16(%rcx,%rdx,8)
duke@435 378 movq -8(%rax,%rdx,8),%rsi
duke@435 379 movq %rsi,-8(%rcx,%rdx,8)
duke@435 380 movq (%rax,%rdx,8),%rsi
duke@435 381 movq %rsi,(%rcx,%rdx,8)
duke@435 382 3: addq $4,%rdx
duke@435 383 jle 2b / >= 4 qwords remain: unrolled loop
duke@435 384 subq $4,%rdx
duke@435 385 jl 1b / 1-3 qwords remain: single-qword loop
duke@435 386 ret
/ Residual descending loop: one qword per iteration.
duke@435 387 4: movq -8(%rdi,%rdx,8),%rcx
duke@435 388 movq %rcx,-8(%rsi,%rdx,8)
duke@435 389 subq $1,%rdx
duke@435 390 jnz 4b
duke@435 391 ret
duke@435 392 .align 16
/ Main descending loop, unrolled 4 qwords per iteration.
duke@435 393 5: movq 24(%rdi,%rdx,8),%rcx
duke@435 394 movq %rcx,24(%rsi,%rdx,8)
duke@435 395 movq 16(%rdi,%rdx,8),%rcx
duke@435 396 movq %rcx,16(%rsi,%rdx,8)
duke@435 397 movq 8(%rdi,%rdx,8),%rcx
duke@435 398 movq %rcx,8(%rsi,%rdx,8)
duke@435 399 movq (%rdi,%rdx,8),%rcx
duke@435 400 movq %rcx,(%rsi,%rdx,8)
duke@435 401 acl_CopyLeft:
/ Loop entry: counter check first, so count == 0 falls straight to ret.
duke@435 402 subq $4,%rdx
duke@435 403 jge 5b / >= 4 qwords remain: unrolled loop
duke@435 404 addq $4,%rdx
duke@435 405 jg 4b / 1-3 qwords remain: single-qword loop
duke@435 406 ret

mercurial