src/os_cpu/linux_x86/vm/bytes_linux_x86.inline.hpp

author       dsimms
date         Wed, 25 Sep 2013 13:58:13 +0200
changeset    5781:899ecf76b570
parent       2314:f95d63e2154a
child        6876:710a3c8b516e
permissions  -rw-r--r--

8023956: Provide a work-around to broken Linux 32 bit "Exec Shield" using CS for NX emulation (crashing with SI_KERNEL)
Summary: Execute some code at a high virtual address value, and keep mapped
Reviewed-by: coleenp, zgu
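
The summary above describes the work-around only at a high level: map a page at a high virtual address, execute a small piece of code from it, and keep the mapping alive so that the CS-limit-based NX emulation covers the VM's code addresses. The fragment below is not the code from this changeset; it is a minimal, hypothetical sketch of that general idea on 32-bit Linux, where the hint address 0xbffe0000 and the single-byte ret stub are purely illustrative assumptions.

// Hypothetical sketch only -- not the HotSpot change itself.
#include <sys/mman.h>
#include <stdio.h>

int main() {
  // Request a mapping near a high 32-bit address; without MAP_FIXED the
  // address is only a hint and the kernel may place the page elsewhere.
  void* hint = reinterpret_cast<void*>(0xbffe0000);
  void* page = mmap(hint, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    perror("mmap");
    return 1;
  }

  // A single 'ret' instruction (0xC3) is enough to execute something
  // from the mapping.
  static_cast<unsigned char*>(page)[0] = 0xC3;
  typedef void (*stub_t)();
  reinterpret_cast<stub_t>(page)();   // run the stub once

  // Deliberately no munmap(): per the summary, the mapping is kept.
  printf("executed stub at %p\n", page);
  return 0;
}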

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef OS_CPU_LINUX_X86_VM_BYTES_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_BYTES_LINUX_X86_INLINE_HPP

#include <byteswap.h>

// Efficient swapping of data bytes from Java byte
// ordering to native byte ordering and vice versa.
inline u2   Bytes::swap_u2(u2 x) {
#ifdef AMD64
  return bswap_16(x);
#else
  u2 ret;
  __asm__ __volatile__ (
    "movw %0, %%ax;"
    "xchg %%al, %%ah;"
    "movw %%ax, %0"
    :"=r" (ret)      // output : register 0 => ret
    :"0"  (x)        // input  : x => register 0
    :"ax", "0"       // clobbered registers
  );
  return ret;
#endif // AMD64
}

inline u4   Bytes::swap_u4(u4 x) {
#ifdef AMD64
  return bswap_32(x);
#else
  u4 ret;
  __asm__ __volatile__ (
    "bswap %0"
    :"=r" (ret)      // output : register 0 => ret
    :"0"  (x)        // input  : x => register 0
    :"0"             // clobbered register
  );
  return ret;
#endif // AMD64
}

#ifdef AMD64
inline u8 Bytes::swap_u8(u8 x) {
#ifdef SPARC_WORKS
  // workaround for SunStudio12 CR6615391
  __asm__ __volatile__ (
    "bswapq %0"
    :"=r" (x)        // output : register 0 => x
    :"0"  (x)        // input  : x => register 0
    :"0"             // clobbered register
  );
  return x;
#else
  return bswap_64(x);
#endif
}
#else
// Helper function for swap_u8: swaps each 32-bit half and exchanges them
// (x is the low half, y the high half of the source value).
inline u8   Bytes::swap_u8_base(u4 x, u4 y) {
  return (((u8)swap_u4(x))<<32) | swap_u4(y);
}

inline u8 Bytes::swap_u8(u8 x) {
  return swap_u8_base(*(u4*)&x, *(((u4*)&x)+1));
}
#endif // !AMD64

#endif // OS_CPU_LINUX_X86_VM_BYTES_LINUX_X86_INLINE_HPP
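
For context, Java serializes multi-byte values in big-endian order while x86 is little-endian, which is why these swap routines exist. The standalone fragment below is not part of HotSpot; it is a small sketch, under that assumption, of the same approach the header uses: bswap_32 from <byteswap.h> for a 4-byte value, and the swap-each-half-and-exchange composition of swap_u8_base for an 8-byte value on a 32-bit target. The function names and the sample class-file magic are illustrative only.

// Illustrative sketch only -- not part of the HotSpot sources.
#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Big-endian (Java-order) u4, copied byte-for-byte from a stream,
// converted to the native little-endian representation.
static inline uint32_t java_u4_to_native(uint32_t be) {
  return bswap_32(be);
}

// Same composition swap_u8_base() uses on 32-bit: swap each 4-byte half,
// then exchange the halves.
static inline uint64_t java_u8_to_native(uint64_t be) {
  uint32_t lo = (uint32_t)(be & 0xffffffffu);
  uint32_t hi = (uint32_t)(be >> 32);
  return ((uint64_t)bswap_32(lo) << 32) | bswap_32(hi);
}

int main() {
  // 0xCAFEBABE stored big-endian, as at the start of a class file.
  const unsigned char stream[4] = { 0xCA, 0xFE, 0xBA, 0xBE };
  uint32_t raw;
  memcpy(&raw, stream, sizeof raw);   // raw holds the big-endian bit pattern
  printf("magic = 0x%08x\n", java_u4_to_native(raw));   // 0xcafebabe on x86
  printf("u8    = 0x%016llx\n",      // full byte reversal: 0x0807060504030201
         (unsigned long long) java_u8_to_native(0x0102030405060708ULL));
  return 0;
}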
