src/share/vm/utilities/copy.cpp

author       dbuck
date         Mon, 03 Dec 2018 07:29:54 -0500
changeset    9562:dee6a1ce4a0c
parent       2314:f95d63e2154a
child        9572:624a0741915c
permissions  -rw-r--r--

8141491: Unaligned memory access in Bits.c
Summary: Introduce alignment-safe Copy::conjoint_swap and JVM_CopySwapMemory
Reviewed-by: mikael, dholmes
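
For context: the new Copy::conjoint_swap entry point copies a region of 2-, 4- or 8-byte elements while reversing the byte order of each element, picking the copy direction so overlapping regions work and going through memcpy when a pointer is misaligned. The snippet below is a minimal standalone sketch of that behaviour for 4-byte elements; it mirrors the effect of a call such as Copy::conjoint_swap(src, dst, byte_count, 4), but it is not HotSpot code and the test values are made up for illustration.

// Standalone sketch (not part of this changeset): mirrors the effect of
//   Copy::conjoint_swap((address)src, (address)dst, byte_count, /*elem_size*/ 4);
// i.e. copy 4-byte elements while reversing the byte order of each one.
#include <cstdint>
#include <cstdio>
#include <cstring>

// Reverse the byte order of one 32-bit value (same idea as CopySwap::byte_swap).
static uint32_t swap_u4(uint32_t x) {
  return ((x & 0x000000ffu) << 24) |
         ((x & 0x0000ff00u) <<  8) |
         ((x & 0x00ff0000u) >>  8) |
         ((x & 0xff000000u) >> 24);
}

int main() {
  uint32_t src[2] = { 0x11223344u, 0xdeadbeefu };   // made-up test values
  uint32_t dst[2];

  // Element-by-element copy-and-swap; going through memcpy keeps every
  // access alignment-safe, which is the point of the JDK-8141491 change.
  for (size_t i = 0; i < 2; i++) {
    uint32_t tmp;
    memcpy(&tmp, &src[i], sizeof(tmp));
    tmp = swap_u4(tmp);
    memcpy(&dst[i], &tmp, sizeof(tmp));
  }

  printf("%08x -> %08x\n", (unsigned)src[0], (unsigned)dst[0]);  // 11223344 -> 44332211
  printf("%08x -> %08x\n", (unsigned)src[1], (unsigned)dst[1]);  // deadbeef -> efbeadde
  return 0;
}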

duke@435 1 /*
dbuck@9562 2 * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "runtime/sharedRuntime.hpp"
stefank@2314 27 #include "utilities/copy.hpp"
duke@435 28
duke@435 29
duke@435 30 // Copy bytes; larger units are copied atomically if everything is aligned.
duke@435 31 void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
duke@435 32 address src = (address) from;
duke@435 33 address dst = (address) to;
duke@435 34 uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;
duke@435 35
duke@435 36 // (Note: We could improve performance by ignoring the low bits of size,
duke@435 37 // and putting a short cleanup loop after each bulk copy loop.
duke@435 38 // There are plenty of other ways to make this faster also,
duke@435 39 // and it's a slippery slope. For now, let's keep this code simple
duke@435 40 // since the simplicity helps clarify the atomicity semantics of
duke@435 41 // this operation. There are also CPU-specific assembly versions
duke@435 42 // which may or may not want to include such optimizations.)
duke@435 43
duke@435 44 if (bits % sizeof(jlong) == 0) {
duke@435 45 Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
duke@435 46 } else if (bits % sizeof(jint) == 0) {
duke@435 47 Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
duke@435 48 } else if (bits % sizeof(jshort) == 0) {
duke@435 49 Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
duke@435 50 } else {
duke@435 51 // Not aligned, so no need to be atomic.
kvn@1958 52 Copy::conjoint_jbytes((void*) src, (void*) dst, size);
duke@435 53 }
duke@435 54 }
duke@435 55
dbuck@9562 56 class CopySwap : AllStatic {
dbuck@9562 57 public:
dbuck@9562 58 /**
dbuck@9562 59 * Copy and byte swap elements
dbuck@9562 60 *
dbuck@9562 61 * @param src address of source
dbuck@9562 62 * @param dst address of destination
dbuck@9562 63 * @param byte_count number of bytes to copy
dbuck@9562 64 * @param elem_size size of the elements to copy-swap
dbuck@9562 65 */
dbuck@9562 66 static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
dbuck@9562 67 assert(src != NULL, "address must not be NULL");
dbuck@9562 68 assert(dst != NULL, "address must not be NULL");
dbuck@9562 69 assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
dbuck@9562 70 err_msg("incorrect element size: " SIZE_FORMAT, elem_size));
dbuck@9562 71 assert(is_size_aligned(byte_count, elem_size),
dbuck@9562 72 err_msg("byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size));
dbuck@9562 73
dbuck@9562 74 address src_end = src + byte_count;
dbuck@9562 75
dbuck@9562 76 if (dst <= src || dst >= src_end) {
dbuck@9562 77 do_conjoint_swap<RIGHT>(src, dst, byte_count, elem_size);
dbuck@9562 78 } else {
dbuck@9562 79 do_conjoint_swap<LEFT>(src, dst, byte_count, elem_size);
dbuck@9562 80 }
dbuck@9562 81 }
dbuck@9562 82
dbuck@9562 83 private:
dbuck@9562 84 /**
dbuck@9562 85 * Byte swap a 16-bit value
dbuck@9562 86 */
dbuck@9562 87 static uint16_t byte_swap(uint16_t x) {
dbuck@9562 88 return (x << 8) | (x >> 8);
dbuck@9562 89 }
dbuck@9562 90
dbuck@9562 91 /**
dbuck@9562 92 * Byte swap a 32-bit value
dbuck@9562 93 */
dbuck@9562 94 static uint32_t byte_swap(uint32_t x) {
dbuck@9562 95 uint16_t lo = (uint16_t)x;
dbuck@9562 96 uint16_t hi = (uint16_t)(x >> 16);
dbuck@9562 97
dbuck@9562 98 return ((uint32_t)byte_swap(lo) << 16) | (uint32_t)byte_swap(hi);
dbuck@9562 99 }
dbuck@9562 100
dbuck@9562 101 /**
dbuck@9562 102 * Byte swap a 64-bit value
dbuck@9562 103 */
dbuck@9562 104 static uint64_t byte_swap(uint64_t x) {
dbuck@9562 105 uint32_t lo = (uint32_t)x;
dbuck@9562 106 uint32_t hi = (uint32_t)(x >> 32);
dbuck@9562 107
dbuck@9562 108 return ((uint64_t)byte_swap(lo) << 32) | (uint64_t)byte_swap(hi);
dbuck@9562 109 }
dbuck@9562 110
dbuck@9562 111 enum CopyDirection {
dbuck@9562 112 RIGHT, // lower -> higher address
dbuck@9562 113 LEFT // higher -> lower address
dbuck@9562 114 };
dbuck@9562 115
dbuck@9562 116 /**
dbuck@9562 117 * Copy and byte swap elements
dbuck@9562 118 *
dbuck@9562 119 * <T> - type of element to copy
dbuck@9562 120 * <D> - copy direction
dbuck@9562 121 * <is_src_aligned> - true if src argument is aligned to element size
dbuck@9562 122 * <is_dst_aligned> - true if dst argument is aligned to element size
dbuck@9562 123 *
dbuck@9562 124 * @param src address of source
dbuck@9562 125 * @param dst address of destination
dbuck@9562 126 * @param byte_count number of bytes to copy
dbuck@9562 127 */
dbuck@9562 128 template <typename T, CopyDirection D, bool is_src_aligned, bool is_dst_aligned>
dbuck@9562 129 static void do_conjoint_swap(address src, address dst, size_t byte_count) {
dbuck@9562 130 address cur_src, cur_dst;
dbuck@9562 131
dbuck@9562 132 switch (D) {
dbuck@9562 133 case RIGHT:
dbuck@9562 134 cur_src = src;
dbuck@9562 135 cur_dst = dst;
dbuck@9562 136 break;
dbuck@9562 137 case LEFT:
dbuck@9562 138 cur_src = src + byte_count - sizeof(T);
dbuck@9562 139 cur_dst = dst + byte_count - sizeof(T);
dbuck@9562 140 break;
dbuck@9562 141 }
dbuck@9562 142
dbuck@9562 143 for (size_t i = 0; i < byte_count / sizeof(T); i++) {
dbuck@9562 144 T tmp;
dbuck@9562 145
dbuck@9562 146 if (is_src_aligned) {
dbuck@9562 147 tmp = *(T*)cur_src;
dbuck@9562 148 } else {
dbuck@9562 149 memcpy(&tmp, cur_src, sizeof(T));
dbuck@9562 150 }
dbuck@9562 151
dbuck@9562 152 tmp = byte_swap(tmp);
dbuck@9562 153
dbuck@9562 154 if (is_dst_aligned) {
dbuck@9562 155 *(T*)cur_dst = tmp;
dbuck@9562 156 } else {
dbuck@9562 157 memcpy(cur_dst, &tmp, sizeof(T));
dbuck@9562 158 }
dbuck@9562 159
dbuck@9562 160 switch (D) {
dbuck@9562 161 case RIGHT:
dbuck@9562 162 cur_src += sizeof(T);
dbuck@9562 163 cur_dst += sizeof(T);
dbuck@9562 164 break;
dbuck@9562 165 case LEFT:
dbuck@9562 166 cur_src -= sizeof(T);
dbuck@9562 167 cur_dst -= sizeof(T);
dbuck@9562 168 break;
dbuck@9562 169 }
dbuck@9562 170 }
dbuck@9562 171 }
dbuck@9562 172
dbuck@9562 173 /**
dbuck@9562 174 * Copy and byte swap elements
dbuck@9562 175 *
dbuck@9562 176 * <T> - type of element to copy
dbuck@9562 177 * <D> - copy direction
dbuck@9562 178 *
dbuck@9562 179 * @param src address of source
dbuck@9562 180 * @param dst address of destination
dbuck@9562 181 * @param byte_count number of bytes to copy
dbuck@9562 182 */
dbuck@9562 183 template <typename T, CopyDirection direction>
dbuck@9562 184 static void do_conjoint_swap(address src, address dst, size_t byte_count) {
dbuck@9562 185 if (is_ptr_aligned(src, sizeof(T))) {
dbuck@9562 186 if (is_ptr_aligned(dst, sizeof(T))) {
dbuck@9562 187 do_conjoint_swap<T,direction,true,true>(src, dst, byte_count);
dbuck@9562 188 } else {
dbuck@9562 189 do_conjoint_swap<T,direction,true,false>(src, dst, byte_count);
dbuck@9562 190 }
dbuck@9562 191 } else {
dbuck@9562 192 if (is_ptr_aligned(dst, sizeof(T))) {
dbuck@9562 193 do_conjoint_swap<T,direction,false,true>(src, dst, byte_count);
dbuck@9562 194 } else {
dbuck@9562 195 do_conjoint_swap<T,direction,false,false>(src, dst, byte_count);
dbuck@9562 196 }
dbuck@9562 197 }
dbuck@9562 198 }
dbuck@9562 199
dbuck@9562 200
dbuck@9562 201 /**
dbuck@9562 202 * Copy and byte swap elements
dbuck@9562 203 *
dbuck@9562 204 * <D> - copy direction
dbuck@9562 205 *
dbuck@9562 206 * @param src address of source
dbuck@9562 207 * @param dst address of destination
dbuck@9562 208 * @param byte_count number of bytes to copy
dbuck@9562 209 * @param elem_size size of the elements to copy-swap
dbuck@9562 210 */
dbuck@9562 211 template <CopyDirection D>
dbuck@9562 212 static void do_conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
dbuck@9562 213 switch (elem_size) {
dbuck@9562 214 case 2: do_conjoint_swap<uint16_t,D>(src, dst, byte_count); break;
dbuck@9562 215 case 4: do_conjoint_swap<uint32_t,D>(src, dst, byte_count); break;
dbuck@9562 216 case 8: do_conjoint_swap<uint64_t,D>(src, dst, byte_count); break;
dbuck@9562 217 default: guarantee(false, err_msg("do_conjoint_swap: Invalid elem_size %zd\n", elem_size));
dbuck@9562 218 }
dbuck@9562 219 }
dbuck@9562 220 };
dbuck@9562 221
dbuck@9562 222 void Copy::conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
dbuck@9562 223 CopySwap::conjoint_swap(src, dst, byte_count, elem_size);
dbuck@9562 224 }
duke@435 225
duke@435 226 // Fill bytes; larger units are filled atomically if everything is aligned.
duke@435 227 void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
duke@435 228 address dst = (address) to;
duke@435 229 uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
duke@435 230 if (bits % sizeof(jlong) == 0) {
duke@435 231 jlong fill = (julong)( (jubyte)value ); // zero-extend
duke@435 232 if (fill != 0) {
duke@435 233 fill += fill << 8;
duke@435 234 fill += fill << 16;
duke@435 235 fill += fill << 32;
duke@435 236 }
duke@435 237 //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
duke@435 238 for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
duke@435 239 *(jlong*)(dst + off) = fill;
duke@435 240 }
duke@435 241 } else if (bits % sizeof(jint) == 0) {
duke@435 242 jint fill = (juint)( (jubyte)value ); // zero-extend
duke@435 243 if (fill != 0) {
duke@435 244 fill += fill << 8;
duke@435 245 fill += fill << 16;
duke@435 246 }
duke@435 247 //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
duke@435 248 for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
duke@435 249 *(jint*)(dst + off) = fill;
duke@435 250 }
duke@435 251 } else if (bits % sizeof(jshort) == 0) {
duke@435 252 jshort fill = (jushort)( (jubyte)value ); // zero-extend
duke@435 253 fill += fill << 8;
duke@435 254 //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
duke@435 255 for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
duke@435 256 *(jshort*)(dst + off) = fill;
duke@435 257 }
duke@435 258 } else {
duke@435 259 // Not aligned, so no need to be atomic.
duke@435 260 Copy::fill_to_bytes(dst, size, value);
duke@435 261 }
duke@435 262 }

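One detail of the pre-existing fill path worth spelling out: Copy::fill_to_memory_atomic widens the fill byte into a full word by repeated shift-and-add (fill += fill << 8, then << 16, then << 32), which replicates the byte into every byte lane without carries because the starting value is below 0x100. The snippet below is a standalone sketch of that replication step for the jlong case; it is illustrative only and not part of the file above.

// Standalone sketch (not HotSpot code) of the byte-replication used by
// Copy::fill_to_memory_atomic, so the fill loop can store a whole 64-bit
// word per iteration instead of one byte at a time.
#include <cstdint>
#include <cstdio>

static uint64_t replicate_byte(uint8_t value) {
  uint64_t fill = value;   // zero-extend, e.g. 0x41
  fill += fill << 8;       // 0x4141
  fill += fill << 16;      // 0x41414141
  fill += fill << 32;      // 0x4141414141414141
  return fill;             // no carries occur, since value < 0x100
}

int main() {
  printf("%016llx\n", (unsigned long long)replicate_byte(0x41)); // prints 4141414141414141
  return 0;
}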