/*
 * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_copy.cpp.incl"


// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
  address src = (address) from;
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;

  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope.  For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation.  There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_bytes((void*) src, (void*) dst, size);
  }
}
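
// (Editorial note, not part of the original file: the single OR into 'bits'
// above works because an address or size is a multiple of a power-of-two N
// exactly when its low bits are zero, so OR-ing src, dst, and size leaves a
// low bit set if any of the three would break N-byte alignment.  A
// hypothetical standalone check for the jint case could look like:
//
//   static bool is_jint_aligned(void* from, void* to, size_t size) {
//     uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
//     return bits % sizeof(jint) == 0;  // from, to, and size are all 4-byte multiples
//   }
//
// The helper name is illustrative only; the real dispatch is the if/else
// chain in conjoint_memory_atomic above.)
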
// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)( (jubyte)value ); // zero-extend
    fill += fill << 8;
    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned, so no need to be atomic.
    Copy::fill_to_bytes(dst, size, value);
  }
}
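
// (Editorial note, not part of the original file: the repeated
// "fill += fill << k" steps in fill_to_memory_atomic replicate the fill byte
// across the whole store unit.  For example, with value = 0xAB in the jlong
// branch:
//
//   fill                = 0x00000000000000AB
//   fill += fill << 8   -> 0x000000000000ABAB
//   fill += fill << 16  -> 0x00000000ABABABAB
//   fill += fill << 32  -> 0xABABABABABABABAB
//
// so each jlong store writes eight copies of the byte in one word-sized
// store, which is what gives the fill its atomicity when dst and size are
// both jlong-aligned.)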