src/share/vm/utilities/copy.cpp

author      ysr
date        Thu, 20 Nov 2008 16:56:09 -0800
changeset   888:c96030fff130
parent      435:a61af66fc99e
child       1907:c18cbe5936b8
permissions -rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For the current soft-ref clearing policies, we can decide at marking time whether a soft reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference-processing phase. This can be especially beneficial for concurrent collectors, where marking is usually concurrent but reference processing usually is not.
Reviewed-by: jmasa
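
The idea in the summary can be pictured with a small sketch. All names below (SoftRefPolicy, definitely_not_cleared, should_clear) are invented for illustration; this is not the ReferenceProcessor code actually touched by this changeset:

// Hypothetical two-phase soft-reference policy, sketched from the summary.
struct SoftRefPolicy {
  // Conservative test, cheap enough to run during (possibly concurrent)
  // marking: returns true only when the policy can already promise that
  // the referent will NOT be cleared, so the marker may treat it as
  // strongly reachable right away.
  virtual bool definitely_not_cleared(long ref_age) const = 0;

  // Precise test, run later in the (typically stop-the-world) reference
  // processing phase, which makes the final clear/keep decision.
  virtual bool should_clear(long ref_age) const = 0;

  virtual ~SoftRefPolicy() {}
};

// An LRU-style instance: references touched recently enough survive.
struct LRUPolicy : SoftRefPolicy {
  long _max_age;   // threshold fixed at the start of the collection
  explicit LRUPolicy(long max_age) : _max_age(max_age) {}
  virtual bool definitely_not_cleared(long ref_age) const { return ref_age <  _max_age; }
  virtual bool should_clear(long ref_age) const           { return ref_age >= _max_age; }
};

Because definitely_not_cleared can already answer "keep" for most references during marking, only the residue must wait for the serial processing phase, which is the saving the summary describes for concurrent collectors.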

/*
 * Copyright 2006-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_copy.cpp.incl"


// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
  address src = (address) from;
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;

  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope. For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation. There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.
  // A sketch of such a cleanup-loop variant follows this function.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
  } else {
    // Not aligned: atomicity in units larger than a byte is impossible
    // anyway, so fall back to a plain byte copy.
    Copy::conjoint_bytes((void*) src, (void*) dst, size);
  }
}
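
// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the original file.] The `bits` trick
// above ORs the source address, destination address, and byte count, so a
// single modulus test per unit size proves that all three are multiples of
// that unit. A hypothetical standalone probe (names are ours, not HotSpot's):
//
//   #include <cstdint>
//   #include <cstdio>
//
//   static const char* copy_unit(uintptr_t src, uintptr_t dst, size_t size) {
//     uintptr_t bits = src | dst | size;
//     if (bits % sizeof(int64_t) == 0) return "jlong";   // 8-byte units
//     if (bits % sizeof(int32_t) == 0) return "jint";    // 4-byte units
//     if (bits % sizeof(int16_t) == 0) return "jshort";  // 2-byte units
//     return "byte";                                     // unaligned fallback
//   }
//
//   int main() {
//     std::printf("%s\n", copy_unit(0x1000, 0x2000, 64)); // prints "jlong"
//     std::printf("%s\n", copy_unit(0x1002, 0x2006, 10)); // prints "jshort"
//     std::printf("%s\n", copy_unit(0x1001, 0x2000, 64)); // prints "byte"
//   }
//
// The cleanup-loop variant mentioned in the note above would instead ignore
// the low bits of `size`, copying the bulk in jlong units and the remaining
// (size % 8) bytes one at a time. A sketch, under the same hedging:
//
//   size_t bulk = size & ~(sizeof(jlong) - 1);            // whole jlongs
//   Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst,
//                                bulk / sizeof(jlong));
//   Copy::conjoint_bytes(src + bulk, dst + bulk, size - bulk); // byte tail
// ---------------------------------------------------------------------------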


// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      // Replicate the byte into all eight byte lanes:
      // 0x00000000000000VV -> 0xVVVVVVVVVVVVVVVV.
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      // Same replication, truncated to four byte lanes.
      fill += fill << 8;
      fill += fill << 16;
    }
    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)( (jubyte)value ); // zero-extend
    fill += fill << 8; // replicate into both byte lanes
    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned: fill byte by byte (atomicity at larger units is moot).
    Copy::fill_to_bytes(dst, size, value);
  }
}
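
As a sanity check on the shift-and-add replication used above, the following standalone snippet (an editorial illustration, not part of the original file) builds the 64-bit fill pattern the same way and compares it with what memset produces:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint8_t value = 0xAB;
  // Same replication as fill_to_memory_atomic's jlong branch:
  uint64_t fill = value;   // 0x00000000000000AB
  fill += fill << 8;       // 0x000000000000ABAB
  fill += fill << 16;      // 0x00000000ABABABAB
  fill += fill << 32;      // 0xABABABABABABABAB
  // memset writes the byte into every byte position, so the two must agree.
  uint64_t expected;
  std::memset(&expected, value, sizeof(expected));
  assert(fill == expected);
  return 0;
}

The additions never carry, because each shift moves the pattern into bytes that are still zero, so += behaves exactly like |= here; the jint and jshort branches are the same construction cut off at 32 and 16 bits.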
