src/share/vm/utilities/copy.cpp

author      iveresov
date        Thu, 11 Feb 2010 15:52:19 -0800
changeset   1696:0414c1049f15
parent      435:a61af66fc99e
child       1907:c18cbe5936b8
permissions -rw-r--r--

6923991: G1: improve scalability of RSet scanning
Summary: Implemented block-based work stealing. Moved copying during the RSet scanning phase to the main copying phase. Made the size of the RSet table depend on the region size.
Reviewed-by: apetrusenko, tonyp
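
As a rough illustration of the block-based work stealing mentioned in the summary (a minimal sketch, not the actual G1 RSet-scanning code; BlockClaimer, CardRange, and claim are hypothetical names), workers can claim fixed-size blocks of cards from a shared atomic cursor, so contention costs one atomic operation per block rather than one per card:

// Hypothetical sketch only; not part of this changeset's code.
#include <atomic>
#include <cstddef>

struct CardRange { size_t begin; size_t end; };

class BlockClaimer {
  std::atomic<size_t> _cursor{0};
  const size_t _limit;        // total number of cards to scan
  const size_t _block_size;   // cards handed out per atomic operation
public:
  BlockClaimer(size_t limit, size_t block_size)
    : _limit(limit), _block_size(block_size) {}

  // Each worker calls claim() in a loop; returns false once all
  // blocks have been handed out.
  bool claim(CardRange& out) {
    size_t begin = _cursor.fetch_add(_block_size, std::memory_order_relaxed);
    if (begin >= _limit) return false;
    out.begin = begin;
    out.end   = (begin + _block_size < _limit) ? begin + _block_size : _limit;
    return true;
  }
};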

/*
 * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_copy.cpp.incl"
// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
  address src = (address) from;
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;

  // (Note:  We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope.  For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation.  There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_bytes((void*) src, (void*) dst, size);
  }
}
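
The note inside conjoint_memory_atomic mentions a possible speedup: choose the copy unit from the alignment of the pointers alone, ignoring the low bits of size, then finish the unaligned tail with a short cleanup loop. A minimal sketch of that idea follows (illustration only, not part of this file; it assumes non-overlapping buffers and the name copy_with_tail is hypothetical):

// Sketch of the "bulk copy plus cleanup loop" optimization described above.
#include <cstddef>
#include <cstdint>

static void copy_with_tail(const void* from, void* to, size_t size) {
  const uint8_t* src = static_cast<const uint8_t*>(from);
  uint8_t* dst = static_cast<uint8_t*>(to);
  uintptr_t bits = reinterpret_cast<uintptr_t>(src)
                 | reinterpret_cast<uintptr_t>(dst);   // note: size excluded
  size_t unit = (bits % sizeof(uint64_t) == 0) ? sizeof(uint64_t)
              : (bits % sizeof(uint32_t) == 0) ? sizeof(uint32_t) : 1;
  size_t bulk = size - (size % unit);
  for (size_t off = 0; off < bulk; off += unit) {      // bulk copy in wide units
    if (unit == 8)      *(uint64_t*)(dst + off) = *(const uint64_t*)(src + off);
    else if (unit == 4) *(uint32_t*)(dst + off) = *(const uint32_t*)(src + off);
    else                dst[off] = src[off];
  }
  for (size_t off = bulk; off < size; ++off) {         // short cleanup loop
    dst[off] = src[off];
  }
}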

// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)( (jubyte)value ); // zero-extend
    fill += fill << 8;
    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned, so no need to be atomic.
    Copy::fill_to_bytes(dst, size, value);
  }
}
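
For reference, the zero-extend-and-shift sequence in fill_to_memory_atomic replicates the fill byte across every byte lane of the wider unit; each add-and-shift step doubles the number of lanes already filled. A standalone illustration of the 64-bit case (not part of this file):

// Worked example: replicating the byte 0xAB across a 64-bit word.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t fill = 0xABu;      // zero-extended byte:  0x00000000000000AB
  fill += fill << 8;          // two lanes filled:    0x000000000000ABAB
  fill += fill << 16;         // four lanes filled:   0x00000000ABABABAB
  fill += fill << 32;         // eight lanes filled:  0xABABABABABABABAB
  printf("%016llx\n", (unsigned long long) fill);  // prints abababababababab
  return 0;
}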
