src/share/vm/utilities/copy.cpp

author      aoqi
date        Wed, 03 Jul 2019 20:42:37 +0800
changeset   9637:eef07cd490d4
parent      9572:624a0741915c
summary     Merge

/*
 * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/copy.hpp"
// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
  address src = (address) from;
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;

  // (Note:  We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope.  For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation.  There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_jbytes((void*) src, (void*) dst, size);
  }
}

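// Editor's note: the OR-and-modulo test above checks the alignment of the
// source, the destination, and the size in one step -- if any of the three
// has a low bit set, so does `bits`, and the copy falls back to a smaller
// unit. A minimal standalone sketch of that dispatch idea (illustration
// only, not part of this file; the helper name and element widths, which
// mirror jlong/jint/jshort, are made up for the example):
#if 0
#include <cstdint>
#include <cstddef>

static size_t pick_copy_unit(const void* from, const void* to, size_t size) {
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
  if (bits % 8 == 0) return 8;   // all three are 8-byte aligned
  if (bits % 4 == 0) return 4;   // all three are 4-byte aligned
  if (bits % 2 == 0) return 2;   // all three are 2-byte aligned
  return 1;                      // fall back to byte-at-a-time copying
}
#endif
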
class CopySwap : AllStatic {
public:
  /**
   * Copy and byte swap elements
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
    assert(src != NULL, "address must not be NULL");
    assert(dst != NULL, "address must not be NULL");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           err_msg("incorrect element size: " SIZE_FORMAT, elem_size));
    assert(is_size_aligned(byte_count, elem_size),
           err_msg("byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size));

    address src_end = src + byte_count;

    if (dst <= src || dst >= src_end) {
      do_conjoint_swap<RIGHT>(src, dst, byte_count, elem_size);
    } else {
      do_conjoint_swap<LEFT>(src, dst, byte_count, elem_size);
    }
  }

private:
  /**
   * Byte swap a 16-bit value
   */
  static uint16_t byte_swap(uint16_t x) {
    return (x << 8) | (x >> 8);
  }

  /**
   * Byte swap a 32-bit value
   */
  static uint32_t byte_swap(uint32_t x) {
    uint16_t lo = (uint16_t)x;
    uint16_t hi = (uint16_t)(x >> 16);

    return ((uint32_t)byte_swap(lo) << 16) | (uint32_t)byte_swap(hi);
  }

  /**
   * Byte swap a 64-bit value
   */
  static uint64_t byte_swap(uint64_t x) {
    uint32_t lo = (uint32_t)x;
    uint32_t hi = (uint32_t)(x >> 32);

    return ((uint64_t)byte_swap(lo) << 32) | (uint64_t)byte_swap(hi);
  }

  enum CopyDirection {
    RIGHT, // lower -> higher address
    LEFT   // higher -> lower address
  };

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   * <is_src_aligned> - true if src argument is aligned to element size
   * <is_dst_aligned> - true if dst argument is aligned to element size
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection D, bool is_src_aligned, bool is_dst_aligned>
  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
    address cur_src, cur_dst;

    switch (D) {
    case RIGHT:
      cur_src = src;
      cur_dst = dst;
      break;
    case LEFT:
      cur_src = src + byte_count - sizeof(T);
      cur_dst = dst + byte_count - sizeof(T);
      break;
    }

    for (size_t i = 0; i < byte_count / sizeof(T); i++) {
      T tmp;

      if (is_src_aligned) {
        tmp = *(T*)cur_src;
      } else {
        memcpy(&tmp, cur_src, sizeof(T));
      }

      tmp = byte_swap(tmp);

      if (is_dst_aligned) {
        *(T*)cur_dst = tmp;
      } else {
        memcpy(cur_dst, &tmp, sizeof(T));
      }

      switch (D) {
      case RIGHT:
        cur_src += sizeof(T);
        cur_dst += sizeof(T);
        break;
      case LEFT:
        cur_src -= sizeof(T);
        cur_dst -= sizeof(T);
        break;
      }
    }
  }

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection direction>
  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
    if (is_ptr_aligned(src, sizeof(T))) {
      if (is_ptr_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,true,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,true,false>(src, dst, byte_count);
      }
    } else {
      if (is_ptr_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,false,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,false,false>(src, dst, byte_count);
      }
    }
  }

  /**
   * Copy and byte swap elements
   *
   * <D> - copy direction
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template <CopyDirection D>
  static void do_conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
    switch (elem_size) {
    case 2: do_conjoint_swap<uint16_t,D>(src, dst, byte_count); break;
    case 4: do_conjoint_swap<uint32_t,D>(src, dst, byte_count); break;
    case 8: do_conjoint_swap<uint64_t,D>(src, dst, byte_count); break;
    default: guarantee(false, err_msg("do_conjoint_swap: Invalid elem_size %zd\n", elem_size));
    }
  }
};

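// Editor's note: the byte_swap overloads above build wider swaps out of
// narrower ones -- a 32-bit swap exchanges the two 16-bit halves and swaps
// each half, so 0x11223344 becomes 0x44332211. A minimal standalone sketch
// of the same composition (illustration only, not part of this file; the
// function names are made up for the example):
#if 0
#include <cstdint>

static uint16_t swap16(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }

static uint32_t swap32(uint32_t x) {
  uint16_t lo = (uint16_t)x;          // for 0x11223344 this is 0x3344
  uint16_t hi = (uint16_t)(x >> 16);  // and this is 0x1122
  // the swapped halves trade places: (0x4433 << 16) | 0x2211 == 0x44332211
  return ((uint32_t)swap16(lo) << 16) | (uint32_t)swap16(hi);
}
#endif
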
void Copy::conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap(src, dst, byte_count, elem_size);
}

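// Editor's note: Copy::conjoint_swap is the entry point for bulk copies that
// also reverse the byte order of each element, e.g. when converting a buffer
// between big- and little-endian layouts. A hedged usage sketch, assuming the
// surrounding HotSpot headers; the function name and arguments are made up
// for the example (illustration only, not part of this file):
#if 0
static void example_swap_jints_in_place(jint* data, size_t elem_count) {
  // Reverse the byte order of every 4-byte element. Using the same range for
  // source and destination is fine: dst <= src selects the RIGHT direction,
  // and each element is read before it is written back.
  Copy::conjoint_swap((address) data, (address) data,
                      elem_count * sizeof(jint), sizeof(jint));
}
#endif
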
// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)( (jubyte)value ); // zero-extend
    fill += fill << 8;
    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned, so no need to be atomic.
    Copy::fill_to_bytes(dst, size, value);
  }
}

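// Editor's note: the `fill += fill << 8` sequences above replicate the byte
// value across the wider word, so each aligned store writes the same pattern
// a per-byte fill would have produced: 0x05 becomes 0x0505, then 0x05050505,
// then 0x0505050505050505. Because the shifted addend always has zero low
// bytes, the additions never carry. A minimal standalone sketch of the
// replication step (illustration only, not part of this file; the helper
// name is made up for the example):
#if 0
#include <cstdint>

static uint64_t replicate_byte(uint8_t value) {
  uint64_t fill = value;   // e.g. 0x0000000000000005
  fill += fill << 8;       //      0x0000000000000505
  fill += fill << 16;      //      0x0000000005050505
  fill += fill << 32;      //      0x0505050505050505
  return fill;
}
#endif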
