Merge mips64el-jdk8u232-b10

Sat, 09 Nov 2019 20:29:45 +0800

author
aoqi
date
Sat, 09 Nov 2019 20:29:45 +0800
changeset 9756
2be326848943
parent 9707
b87dc103bf45
parent 9755
12177d88b89c
child 9757
a9a6cc2af3e8

Merge

.hgtags file | annotate | diff | comparison | revisions
THIRD_PARTY_README file | annotate | diff | comparison | revisions
agent/src/os/linux/ps_core.c file | annotate | diff | comparison | revisions
agent/src/os/linux/ps_proc.c file | annotate | diff | comparison | revisions
make/bsd/makefiles/jsig.make file | annotate | diff | comparison | revisions
make/linux/makefiles/jsig.make file | annotate | diff | comparison | revisions
make/linux/makefiles/zeroshark.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/jsig.make file | annotate | diff | comparison | revisions
src/cpu/ppc/vm/assembler_ppc.hpp file | annotate | diff | comparison | revisions
src/cpu/ppc/vm/assembler_ppc.inline.hpp file | annotate | diff | comparison | revisions
src/cpu/ppc/vm/macroAssembler_ppc.hpp file | annotate | diff | comparison | revisions
src/cpu/ppc/vm/stubGenerator_ppc.cpp file | annotate | diff | comparison | revisions
src/cpu/ppc/vm/vm_version_ppc.cpp file | annotate | diff | comparison | revisions
src/cpu/ppc/vm/vm_version_ppc.hpp file | annotate | diff | comparison | revisions
src/cpu/x86/vm/stubRoutines_x86_64.hpp file | annotate | diff | comparison | revisions
src/cpu/x86/vm/x86_64.ad file | annotate | diff | comparison | revisions
src/os/aix/vm/os_aix.cpp file | annotate | diff | comparison | revisions
src/os/bsd/vm/os_bsd.cpp file | annotate | diff | comparison | revisions
src/os/bsd/vm/os_bsd.inline.hpp file | annotate | diff | comparison | revisions
src/os/bsd/vm/perfMemory_bsd.cpp file | annotate | diff | comparison | revisions
src/os/linux/vm/os_linux.cpp file | annotate | diff | comparison | revisions
src/os/linux/vm/os_linux.inline.hpp file | annotate | diff | comparison | revisions
src/os/linux/vm/perfMemory_linux.cpp file | annotate | diff | comparison | revisions
src/os/posix/vm/os_posix.cpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/os_solaris.cpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/os_solaris.inline.hpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/perfMemory_solaris.cpp file | annotate | diff | comparison | revisions
src/os/windows/vm/os_windows.cpp file | annotate | diff | comparison | revisions
src/os/windows/vm/os_windows.inline.hpp file | annotate | diff | comparison | revisions
src/os/windows/vm/perfMemory_windows.cpp file | annotate | diff | comparison | revisions
src/share/vm/c1/c1_Optimizer.cpp file | annotate | diff | comparison | revisions
src/share/vm/ci/bcEscapeAnalyzer.cpp file | annotate | diff | comparison | revisions
src/share/vm/ci/bcEscapeAnalyzer.hpp file | annotate | diff | comparison | revisions
src/share/vm/ci/ciStreams.cpp file | annotate | diff | comparison | revisions
src/share/vm/ci/ciStreams.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp file | annotate | diff | comparison | revisions
src/share/vm/oops/instanceKlass.cpp file | annotate | diff | comparison | revisions
src/share/vm/oops/instanceKlass.hpp file | annotate | diff | comparison | revisions
src/share/vm/oops/klass.hpp file | annotate | diff | comparison | revisions
src/share/vm/oops/klassVtable.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/connode.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/graphKit.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/graphKit.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/ifnode.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/library_call.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/loopPredicate.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/loopTransform.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/loopnode.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/matcher.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/memnode.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/reg_split.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/runtime.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/superword.cpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jvmtiEnvBase.hpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jvmtiRedefineClasses.cpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jvmtiRedefineClasses.hpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jvmtiTagMap.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/arguments.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/java.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/mutexLocker.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/mutexLocker.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/os.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/stubRoutines.cpp file | annotate | diff | comparison | revisions
src/share/vm/services/classLoadingService.cpp file | annotate | diff | comparison | revisions
src/share/vm/utilities/taskqueue.hpp file | annotate | diff | comparison | revisions
test/Makefile file | annotate | diff | comparison | revisions
test/TEST.groups file | annotate | diff | comparison | revisions
     1.1 --- a/.hgtags	Sat Nov 09 20:15:27 2019 +0800
     1.2 +++ b/.hgtags	Sat Nov 09 20:29:45 2019 +0800
     1.3 @@ -1289,6 +1289,17 @@
     1.4  17778f8991c83d794897f05210dce2d2a7b4eb2d jdk8u222-b06
     1.5  75f4e02f1113bc353fc60df7330dd5456efc49a3 jdk8u222-b07
     1.6  36a743eee6721b423b7c21a3ba28ac8d906a5386 jdk8u222-b08
     1.7 +1ec20e8a3d8a7a29e9113b14567abec9f0240e9d jdk8u232-b00
     1.8  55f693ba975d445d83a59cc32367ec4c2452b0c5 jdk8u222-b09
     1.9 +adfdce09acc32a691145a67792d47ab637159776 jdk8u222-b10
    1.10 +adfdce09acc32a691145a67792d47ab637159776 jdk8u222-ga
    1.11  2fdf635bcf2807c701b33724474d6a318fbe6c01 mips64el-jdk8u222-b11
    1.12  8c67a8d091962557c723fe56922fcbcc71ad6778 mips64el-jdk8u222-b12
    1.13 +afa42cf8d060a12fe2fd24210cac6c46252fcd53 jdk8u232-b01
    1.14 +c963a2881865f6fab5b49a31d22651e8e1b4bf46 jdk8u232-b02
    1.15 +fa7fe6dae563edaae8a8bbe8ac4bd4fa942bde0c jdk8u232-b03
    1.16 +921c5ee7965fdfde75f578ddda24d5cd16f124dc jdk8u232-b04
    1.17 +b13d7942036329f64c77a93cffc25e1b52523a3c jdk8u232-b05
    1.18 +fea2c7f50ce8e6aee1e946eaec7b834193747d82 jdk8u232-b06
    1.19 +c751303497d539aa85c6373aa0fa85580d3f3044 jdk8u232-b07
    1.20 +4170228e11e6313e948e6ddcae9af3eed06b1fbe jdk8u232-b08
     2.1 --- a/THIRD_PARTY_README	Sat Nov 09 20:15:27 2019 +0800
     2.2 +++ b/THIRD_PARTY_README	Sat Nov 09 20:29:45 2019 +0800
     2.3 @@ -1470,60 +1470,90 @@
     2.4  
     2.5  -------------------------------------------------------------------------------
     2.6  
     2.7 -%% This notice is provided with respect to libpng 1.6.35, which may be
     2.8 +%% This notice is provided with respect to libpng 1.6.37, which may be
     2.9  included with JRE 8, JDK 8, and OpenJDK 8.
    2.10  
    2.11  --- begin of LICENSE ---
    2.12  
    2.13 -This copy of the libpng notices is provided for your convenience.  In case of
    2.14 -any discrepancy between this copy and the notices in the file png.h that is
    2.15 -included in the libpng distribution, the latter shall prevail.
    2.16 -
    2.17 -COPYRIGHT NOTICE, DISCLAIMER, and LICENSE:
    2.18 -
    2.19 -If you modify libpng you may insert additional notices immediately following
    2.20 -this sentence.
    2.21 -
    2.22 -This code is released under the libpng license.
    2.23 -
    2.24 -libpng versions 1.0.7, July 1, 2000 through 1.6.35, July 15, 2018 are
    2.25 +COPYRIGHT NOTICE, DISCLAIMER, and LICENSE
    2.26 +=========================================
    2.27 +
    2.28 +PNG Reference Library License version 2
    2.29 +---------------------------------------
    2.30 +
    2.31 + * Copyright (c) 1995-2019 The PNG Reference Library Authors.
    2.32 + * Copyright (c) 2018-2019 Cosmin Truta.
    2.33 + * Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson.
    2.34 + * Copyright (c) 1996-1997 Andreas Dilger.
    2.35 + * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.
    2.36 +
    2.37 +The software is supplied "as is", without warranty of any kind,
    2.38 +express or implied, including, without limitation, the warranties
    2.39 +of merchantability, fitness for a particular purpose, title, and
    2.40 +non-infringement.  In no event shall the Copyright owners, or
    2.41 +anyone distributing the software, be liable for any damages or
    2.42 +other liability, whether in contract, tort or otherwise, arising
    2.43 +from, out of, or in connection with the software, or the use or
    2.44 +other dealings in the software, even if advised of the possibility
    2.45 +of such damage.
    2.46 +
    2.47 +Permission is hereby granted to use, copy, modify, and distribute
    2.48 +this software, or portions hereof, for any purpose, without fee,
    2.49 +subject to the following restrictions:
    2.50 +
    2.51 + 1. The origin of this software must not be misrepresented; you
    2.52 +    must not claim that you wrote the original software.  If you
    2.53 +    use this software in a product, an acknowledgment in the product
    2.54 +    documentation would be appreciated, but is not required.
    2.55 +
    2.56 + 2. Altered source versions must be plainly marked as such, and must
    2.57 +    not be misrepresented as being the original software.
    2.58 +
    2.59 + 3. This Copyright notice may not be removed or altered from any
    2.60 +    source or altered source distribution.
    2.61 +
    2.62 +
    2.63 +PNG Reference Library License version 1 (for libpng 0.5 through 1.6.35)
    2.64 +-----------------------------------------------------------------------
    2.65 +
    2.66 +libpng versions 1.0.7, July 1, 2000, through 1.6.35, July 15, 2018 are
    2.67  Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson, are
    2.68  derived from libpng-1.0.6, and are distributed according to the same
    2.69  disclaimer and license as libpng-1.0.6 with the following individuals
    2.70  added to the list of Contributing Authors:
    2.71  
    2.72 -   Simon-Pierre Cadieux
    2.73 -   Eric S. Raymond
    2.74 -   Mans Rullgard
    2.75 -   Cosmin Truta
    2.76 -   Gilles Vollant
    2.77 -   James Yu
    2.78 -   Mandar Sahastrabuddhe
    2.79 -   Google Inc.
    2.80 -   Vadim Barkov
    2.81 +    Simon-Pierre Cadieux
    2.82 +    Eric S. Raymond
    2.83 +    Mans Rullgard
    2.84 +    Cosmin Truta
    2.85 +    Gilles Vollant
    2.86 +    James Yu
    2.87 +    Mandar Sahastrabuddhe
    2.88 +    Google Inc.
    2.89 +    Vadim Barkov
    2.90  
    2.91  and with the following additions to the disclaimer:
    2.92  
    2.93 -   There is no warranty against interference with your enjoyment of the
    2.94 -   library or against infringement.  There is no warranty that our
    2.95 -   efforts or the library will fulfill any of your particular purposes
    2.96 -   or needs.  This library is provided with all faults, and the entire
    2.97 -   risk of satisfactory quality, performance, accuracy, and effort is with
    2.98 -   the user.
    2.99 +    There is no warranty against interference with your enjoyment of
   2.100 +    the library or against infringement.  There is no warranty that our
   2.101 +    efforts or the library will fulfill any of your particular purposes
   2.102 +    or needs.  This library is provided with all faults, and the entire
   2.103 +    risk of satisfactory quality, performance, accuracy, and effort is
   2.104 +    with the user.
   2.105  
   2.106  Some files in the "contrib" directory and some configure-generated
   2.107 -files that are distributed with libpng have other copyright owners and
   2.108 +files that are distributed with libpng have other copyright owners, and
   2.109  are released under other open source licenses.
   2.110  
   2.111  libpng versions 0.97, January 1998, through 1.0.6, March 20, 2000, are
   2.112  Copyright (c) 1998-2000 Glenn Randers-Pehrson, are derived from
   2.113  libpng-0.96, and are distributed according to the same disclaimer and
   2.114 -license as libpng-0.96, with the following individuals added to the list
   2.115 -of Contributing Authors:
   2.116 -
   2.117 -   Tom Lane
   2.118 -   Glenn Randers-Pehrson
   2.119 -   Willem van Schaik
   2.120 +license as libpng-0.96, with the following individuals added to the
   2.121 +list of Contributing Authors:
   2.122 +
   2.123 +    Tom Lane
   2.124 +    Glenn Randers-Pehrson
   2.125 +    Willem van Schaik
   2.126  
   2.127  libpng versions 0.89, June 1996, through 0.96, May 1997, are
   2.128  Copyright (c) 1996-1997 Andreas Dilger, are derived from libpng-0.88,
   2.129 @@ -1531,14 +1561,14 @@
   2.130  libpng-0.88, with the following individuals added to the list of
   2.131  Contributing Authors:
   2.132  
   2.133 -   John Bowler
   2.134 -   Kevin Bracey
   2.135 -   Sam Bushell
   2.136 -   Magnus Holmgren
   2.137 -   Greg Roelofs
   2.138 -   Tom Tanner
   2.139 -
   2.140 -Some files in the "scripts" directory have other copyright owners
   2.141 +    John Bowler
   2.142 +    Kevin Bracey
   2.143 +    Sam Bushell
   2.144 +    Magnus Holmgren
   2.145 +    Greg Roelofs
   2.146 +    Tom Tanner
   2.147 +
   2.148 +Some files in the "scripts" directory have other copyright owners,
   2.149  but are released under this license.
   2.150  
   2.151  libpng versions 0.5, May 1995, through 0.88, January 1996, are
   2.152 @@ -1547,39 +1577,38 @@
   2.153  For the purposes of this copyright and license, "Contributing Authors"
   2.154  is defined as the following set of individuals:
   2.155  
   2.156 -   Andreas Dilger
   2.157 -   Dave Martindale
   2.158 -   Guy Eric Schalnat
   2.159 -   Paul Schmidt
   2.160 -   Tim Wegner
   2.161 -
   2.162 -The PNG Reference Library is supplied "AS IS".  The Contributing Authors
   2.163 -and Group 42, Inc. disclaim all warranties, expressed or implied,
   2.164 -including, without limitation, the warranties of merchantability and of
   2.165 -fitness for any purpose.  The Contributing Authors and Group 42, Inc.
   2.166 -assume no liability for direct, indirect, incidental, special, exemplary,
   2.167 -or consequential damages, which may result from the use of the PNG
   2.168 -Reference Library, even if advised of the possibility of such damage.
   2.169 +    Andreas Dilger
   2.170 +    Dave Martindale
   2.171 +    Guy Eric Schalnat
   2.172 +    Paul Schmidt
   2.173 +    Tim Wegner
   2.174 +
   2.175 +The PNG Reference Library is supplied "AS IS".  The Contributing
   2.176 +Authors and Group 42, Inc. disclaim all warranties, expressed or
   2.177 +implied, including, without limitation, the warranties of
   2.178 +merchantability and of fitness for any purpose.  The Contributing
   2.179 +Authors and Group 42, Inc. assume no liability for direct, indirect,
   2.180 +incidental, special, exemplary, or consequential damages, which may
   2.181 +result from the use of the PNG Reference Library, even if advised of
   2.182 +the possibility of such damage.
   2.183  
   2.184  Permission is hereby granted to use, copy, modify, and distribute this
   2.185  source code, or portions hereof, for any purpose, without fee, subject
   2.186  to the following restrictions:
   2.187  
   2.188 -  1. The origin of this source code must not be misrepresented.
   2.189 -
   2.190 -  2. Altered versions must be plainly marked as such and must not
   2.191 -     be misrepresented as being the original source.
   2.192 -
   2.193 -  3. This Copyright notice may not be removed or altered from any
   2.194 -     source or altered source distribution.
   2.195 -
   2.196 -The Contributing Authors and Group 42, Inc. specifically permit, without
   2.197 -fee, and encourage the use of this source code as a component to
   2.198 -supporting the PNG file format in commercial products.  If you use this
   2.199 -source code in a product, acknowledgment is not required but would be
   2.200 -appreciated.
   2.201 -
   2.202 -END OF COPYRIGHT NOTICE, DISCLAIMER, and LICENSE.
   2.203 + 1. The origin of this source code must not be misrepresented.
   2.204 +
   2.205 + 2. Altered versions must be plainly marked as such and must not
   2.206 +    be misrepresented as being the original source.
   2.207 +
   2.208 + 3. This Copyright notice may not be removed or altered from any
   2.209 +    source or altered source distribution.
   2.210 +
   2.211 +The Contributing Authors and Group 42, Inc. specifically permit,
   2.212 +without fee, and encourage the use of this source code as a component
   2.213 +to supporting the PNG file format in commercial products.  If you use
   2.214 +this source code in a product, acknowledgment is not required but would
   2.215 +be appreciated.
   2.216  
   2.217  TRADEMARK:
   2.218  
   2.219 @@ -2101,13 +2130,13 @@
   2.220  
   2.221  -------------------------------------------------------------------------------
   2.222  
   2.223 -%% This notice is provided with respect to PC/SC Lite for Suse Linux v.1.1.1,
   2.224 +%% This notice is provided with respect to PC/SC Lite v1.8.24,
   2.225  which may be included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
   2.226  
   2.227  --- begin of LICENSE ---
   2.228  
   2.229 -Copyright (c) 1999-2004 David Corcoran <corcoran@linuxnet.com>
   2.230 -Copyright (c) 1999-2004 Ludovic Rousseau <ludovic.rousseau (at) free.fr>
   2.231 +Copyright (c) 1999-2003 David Corcoran <corcoran@linuxnet.com>
   2.232 +Copyright (c) 2001-2011 Ludovic Rousseau <ludovic.rousseau@free.fr>
   2.233  All rights reserved.
   2.234  
   2.235  Redistribution and use in source and binary forms, with or without
   2.236 @@ -2119,15 +2148,10 @@
   2.237  2. Redistributions in binary form must reproduce the above copyright
   2.238     notice, this list of conditions and the following disclaimer in the
   2.239     documentation and/or other materials provided with the distribution.
   2.240 -3. All advertising materials mentioning features or use of this software
   2.241 -   must display the following acknowledgement:
   2.242 -     This product includes software developed by: 
   2.243 -      David Corcoran <corcoran@linuxnet.com>
   2.244 -      http://www.linuxnet.com (MUSCLE)
   2.245 -4. The name of the author may not be used to endorse or promote products
   2.246 +3. The name of the author may not be used to endorse or promote products
   2.247     derived from this software without specific prior written permission.
   2.248  
   2.249 -Changes to this license can be made only by the copyright author with 
   2.250 +Changes to this license can be made only by the copyright author with
   2.251  explicit written consent.
   2.252  
   2.253  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     3.1 --- a/agent/src/os/linux/ps_core.c	Sat Nov 09 20:15:27 2019 +0800
     3.2 +++ b/agent/src/os/linux/ps_core.c	Sat Nov 09 20:29:45 2019 +0800
     3.3 @@ -865,8 +865,51 @@
     3.4  #define LD_BASE_OFFSET        offsetof(struct r_debug,  r_ldbase)
     3.5  #define LINK_MAP_ADDR_OFFSET  offsetof(struct link_map, l_addr)
     3.6  #define LINK_MAP_NAME_OFFSET  offsetof(struct link_map, l_name)
     3.7 +#define LINK_MAP_LD_OFFSET    offsetof(struct link_map, l_ld)
     3.8  #define LINK_MAP_NEXT_OFFSET  offsetof(struct link_map, l_next)
     3.9  
    3.10 +// Calculate the load address of shared library
    3.11 +// on prelink-enabled environment.
    3.12 +//
    3.13 +// In case of GDB, it would be calculated by offset of link_map.l_ld
    3.14 +// and the address of .dynamic section.
    3.15 +// See GDB implementation: lm_addr_check @ solib-svr4.c
    3.16 +static uintptr_t calc_prelinked_load_address(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* elf_ehdr, uintptr_t link_map_addr) {
    3.17 +  ELF_PHDR *phbuf;
    3.18 +  uintptr_t lib_ld;
    3.19 +  uintptr_t lib_dyn_addr = 0L;
    3.20 +  uintptr_t load_addr;
    3.21 +  int i;
    3.22 +
    3.23 +  phbuf = read_program_header_table(lib_fd, elf_ehdr);
    3.24 +  if (phbuf == NULL) {
    3.25 +    print_debug("can't read program header of shared object\n");
    3.26 +    return 0L;
    3.27 +  }
    3.28 +
    3.29 +  // Get the address of .dynamic section from shared library.
    3.30 +  for (i = 0; i < elf_ehdr->e_phnum; i++) {
    3.31 +    if (phbuf[i].p_type == PT_DYNAMIC) {
    3.32 +      lib_dyn_addr = phbuf[i].p_vaddr;
    3.33 +      break;
    3.34 +    }
    3.35 +  }
    3.36 +
    3.37 +  free(phbuf);
    3.38 +
    3.39 +  if (ps_pdread(ph, (psaddr_t)link_map_addr + LINK_MAP_LD_OFFSET,
    3.40 +               &lib_ld, sizeof(uintptr_t)) != PS_OK) {
    3.41 +    print_debug("can't read address of dynamic section in shared object\n");
    3.42 +    return 0L;
    3.43 +  }
    3.44 +
    3.45 +  // Return the load address which is calculated by the address of .dynamic
    3.46 +  // and link_map.l_ld .
    3.47 +  load_addr = lib_ld - lib_dyn_addr;
    3.48 +  print_debug("lib_ld = 0x%lx, lib_dyn_addr = 0x%lx -> lib_base_diff = 0x%lx\n", lib_ld, lib_dyn_addr, load_addr);
    3.49 +  return load_addr;
    3.50 +}
    3.51 +
    3.52  // read shared library info from runtime linker's data structures.
    3.53  // This work is done by librtlb_db in Solaris
    3.54  static bool read_shared_lib_info(struct ps_prochandle* ph) {
    3.55 @@ -968,6 +1011,14 @@
    3.56              // continue with other libraries...
    3.57           } else {
    3.58              if (read_elf_header(lib_fd, &elf_ehdr)) {
    3.59 +               if (lib_base_diff == 0x0L) {
    3.60 +                 lib_base_diff = calc_prelinked_load_address(ph, lib_fd, &elf_ehdr, link_map_addr);
    3.61 +                 if (lib_base_diff == 0x0L) {
    3.62 +                   close(lib_fd);
    3.63 +                   return false;
    3.64 +                 }
    3.65 +               }
    3.66 +
    3.67                 lib_base = lib_base_diff + find_base_address(lib_fd, &elf_ehdr);
    3.68                 print_debug("reading library %s @ 0x%lx [ 0x%lx ]\n",
    3.69                             lib_name, lib_base, lib_base_diff);
     4.1 --- a/agent/src/os/linux/ps_proc.c	Sat Nov 09 20:15:27 2019 +0800
     4.2 +++ b/agent/src/os/linux/ps_proc.c	Sat Nov 09 20:29:45 2019 +0800
     4.3 @@ -345,7 +345,7 @@
     4.4  
     4.5  static bool read_lib_info(struct ps_prochandle* ph) {
     4.6    char fname[32];
     4.7 -  char buf[256];
     4.8 +  char buf[PATH_MAX];
     4.9    FILE *fp = NULL;
    4.10  
    4.11    sprintf(fname, "/proc/%d/maps", ph->pid);
    4.12 @@ -355,10 +355,41 @@
    4.13      return false;
    4.14    }
    4.15  
    4.16 -  while(fgets_no_cr(buf, 256, fp)){
    4.17 -    char * word[6];
    4.18 -    int nwords = split_n_str(buf, 6, word, ' ', '\0');
    4.19 -    if (nwords > 5 && find_lib(ph, word[5]) == false) {
    4.20 +  while(fgets_no_cr(buf, PATH_MAX, fp)){
    4.21 +    char * word[7];
    4.22 +    int nwords = split_n_str(buf, 7, word, ' ', '\0');
    4.23 +
    4.24 +    if (nwords < 6) {
    4.25 +      // not a shared library entry. ignore.
    4.26 +      continue;
    4.27 +    }
    4.28 +
    4.29 +    // SA does not handle the lines with patterns:
    4.30 +    //   "[stack]", "[heap]", "[vdso]", "[vsyscall]", etc.
    4.31 +    if (word[5][0] == '[') {
    4.32 +        // not a shared library entry. ignore.
    4.33 +        continue;
    4.34 +    }
    4.35 +
    4.36 +    if (nwords > 6) {
    4.37 +      // prelink altered mapfile when the program is running.
    4.38 +      // Entries like one below have to be skipped
    4.39 +      //  /lib64/libc-2.15.so (deleted)
    4.40 +      // SO name in entries like one below have to be stripped.
    4.41 +      //  /lib64/libpthread-2.15.so.#prelink#.EECVts
    4.42 +      char *s = strstr(word[5],".#prelink#");
    4.43 +      if (s == NULL) {
    4.44 +        // No prelink keyword. skip deleted library
    4.45 +        print_debug("skip shared object %s deleted by prelink\n", word[5]);
    4.46 +        continue;
    4.47 +      }
    4.48 +
    4.49 +      // Fall through
    4.50 +      print_debug("rectifying shared object name %s changed by prelink\n", word[5]);
    4.51 +      *s = 0;
    4.52 +    }
    4.53 +
    4.54 +    if (find_lib(ph, word[5]) == false) {
    4.55         intptr_t base;
    4.56         lib_info* lib;
    4.57  #ifdef _LP64
     5.1 --- a/make/aix/makefiles/jsig.make	Sat Nov 09 20:15:27 2019 +0800
     5.2 +++ b/make/aix/makefiles/jsig.make	Sat Nov 09 20:29:45 2019 +0800
     5.3 @@ -54,10 +54,15 @@
     5.4    JSIG_DEBUG_CFLAGS = -g
     5.5  endif
     5.6  
     5.7 +# Optimize jsig lib at level -O3 unless it's a slowdebug build
     5.8 +ifneq ($(DEBUG_LEVEL), slowdebug)
     5.9 +  JSIG_OPT_FLAGS = $(OPT_CFLAGS)
    5.10 +endif
    5.11 +
    5.12  $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
    5.13  	@echo Making signal interposition lib...
    5.14  	$(QUIETLY) $(CXX) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
    5.15 -                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
    5.16 +                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(JSIG_OPT_FLAGS) -o $@ $< -ldl
    5.17  
    5.18  #ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    5.19  #	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
     6.1 --- a/make/bsd/makefiles/jsig.make	Sat Nov 09 20:15:27 2019 +0800
     6.2 +++ b/make/bsd/makefiles/jsig.make	Sat Nov 09 20:29:45 2019 +0800
     6.3 @@ -59,10 +59,15 @@
     6.4    JSIG_DEBUG_CFLAGS = -g
     6.5  endif
     6.6  
     6.7 +# Optimize jsig lib at level -O3 unless it's a slowdebug build
     6.8 +ifneq ($(DEBUG_LEVEL), slowdebug)
     6.9 +  JSIG_OPT_FLAGS = $(OPT_CFLAGS)
    6.10 +endif
    6.11 +
    6.12  $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
    6.13  	@echo Making signal interposition lib...
    6.14  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
    6.15 -                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $<
    6.16 +                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(JSIG_OPT_FLAGS) $(EXTRA_CFLAGS) -o $@ $<
    6.17  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    6.18    ifeq ($(OS_VENDOR), Darwin)
    6.19  	$(DSYMUTIL) $@
     7.1 --- a/make/linux/makefiles/jsig.make	Sat Nov 09 20:15:27 2019 +0800
     7.2 +++ b/make/linux/makefiles/jsig.make	Sat Nov 09 20:29:45 2019 +0800
     7.3 @@ -51,10 +51,15 @@
     7.4    JSIG_DEBUG_CFLAGS = -g
     7.5  endif
     7.6  
     7.7 +# Optimize jsig lib at level -O3 unless it's a slowdebug build
     7.8 +ifneq ($(DEBUG_LEVEL), slowdebug)
     7.9 +  JSIG_OPT_FLAGS = $(OPT_CFLAGS)
    7.10 +endif
    7.11 +
    7.12  $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
    7.13  	@echo Making signal interposition lib...
    7.14  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
    7.15 -                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(EXTRA_CFLAGS) -o $@ $< -ldl
    7.16 +                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) $(JSIG_OPT_FLAGS) $(EXTRA_CFLAGS) -o $@ $< -ldl
    7.17  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    7.18    ifneq ($(STRIP_POLICY),no_strip)
    7.19  	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
     8.1 --- a/make/linux/makefiles/zeroshark.make	Sat Nov 09 20:15:27 2019 +0800
     8.2 +++ b/make/linux/makefiles/zeroshark.make	Sat Nov 09 20:29:45 2019 +0800
     8.3 @@ -1,5 +1,5 @@
     8.4  #
     8.5 -# Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
     8.6 +# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
     8.7  # Copyright 2007, 2008 Red Hat, Inc.
     8.8  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.9  #
    8.10 @@ -25,8 +25,16 @@
    8.11  
    8.12  # Setup common to Zero (non-Shark) and Shark versions of VM
    8.13  
    8.14 -# override this from the main file because some version of llvm do not like -Wundef
    8.15 -WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wunused-function -Wunused-value
    8.16 +# Some versions of llvm do not like -Wundef
    8.17 +ifeq ($(JVM_VARIANT_ZEROSHARK), true)
    8.18 +  WARNING_FLAGS += -Wno-undef
    8.19 +endif
    8.20 +# Suppress some warning flags that are normally turned on for hotspot,
    8.21 +# because some of the zero code has not been updated accordingly.
    8.22 +WARNING_FLAGS += -Wno-return-type \
    8.23 +  -Wno-format-nonliteral -Wno-format-security \
    8.24 +  -Wno-maybe-uninitialized
    8.25 + 
    8.26  
    8.27  # If FDLIBM_CFLAGS is non-empty it holds CFLAGS needed to be passed to
    8.28  # the compiler so as to be able to produce optimized objects
    8.29 @@ -48,5 +56,3 @@
    8.30  ifeq ($(ARCH_DATA_MODEL), 64)
    8.31    CFLAGS += -D_LP64=1
    8.32  endif
    8.33 -
    8.34 -OPT_CFLAGS/compactingPermGenGen.o = -O1
     9.1 --- a/make/solaris/makefiles/jsig.make	Sat Nov 09 20:15:27 2019 +0800
     9.2 +++ b/make/solaris/makefiles/jsig.make	Sat Nov 09 20:29:45 2019 +0800
     9.3 @@ -47,10 +47,15 @@
     9.4  LFLAGS_JSIG += -mt -xnolib
     9.5  endif
     9.6  
     9.7 +# Optimize jsig lib unless it's a slowdebug build
     9.8 +ifneq ($(DEBUG_LEVEL), slowdebug)
     9.9 +  JSIG_OPT_FLAGS = -xO4 -g
    9.10 +endif
    9.11 +
    9.12  $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
    9.13  	@echo Making signal interposition lib...
    9.14  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
    9.15 -                         $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
    9.16 +                         $(LFLAGS_JSIG) $(JSIG_OPT_FLAGS) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
    9.17  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    9.18  	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
    9.19  	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
    10.1 --- a/src/cpu/ppc/vm/assembler_ppc.hpp	Sat Nov 09 20:15:27 2019 +0800
    10.2 +++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Sat Nov 09 20:29:45 2019 +0800
    10.3 @@ -2000,7 +2000,8 @@
    10.4    inline void vsbox(       VectorRegister d, VectorRegister a);
    10.5  
    10.6    // SHA (introduced with Power 8)
    10.7 -  // Not yet implemented.
    10.8 +  inline void vshasigmad(VectorRegister d, VectorRegister a, bool st, int six);
    10.9 +  inline void vshasigmaw(VectorRegister d, VectorRegister a, bool st, int six);
   10.10  
   10.11    // Vector Binary Polynomial Multiplication (introduced with Power 8)
   10.12    inline void vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b);
   10.13 @@ -2096,6 +2097,11 @@
   10.14    inline void lvsl(  VectorRegister d, Register s2);
   10.15    inline void lvsr(  VectorRegister d, Register s2);
   10.16  
   10.17 +  // Endianess specific concatenation of 2 loaded vectors.
   10.18 +  inline void load_perm(VectorRegister perm, Register addr);
   10.19 +  inline void vec_perm(VectorRegister first_dest, VectorRegister second, VectorRegister perm);
   10.20 +  inline void vec_perm(VectorRegister dest, VectorRegister first, VectorRegister second, VectorRegister perm);
   10.21 +
   10.22    // RegisterOrConstant versions.
   10.23    // These emitters choose between the versions using two registers and
   10.24    // those with register and immediate, depending on the content of roc.
    11.1 --- a/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Sat Nov 09 20:15:27 2019 +0800
    11.2 +++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Sat Nov 09 20:29:45 2019 +0800
    11.3 @@ -789,7 +789,8 @@
    11.4  inline void Assembler::vsbox(       VectorRegister d, VectorRegister a)                   { emit_int32( VSBOX_OPCODE        | vrt(d) | vra(a)         ); }
    11.5  
    11.6  // SHA (introduced with Power 8)
    11.7 -// Not yet implemented.
    11.8 +inline void Assembler::vshasigmad(VectorRegister d, VectorRegister a, bool st, int six) { emit_int32( VSHASIGMAD_OPCODE | vrt(d) | vra(a) | vst(st) | vsix(six)); }
    11.9 +inline void Assembler::vshasigmaw(VectorRegister d, VectorRegister a, bool st, int six) { emit_int32( VSHASIGMAW_OPCODE | vrt(d) | vra(a) | vst(st) | vsix(six)); }
   11.10  
   11.11  // Vector Binary Polynomial Multiplication (introduced with Power 8)
   11.12  inline void Assembler::vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
   11.13 @@ -887,6 +888,30 @@
   11.14  inline void Assembler::lvsl(  VectorRegister d, Register s2) { emit_int32( LVSL_OPCODE   | vrt(d) | rb(s2)); }
   11.15  inline void Assembler::lvsr(  VectorRegister d, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | rb(s2)); }
   11.16  
   11.17 +inline void Assembler::load_perm(VectorRegister perm, Register addr) {
   11.18 +#if defined(VM_LITTLE_ENDIAN)
   11.19 +  lvsr(perm, addr);
   11.20 +#else
   11.21 +  lvsl(perm, addr);
   11.22 +#endif
   11.23 +}
   11.24 +
   11.25 +inline void Assembler::vec_perm(VectorRegister first_dest, VectorRegister second, VectorRegister perm) {
   11.26 +#if defined(VM_LITTLE_ENDIAN)
   11.27 +  vperm(first_dest, second, first_dest, perm);
   11.28 +#else
   11.29 +  vperm(first_dest, first_dest, second, perm);
   11.30 +#endif
   11.31 +}
   11.32 +
   11.33 +inline void Assembler::vec_perm(VectorRegister dest, VectorRegister first, VectorRegister second, VectorRegister perm) {
   11.34 +#if defined(VM_LITTLE_ENDIAN)
   11.35 +  vperm(dest, second, first, perm);
   11.36 +#else
   11.37 +  vperm(dest, first, second, perm);
   11.38 +#endif
   11.39 +}
   11.40 +
   11.41  inline void Assembler::load_const(Register d, void* x, Register tmp) {
   11.42     load_const(d, (long)x, tmp);
   11.43  }
    12.1 --- a/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Sat Nov 09 20:15:27 2019 +0800
    12.2 +++ b/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Sat Nov 09 20:29:45 2019 +0800
    12.3 @@ -667,6 +667,40 @@
    12.4  
    12.5    void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
    12.6  
    12.7 +  // SHA-2 auxiliary functions and public interfaces
    12.8 + private:
    12.9 +  void sha256_deque(const VectorRegister src,
   12.10 +      const VectorRegister dst1, const VectorRegister dst2, const VectorRegister dst3);
   12.11 +  void sha256_load_h_vec(const VectorRegister a, const VectorRegister e, const Register hptr);
   12.12 +  void sha256_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
   12.13 +  void sha256_load_w_plus_k_vec(const Register buf_in, const VectorRegister* ws,
   12.14 +      const int total_ws, const Register k, const VectorRegister* kpws,
   12.15 +      const int total_kpws);
   12.16 +  void sha256_calc_4w(const VectorRegister w0, const VectorRegister w1,
   12.17 +      const VectorRegister w2, const VectorRegister w3, const VectorRegister kpw0,
   12.18 +      const VectorRegister kpw1, const VectorRegister kpw2, const VectorRegister kpw3,
   12.19 +      const Register j, const Register k);
   12.20 +  void sha256_update_sha_state(const VectorRegister a, const VectorRegister b,
   12.21 +      const VectorRegister c, const VectorRegister d, const VectorRegister e,
   12.22 +      const VectorRegister f, const VectorRegister g, const VectorRegister h,
   12.23 +      const Register hptr);
   12.24 +
   12.25 +  void sha512_load_w_vec(const Register buf_in, const VectorRegister* ws, const int total_ws);
   12.26 +  void sha512_update_sha_state(const Register state, const VectorRegister* hs, const int total_hs);
   12.27 +  void sha512_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
   12.28 +  void sha512_load_h_vec(const Register state, const VectorRegister* hs, const int total_hs);
   12.29 +  void sha512_calc_2w(const VectorRegister w0, const VectorRegister w1,
   12.30 +      const VectorRegister w2, const VectorRegister w3,
   12.31 +      const VectorRegister w4, const VectorRegister w5,
   12.32 +      const VectorRegister w6, const VectorRegister w7,
   12.33 +      const VectorRegister kpw0, const VectorRegister kpw1, const Register j,
   12.34 +      const VectorRegister vRb, const Register k);
   12.35 +
   12.36 + public:
   12.37 +  void sha256(bool multi_block);
   12.38 +  void sha512(bool multi_block);
   12.39 +
   12.40 +
   12.41    //
   12.42    // Debugging
   12.43    //
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/src/cpu/ppc/vm/macroAssembler_ppc_sha.cpp	Sat Nov 09 20:29:45 2019 +0800
    13.3 @@ -0,0 +1,1136 @@
    13.4 +// Copyright (c) 2017 Instituto de Pesquisas Eldorado. All rights reserved.
    13.5 +// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.6 +//
    13.7 +// This code is free software; you can redistribute it and/or modify it
    13.8 +// under the terms of the GNU General Public License version 2 only, as
    13.9 +// published by the Free Software Foundation.
   13.10 +//
   13.11 +// This code is distributed in the hope that it will be useful, but WITHOUT
   13.12 +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13.13 +// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13.14 +// version 2 for more details (a copy is included in the LICENSE file that
   13.15 +// accompanied this code).
   13.16 +//
   13.17 +// You should have received a copy of the GNU General Public License version
   13.18 +// 2 along with this work; if not, write to the Free Software Foundation,
   13.19 +// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   13.20 +//
   13.21 +// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   13.22 +// or visit www.oracle.com if you need additional information or have any
   13.23 +// questions.
   13.24 +
   13.25 +// Implemented according to "Descriptions of SHA-256, SHA-384, and SHA-512"
   13.26 +// (http://www.iwar.org.uk/comsec/resources/cipher/sha256-384-512.pdf).
   13.27 +
   13.28 +#include "asm/macroAssembler.inline.hpp"
   13.29 +#include "runtime/stubRoutines.hpp"
   13.30 +
   13.31 +/**********************************************************************
   13.32 + * SHA 256
   13.33 + *********************************************************************/
   13.34 +
   13.35 +void MacroAssembler::sha256_deque(const VectorRegister src,
   13.36 +                                  const VectorRegister dst1,
   13.37 +                                  const VectorRegister dst2,
   13.38 +                                  const VectorRegister dst3) {
   13.39 +  vsldoi (dst1, src, src, 12);
   13.40 +  vsldoi (dst2, src, src, 8);
   13.41 +  vsldoi (dst3, src, src, 4);
   13.42 +}
   13.43 +
   13.44 +void MacroAssembler::sha256_round(const VectorRegister* hs,
   13.45 +                                  const int total_hs,
   13.46 +                                  int& h_cnt,
   13.47 +                                  const VectorRegister kpw) {
   13.48 +  // convenience registers: cycle from 0-7 downwards
   13.49 +  const VectorRegister a = hs[(total_hs + 0 - (h_cnt % total_hs)) % total_hs];
   13.50 +  const VectorRegister b = hs[(total_hs + 1 - (h_cnt % total_hs)) % total_hs];
   13.51 +  const VectorRegister c = hs[(total_hs + 2 - (h_cnt % total_hs)) % total_hs];
   13.52 +  const VectorRegister d = hs[(total_hs + 3 - (h_cnt % total_hs)) % total_hs];
   13.53 +  const VectorRegister e = hs[(total_hs + 4 - (h_cnt % total_hs)) % total_hs];
   13.54 +  const VectorRegister f = hs[(total_hs + 5 - (h_cnt % total_hs)) % total_hs];
   13.55 +  const VectorRegister g = hs[(total_hs + 6 - (h_cnt % total_hs)) % total_hs];
   13.56 +  const VectorRegister h = hs[(total_hs + 7 - (h_cnt % total_hs)) % total_hs];
   13.57 +  // temporaries
   13.58 +  VectorRegister ch  = VR0;
   13.59 +  VectorRegister maj = VR1;
   13.60 +  VectorRegister bsa = VR2;
   13.61 +  VectorRegister bse = VR3;
   13.62 +  VectorRegister vt0 = VR4;
   13.63 +  VectorRegister vt1 = VR5;
   13.64 +  VectorRegister vt2 = VR6;
   13.65 +  VectorRegister vt3 = VR7;
   13.66 +
   13.67 +  vsel       (ch,  g,   f, e);
   13.68 +  vxor       (maj, a,   b);
   13.69 +  vshasigmaw (bse, e,   1, 0xf);
   13.70 +  vadduwm    (vt2, ch,  kpw);
   13.71 +  vadduwm    (vt1, h,   bse);
   13.72 +  vsel       (maj, b,   c, maj);
   13.73 +  vadduwm    (vt3, vt1, vt2);
   13.74 +  vshasigmaw (bsa, a,   1, 0);
   13.75 +  vadduwm    (vt0, bsa, maj);
   13.76 +
   13.77 +  vadduwm    (d,   d,   vt3);
   13.78 +  vadduwm    (h,   vt3, vt0);
   13.79 +
   13.80 +  // advance vector pointer to the next iteration
   13.81 +  h_cnt++;
   13.82 +}
   13.83 +
   13.84 +void MacroAssembler::sha256_load_h_vec(const VectorRegister a,
   13.85 +                                       const VectorRegister e,
   13.86 +                                       const Register hptr) {
   13.87 +  // temporaries
   13.88 +  Register tmp = R8;
   13.89 +  VectorRegister vt0 = VR0;
   13.90 +  VectorRegister vRb = VR6;
   13.91 +  // labels
   13.92 +  Label sha256_aligned;
   13.93 +
   13.94 +  andi_  (tmp,  hptr, 0xf);
   13.95 +  lvx    (a,    hptr);
   13.96 +  addi   (tmp,  hptr, 16);
   13.97 +  lvx    (e,    tmp);
   13.98 +  beq    (CCR0, sha256_aligned);
   13.99 +
  13.100 +  // handle unaligned accesses
  13.101 +  load_perm(vRb, hptr);
  13.102 +  addi   (tmp, hptr, 32);
  13.103 +  vec_perm(a,   e,    vRb);
  13.104 +
  13.105 +  lvx    (vt0,  tmp);
  13.106 +  vec_perm(e,   vt0,  vRb);
  13.107 +
  13.108 +  // aligned accesses
  13.109 +  bind(sha256_aligned);
  13.110 +}
  13.111 +
  13.112 +void MacroAssembler::sha256_load_w_plus_k_vec(const Register buf_in,
  13.113 +                                              const VectorRegister* ws,
  13.114 +                                              const int total_ws,
  13.115 +                                              const Register k,
  13.116 +                                              const VectorRegister* kpws,
  13.117 +                                              const int total_kpws) {
  13.118 +  Label w_aligned, after_w_load;
  13.119 +
  13.120 +  Register tmp       = R8;
  13.121 +  VectorRegister vt0 = VR0;
  13.122 +  VectorRegister vt1 = VR1;
  13.123 +  VectorRegister vRb = VR6;
  13.124 +
  13.125 +  andi_ (tmp, buf_in, 0xF);
   13.126 +  beq   (CCR0, w_aligned); // low nibble of address is 0x0 (16-byte aligned)
  13.127 +
  13.128 +  // deal with unaligned addresses
  13.129 +  lvx    (ws[0], buf_in);
  13.130 +  load_perm(vRb, buf_in);
  13.131 +
  13.132 +  for (int n = 1; n < total_ws; n++) {
  13.133 +    VectorRegister w_cur = ws[n];
  13.134 +    VectorRegister w_prev = ws[n-1];
  13.135 +
  13.136 +    addi (tmp, buf_in, n * 16);
  13.137 +    lvx  (w_cur, tmp);
  13.138 +    vec_perm(w_prev, w_cur, vRb);
  13.139 +  }
  13.140 +  addi   (tmp, buf_in, total_ws * 16);
  13.141 +  lvx    (vt0, tmp);
  13.142 +  vec_perm(ws[total_ws-1], vt0, vRb);
  13.143 +  b      (after_w_load);
  13.144 +
  13.145 +  bind(w_aligned);
  13.146 +
  13.147 +  // deal with aligned addresses
  13.148 +  lvx(ws[0], buf_in);
  13.149 +  for (int n = 1; n < total_ws; n++) {
  13.150 +    VectorRegister w = ws[n];
  13.151 +    addi (tmp, buf_in, n * 16);
  13.152 +    lvx  (w, tmp);
  13.153 +  }
  13.154 +
  13.155 +  bind(after_w_load);
  13.156 +
  13.157 +#if defined(VM_LITTLE_ENDIAN)
  13.158 +  // Byte swapping within int values
  13.159 +  li       (tmp, 8);
  13.160 +  lvsl     (vt0, tmp);
  13.161 +  vspltisb (vt1, 0xb);
  13.162 +  vxor     (vt1, vt0, vt1);
  13.163 +  for (int n = 0; n < total_ws; n++) {
  13.164 +    VectorRegister w = ws[n];
  13.165 +    vec_perm(w, w, vt1);
  13.166 +  }
  13.167 +#endif
  13.168 +
  13.169 +  // Loading k, which is always aligned to 16-bytes
  13.170 +  lvx    (kpws[0], k);
  13.171 +  for (int n = 1; n < total_kpws; n++) {
  13.172 +    VectorRegister kpw = kpws[n];
  13.173 +    addi (tmp, k, 16 * n);
  13.174 +    lvx  (kpw, tmp);
  13.175 +  }
  13.176 +
  13.177 +  // Add w to K
  13.178 +  assert(total_ws == total_kpws, "Redesign the loop below");
  13.179 +  for (int n = 0; n < total_kpws; n++) {
  13.180 +    VectorRegister kpw = kpws[n];
  13.181 +    VectorRegister w   = ws[n];
  13.182 +
  13.183 +    vadduwm  (kpw, kpw, w);
  13.184 +  }
  13.185 +}
  13.186 +
  13.187 +void MacroAssembler::sha256_calc_4w(const VectorRegister w0,
  13.188 +                                    const VectorRegister w1,
  13.189 +                                    const VectorRegister w2,
  13.190 +                                    const VectorRegister w3,
  13.191 +                                    const VectorRegister kpw0,
  13.192 +                                    const VectorRegister kpw1,
  13.193 +                                    const VectorRegister kpw2,
  13.194 +                                    const VectorRegister kpw3,
  13.195 +                                    const Register j,
  13.196 +                                    const Register k) {
  13.197 +  // Temporaries
  13.198 +  const VectorRegister  vt0  = VR0;
  13.199 +  const VectorRegister  vt1  = VR1;
  13.200 +  const VectorSRegister vsrt1 = vt1->to_vsr();
  13.201 +  const VectorRegister  vt2  = VR2;
  13.202 +  const VectorRegister  vt3  = VR3;
  13.203 +  const VectorSRegister vst3 = vt3->to_vsr();
  13.204 +  const VectorRegister  vt4  = VR4;
  13.205 +
  13.206 +  // load to k[j]
  13.207 +  lvx        (vt0, j,   k);
  13.208 +
  13.209 +  // advance j
  13.210 +  addi       (j,   j,   16); // 16 bytes were read
  13.211 +
  13.212 +#if defined(VM_LITTLE_ENDIAN)
  13.213 +  // b = w[j-15], w[j-14], w[j-13], w[j-12]
  13.214 +  vsldoi     (vt1, w1,  w0, 12);
  13.215 +
  13.216 +  // c = w[j-7], w[j-6], w[j-5], w[j-4]
  13.217 +  vsldoi     (vt2, w3,  w2, 12);
  13.218 +
  13.219 +#else
  13.220 +  // b = w[j-15], w[j-14], w[j-13], w[j-12]
  13.221 +  vsldoi     (vt1, w0,  w1, 4);
  13.222 +
  13.223 +  // c = w[j-7], w[j-6], w[j-5], w[j-4]
  13.224 +  vsldoi     (vt2, w2,  w3, 4);
  13.225 +#endif
  13.226 +
  13.227 +  // d = w[j-2], w[j-1], w[j-4], w[j-3]
  13.228 +  vsldoi     (vt3, w3,  w3, 8);
  13.229 +
  13.230 +  // b = s0(w[j-15]) , s0(w[j-14]) , s0(w[j-13]) , s0(w[j-12])
  13.231 +  vshasigmaw (vt1, vt1, 0,  0);
  13.232 +
  13.233 +  // d = s1(w[j-2]) , s1(w[j-1]) , s1(w[j-4]) , s1(w[j-3])
  13.234 +  vshasigmaw (vt3, vt3, 0,  0xf);
  13.235 +
  13.236 +  // c = s0(w[j-15]) + w[j-7],
  13.237 +  //     s0(w[j-14]) + w[j-6],
  13.238 +  //     s0(w[j-13]) + w[j-5],
  13.239 +  //     s0(w[j-12]) + w[j-4]
  13.240 +  vadduwm    (vt2, vt1, vt2);
  13.241 +
  13.242 +  // c = s0(w[j-15]) + w[j-7] + w[j-16],
  13.243 +  //     s0(w[j-14]) + w[j-6] + w[j-15],
  13.244 +  //     s0(w[j-13]) + w[j-5] + w[j-14],
  13.245 +  //     s0(w[j-12]) + w[j-4] + w[j-13]
  13.246 +  vadduwm    (vt2, vt2, w0);
  13.247 +
  13.248 +  // e = s0(w[j-15]) + w[j-7] + w[j-16] + s1(w[j-2]), // w[j]
  13.249 +  //     s0(w[j-14]) + w[j-6] + w[j-15] + s1(w[j-1]), // w[j+1]
  13.250 +  //     s0(w[j-13]) + w[j-5] + w[j-14] + s1(w[j-4]), // UNDEFINED
  13.251 +  //     s0(w[j-12]) + w[j-4] + w[j-13] + s1(w[j-3])  // UNDEFINED
  13.252 +  vadduwm    (vt4, vt2, vt3);
  13.253 +
  13.254 +  // At this point, e[0] and e[1] are the correct values to be stored at w[j]
  13.255 +  // and w[j+1].
  13.256 +  // e[2] and e[3] are not considered.
   13.257 +  // b = s1(w[j]) , s1(w[j+1]) , UNDEFINED , UNDEFINED
  13.258 +  vshasigmaw (vt1, vt4, 0,  0xf);
  13.259 +
  13.260 +  // v5 = s1(w[j-2]) , s1(w[j-1]) , s1(w[j]) , s1(w[j+1])
  13.261 +#if defined(VM_LITTLE_ENDIAN)
  13.262 +  xxmrgld    (vst3, vsrt1, vst3);
  13.263 +#else
  13.264 +  xxmrghd    (vst3, vst3, vsrt1);
  13.265 +#endif
  13.266 +
  13.267 +  // c = s0(w[j-15]) + w[j-7] + w[j-16] + s1(w[j-2]), // w[j]
  13.268 +  //     s0(w[j-14]) + w[j-6] + w[j-15] + s1(w[j-1]), // w[j+1]
  13.269 +  //     s0(w[j-13]) + w[j-5] + w[j-14] + s1(w[j]),   // w[j+2]
   13.270 +  //     s0(w[j-12]) + w[j-4] + w[j-13] + s1(w[j+1])  // w[j+3]
  13.271 +  vadduwm    (vt2, vt2, vt3);
  13.272 +
  13.273 +  // Updating w0 to w3 to hold the new previous 16 values from w.
  13.274 +  vmr        (w0,  w1);
  13.275 +  vmr        (w1,  w2);
  13.276 +  vmr        (w2,  w3);
  13.277 +  vmr        (w3,  vt2);
  13.278 +
  13.279 +  // store k + w to v9 (4 values at once)
  13.280 +#if defined(VM_LITTLE_ENDIAN)
  13.281 +  vadduwm    (kpw0, vt2, vt0);
  13.282 +
  13.283 +  vsldoi     (kpw1, kpw0, kpw0, 12);
  13.284 +  vsldoi     (kpw2, kpw0, kpw0, 8);
  13.285 +  vsldoi     (kpw3, kpw0, kpw0, 4);
  13.286 +#else
  13.287 +  vadduwm    (kpw3, vt2, vt0);
  13.288 +
  13.289 +  vsldoi     (kpw2, kpw3, kpw3, 12);
  13.290 +  vsldoi     (kpw1, kpw3, kpw3, 8);
  13.291 +  vsldoi     (kpw0, kpw3, kpw3, 4);
  13.292 +#endif
  13.293 +}
  13.294 +
  13.295 +void MacroAssembler::sha256_update_sha_state(const VectorRegister a,
  13.296 +                                             const VectorRegister b_,
  13.297 +                                             const VectorRegister c,
  13.298 +                                             const VectorRegister d,
  13.299 +                                             const VectorRegister e,
  13.300 +                                             const VectorRegister f,
  13.301 +                                             const VectorRegister g,
  13.302 +                                             const VectorRegister h,
  13.303 +                                             const Register hptr) {
  13.304 +  // temporaries
  13.305 +  VectorRegister vt0  = VR0;
  13.306 +  VectorRegister vt1  = VR1;
  13.307 +  VectorRegister vt2  = VR2;
  13.308 +  VectorRegister vt3  = VR3;
  13.309 +  VectorRegister vt4  = VR4;
  13.310 +  VectorRegister vt5  = VR5;
  13.311 +  VectorRegister vaux = VR6;
  13.312 +  VectorRegister vRb  = VR6;
  13.313 +  Register tmp        = R8;
  13.314 +  Register of16       = R8;
  13.315 +  Register of32       = R9;
  13.316 +  Label state_load_aligned;
  13.317 +
  13.318 +  // Load hptr
  13.319 +  andi_   (tmp, hptr, 0xf);
  13.320 +  li      (of16, 16);
  13.321 +  lvx     (vt0, hptr);
  13.322 +  lvx     (vt5, of16, hptr);
  13.323 +  beq     (CCR0, state_load_aligned);
  13.324 +
  13.325 +  // handle unaligned accesses
  13.326 +  li      (of32, 32);
  13.327 +  load_perm(vRb, hptr);
  13.328 +
  13.329 +  vec_perm(vt0, vt5,  vRb);        // vt0 = hptr[0]..hptr[3]
  13.330 +
  13.331 +  lvx     (vt1, hptr, of32);
  13.332 +  vec_perm(vt5, vt1,  vRb);        // vt5 = hptr[4]..hptr[7]
  13.333 +
  13.334 +  // aligned accesses
  13.335 +  bind(state_load_aligned);
  13.336 +
  13.337 +#if defined(VM_LITTLE_ENDIAN)
  13.338 +  vmrglw  (vt1, b_, a);            // vt1 = {a, b, ?, ?}
  13.339 +  vmrglw  (vt2, d, c);             // vt2 = {c, d, ?, ?}
  13.340 +  vmrglw  (vt3, f, e);             // vt3 = {e, f, ?, ?}
  13.341 +  vmrglw  (vt4, h, g);             // vt4 = {g, h, ?, ?}
  13.342 +  xxmrgld (vt1->to_vsr(), vt2->to_vsr(), vt1->to_vsr()); // vt1 = {a, b, c, d}
  13.343 +  xxmrgld (vt3->to_vsr(), vt4->to_vsr(), vt3->to_vsr()); // vt3 = {e, f, g, h}
  13.344 +  vadduwm (a,   vt0, vt1);         // a = {a+hptr[0], b+hptr[1], c+hptr[2], d+hptr[3]}
  13.345 +  vadduwm (e,   vt5, vt3);         // e = {e+hptr[4], f+hptr[5], g+hptr[6], h+hptr[7]}
  13.346 +
  13.347 +  // Save hptr back, works for any alignment
  13.348 +  xxswapd (vt0->to_vsr(), a->to_vsr());
  13.349 +  stxvd2x (vt0->to_vsr(), hptr);
  13.350 +  xxswapd (vt5->to_vsr(), e->to_vsr());
  13.351 +  stxvd2x (vt5->to_vsr(), of16, hptr);
  13.352 +#else
  13.353 +  vmrglw  (vt1, a, b_);            // vt1 = {a, b, ?, ?}
  13.354 +  vmrglw  (vt2, c, d);             // vt2 = {c, d, ?, ?}
  13.355 +  vmrglw  (vt3, e, f);             // vt3 = {e, f, ?, ?}
  13.356 +  vmrglw  (vt4, g, h);             // vt4 = {g, h, ?, ?}
  13.357 +  xxmrgld (vt1->to_vsr(), vt1->to_vsr(), vt2->to_vsr()); // vt1 = {a, b, c, d}
  13.358 +  xxmrgld (vt3->to_vsr(), vt3->to_vsr(), vt4->to_vsr()); // vt3 = {e, f, g, h}
  13.359 +  vadduwm (d,   vt0, vt1);         // d = {a+hptr[0], b+hptr[1], c+hptr[2], d+hptr[3]}
  13.360 +  vadduwm (h,   vt5, vt3);         // h = {e+hptr[4], f+hptr[5], g+hptr[6], h+hptr[7]}
  13.361 +
  13.362 +  // Save hptr back, works for any alignment
  13.363 +  stxvd2x (d->to_vsr(), hptr);
  13.364 +  stxvd2x (h->to_vsr(), of16, hptr);
  13.365 +#endif
  13.366 +}
  13.367 +
  13.368 +static const uint32_t sha256_round_table[64] __attribute((aligned(16))) = {
  13.369 +  0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
  13.370 +  0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
  13.371 +  0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
  13.372 +  0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
  13.373 +  0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
  13.374 +  0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
  13.375 +  0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
  13.376 +  0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
  13.377 +  0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
  13.378 +  0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
  13.379 +  0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
  13.380 +  0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
  13.381 +  0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
  13.382 +  0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
  13.383 +  0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
  13.384 +  0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
  13.385 +};
  13.386 +static const uint32_t *sha256_round_consts = sha256_round_table;
  13.387 +
  13.388 +//   R3_ARG1   - byte[]  Input string with padding but in Big Endian
  13.389 +//   R4_ARG2   - int[]   SHA.state (at first, the root of primes)
  13.390 +//   R5_ARG3   - int     offset
  13.391 +//   R6_ARG4   - int     limit
  13.392 +//
  13.393 +//   Internal Register usage:
  13.394 +//   R7        - k
  13.395 +//   R8        - tmp | j | of16
  13.396 +//   R9        - of32
  13.397 +//   VR0-VR8   - ch, maj, bsa, bse, vt0-vt3 | vt0-vt5, vaux/vRb
  13.398 +//   VR9-VR16  - a-h
  13.399 +//   VR17-VR20 - w0-w3
  13.400 +//   VR21-VR23 - vRb | vaux0-vaux2
  13.401 +//   VR24-VR27 - kpw0-kpw3
  13.402 +void MacroAssembler::sha256(bool multi_block) {
  13.403 +  static const ssize_t buf_size = 64;
  13.404 +  static const uint8_t w_size = sizeof(sha256_round_table)/sizeof(uint32_t);
  13.405 +#ifdef AIX
  13.406 +  // malloc provides 16 byte alignment
  13.407 +  if (((uintptr_t)sha256_round_consts & 0xF) != 0) {
  13.408 +    uint32_t *new_round_consts = (uint32_t*)malloc(sizeof(sha256_round_table));
  13.409 +    guarantee(new_round_consts, "oom");
  13.410 +    memcpy(new_round_consts, sha256_round_consts, sizeof(sha256_round_table));
  13.411 +    sha256_round_consts = (const uint32_t*)new_round_consts;
  13.412 +  }
  13.413 +#endif
  13.414 +
  13.415 +  Register buf_in = R3_ARG1;
  13.416 +  Register state  = R4_ARG2;
  13.417 +  Register ofs    = R5_ARG3;
  13.418 +  Register limit  = R6_ARG4;
  13.419 +
  13.420 +  Label sha_loop, core_loop;
  13.421 +
  13.422 +  // Save non-volatile vector registers in the red zone
  13.423 +  static const VectorRegister nv[] = {
  13.424 +    VR20, VR21, VR22, VR23, VR24, VR25, VR26, VR27/*, VR28, VR29, VR30, VR31*/
  13.425 +  };
  13.426 +  static const uint8_t nv_size = sizeof(nv) / sizeof (VectorRegister);
  13.427 +
  13.428 +  for (int c = 0; c < nv_size; c++) {
  13.429 +    Register tmp = R8;
  13.430 +    li  (tmp, (c - (nv_size)) * 16);
  13.431 +    stvx(nv[c], tmp, R1);
  13.432 +  }
  13.433 +
  13.434 +  // Load hash state to registers
  13.435 +  VectorRegister a = VR9;
  13.436 +  VectorRegister b = VR10;
  13.437 +  VectorRegister c = VR11;
  13.438 +  VectorRegister d = VR12;
  13.439 +  VectorRegister e = VR13;
  13.440 +  VectorRegister f = VR14;
  13.441 +  VectorRegister g = VR15;
  13.442 +  VectorRegister h = VR16;
  13.443 +  static const VectorRegister hs[] = {a, b, c, d, e, f, g, h};
  13.444 +  static const int total_hs = sizeof(hs)/sizeof(VectorRegister);
  13.445 +  // counter for cycling through hs vector to avoid register moves between iterations
  13.446 +  int h_cnt = 0;
  13.447 +
  13.448 +  // Load a-h registers from the memory pointed by state
  13.449 +#if defined(VM_LITTLE_ENDIAN)
  13.450 +  sha256_load_h_vec(a, e, state);
  13.451 +#else
  13.452 +  sha256_load_h_vec(d, h, state);
  13.453 +#endif
  13.454 +
  13.455 +  // keep k loaded also during MultiBlock loops
  13.456 +  Register k = R7;
  13.457 +  assert(((uintptr_t)sha256_round_consts & 0xF) == 0, "k alignment");
  13.458 +  load_const_optimized(k, (address)sha256_round_consts, R0);
  13.459 +
  13.460 +  // Avoiding redundant loads
  13.461 +  if (multi_block) {
  13.462 +    align(OptoLoopAlignment);
  13.463 +  }
  13.464 +  bind(sha_loop);
  13.465 +#if defined(VM_LITTLE_ENDIAN)
  13.466 +  sha256_deque(a, b, c, d);
  13.467 +  sha256_deque(e, f, g, h);
  13.468 +#else
  13.469 +  sha256_deque(d, c, b, a);
  13.470 +  sha256_deque(h, g, f, e);
  13.471 +#endif
  13.472 +
  13.473 +  // Load 16 elements from w out of the loop.
   13.474 +  // Order of the int values is Endianness specific.
  13.475 +  VectorRegister w0 = VR17;
  13.476 +  VectorRegister w1 = VR18;
  13.477 +  VectorRegister w2 = VR19;
  13.478 +  VectorRegister w3 = VR20;
  13.479 +  static const VectorRegister ws[] = {w0, w1, w2, w3};
  13.480 +  static const int total_ws = sizeof(ws)/sizeof(VectorRegister);
  13.481 +
  13.482 +  VectorRegister kpw0 = VR24;
  13.483 +  VectorRegister kpw1 = VR25;
  13.484 +  VectorRegister kpw2 = VR26;
  13.485 +  VectorRegister kpw3 = VR27;
  13.486 +  static const VectorRegister kpws[] = {kpw0, kpw1, kpw2, kpw3};
  13.487 +  static const int total_kpws = sizeof(kpws)/sizeof(VectorRegister);
  13.488 +
  13.489 +  sha256_load_w_plus_k_vec(buf_in, ws, total_ws, k, kpws, total_kpws);
  13.490 +
  13.491 +  // Cycle through the first 16 elements
  13.492 +  assert(total_ws == total_kpws, "Redesign the loop below");
  13.493 +  for (int n = 0; n < total_ws; n++) {
  13.494 +    VectorRegister vaux0 = VR21;
  13.495 +    VectorRegister vaux1 = VR22;
  13.496 +    VectorRegister vaux2 = VR23;
  13.497 +
  13.498 +    sha256_deque(kpws[n], vaux0, vaux1, vaux2);
  13.499 +
  13.500 +#if defined(VM_LITTLE_ENDIAN)
  13.501 +    sha256_round(hs, total_hs, h_cnt, kpws[n]);
  13.502 +    sha256_round(hs, total_hs, h_cnt, vaux0);
  13.503 +    sha256_round(hs, total_hs, h_cnt, vaux1);
  13.504 +    sha256_round(hs, total_hs, h_cnt, vaux2);
  13.505 +#else
  13.506 +    sha256_round(hs, total_hs, h_cnt, vaux2);
  13.507 +    sha256_round(hs, total_hs, h_cnt, vaux1);
  13.508 +    sha256_round(hs, total_hs, h_cnt, vaux0);
  13.509 +    sha256_round(hs, total_hs, h_cnt, kpws[n]);
  13.510 +#endif
  13.511 +  }
  13.512 +
  13.513 +  Register tmp = R8;
  13.514 +  // loop the 16th to the 64th iteration by 8 steps
  13.515 +  li   (tmp, (w_size - 16) / total_hs);
  13.516 +  mtctr(tmp);
  13.517 +
  13.518 +  // j will be aligned to 4 for loading words.
  13.519 +  // Whenever read, advance the pointer (e.g: when j is used in a function)
  13.520 +  Register j = R8;
  13.521 +  li   (j, 16*4);
  13.522 +
  13.523 +  align(OptoLoopAlignment);
  13.524 +  bind(core_loop);
  13.525 +
  13.526 +  // due to VectorRegister rotate, always iterate in multiples of total_hs
  13.527 +  for (int n = 0; n < total_hs/4; n++) {
  13.528 +    sha256_calc_4w(w0, w1, w2, w3, kpw0, kpw1, kpw2, kpw3, j, k);
  13.529 +    sha256_round(hs, total_hs, h_cnt, kpw0);
  13.530 +    sha256_round(hs, total_hs, h_cnt, kpw1);
  13.531 +    sha256_round(hs, total_hs, h_cnt, kpw2);
  13.532 +    sha256_round(hs, total_hs, h_cnt, kpw3);
  13.533 +  }
  13.534 +
  13.535 +  bdnz   (core_loop);
  13.536 +
  13.537 +  // Update hash state
  13.538 +  sha256_update_sha_state(a, b, c, d, e, f, g, h, state);
  13.539 +
  13.540 +  if (multi_block) {
  13.541 +    addi(buf_in, buf_in, buf_size);
  13.542 +    addi(ofs, ofs, buf_size);
  13.543 +    cmplw(CCR0, ofs, limit);
  13.544 +    ble(CCR0, sha_loop);
  13.545 +
  13.546 +    // return ofs
  13.547 +    mr(R3_RET, ofs);
  13.548 +  }
  13.549 +
  13.550 +  // Restore non-volatile registers
  13.551 +  for (int c = 0; c < nv_size; c++) {
  13.552 +    Register tmp = R8;
  13.553 +    li  (tmp, (c - (nv_size)) * 16);
  13.554 +    lvx(nv[c], tmp, R1);
  13.555 +  }
  13.556 +}
  13.557 +
  13.558 +
  13.559 +/**********************************************************************
  13.560 + * SHA 512
  13.561 + *********************************************************************/
  13.562 +
  13.563 +void MacroAssembler::sha512_load_w_vec(const Register buf_in,
  13.564 +                                       const VectorRegister* ws,
  13.565 +                                       const int total_ws) {
  13.566 +  Register tmp       = R8;
  13.567 +  VectorRegister vRb = VR8;
  13.568 +  VectorRegister aux = VR9;
  13.569 +  Label is_aligned, after_alignment;
  13.570 +
  13.571 +  andi_  (tmp, buf_in, 0xF);
  13.572 +  beq    (CCR0, is_aligned); // address ends with 0x0, not 0x8
  13.573 +
  13.574 +  // deal with unaligned addresses
  13.575 +  lvx    (ws[0], buf_in);
  13.576 +  load_perm(vRb, buf_in);
  13.577 +
  13.578 +  for (int n = 1; n < total_ws; n++) {
  13.579 +    VectorRegister w_cur = ws[n];
  13.580 +    VectorRegister w_prev = ws[n-1];
  13.581 +    addi (tmp, buf_in, n * 16);
  13.582 +    lvx  (w_cur, tmp);
  13.583 +    vec_perm(w_prev, w_cur, vRb);
  13.584 +  }
  13.585 +  addi   (tmp, buf_in, total_ws * 16);
  13.586 +  lvx    (aux, tmp);
  13.587 +  vec_perm(ws[total_ws-1], aux, vRb);
  13.588 +  b      (after_alignment);
  13.589 +
  13.590 +  bind(is_aligned);
  13.591 +  lvx  (ws[0], buf_in);
  13.592 +  for (int n = 1; n < total_ws; n++) {
  13.593 +    VectorRegister w = ws[n];
  13.594 +    addi (tmp, buf_in, n * 16);
  13.595 +    lvx  (w, tmp);
  13.596 +  }
  13.597 +
  13.598 +  bind(after_alignment);
  13.599 +}
  13.600 +
  13.601 +// Update hash state
  13.602 +void MacroAssembler::sha512_update_sha_state(const Register state,
  13.603 +                                             const VectorRegister* hs,
  13.604 +                                             const int total_hs) {
  13.605 +
  13.606 +#if defined(VM_LITTLE_ENDIAN)
  13.607 +  int start_idx = 0;
  13.608 +#else
  13.609 +  int start_idx = 1;
  13.610 +#endif
  13.611 +
  13.612 +  // load initial hash from the memory pointed by state
  13.613 +  VectorRegister ini_a = VR10;
  13.614 +  VectorRegister ini_c = VR12;
  13.615 +  VectorRegister ini_e = VR14;
  13.616 +  VectorRegister ini_g = VR16;
  13.617 +  static const VectorRegister inis[] = {ini_a, ini_c, ini_e, ini_g};
  13.618 +  static const int total_inis = sizeof(inis)/sizeof(VectorRegister);
  13.619 +
  13.620 +  Label state_save_aligned, after_state_save_aligned;
  13.621 +
  13.622 +  Register addr      = R7;
  13.623 +  Register tmp       = R8;
  13.624 +  VectorRegister vRb = VR8;
  13.625 +  VectorRegister aux = VR9;
  13.626 +
  13.627 +  andi_(tmp, state, 0xf);
  13.628 +  beq(CCR0, state_save_aligned);
  13.629 +  // deal with unaligned addresses
  13.630 +
  13.631 +  {
  13.632 +    VectorRegister a = hs[0];
  13.633 +    VectorRegister b_ = hs[1];
  13.634 +    VectorRegister c = hs[2];
  13.635 +    VectorRegister d = hs[3];
  13.636 +    VectorRegister e = hs[4];
  13.637 +    VectorRegister f = hs[5];
  13.638 +    VectorRegister g = hs[6];
  13.639 +    VectorRegister h = hs[7];
  13.640 +    load_perm(vRb, state);
  13.641 +    lvx    (ini_a, state);
  13.642 +    addi   (addr, state, 16);
  13.643 +
  13.644 +    lvx    (ini_c, addr);
  13.645 +    addi   (addr, state, 32);
  13.646 +    vec_perm(ini_a, ini_c, vRb);
  13.647 +
  13.648 +    lvx    (ini_e, addr);
  13.649 +    addi   (addr, state, 48);
  13.650 +    vec_perm(ini_c, ini_e, vRb);
  13.651 +
  13.652 +    lvx    (ini_g, addr);
  13.653 +    addi   (addr, state, 64);
  13.654 +    vec_perm(ini_e, ini_g, vRb);
  13.655 +
  13.656 +    lvx    (aux, addr);
  13.657 +    vec_perm(ini_g, aux, vRb);
  13.658 +
  13.659 +#if defined(VM_LITTLE_ENDIAN)
  13.660 +    xxmrgld(a->to_vsr(), b_->to_vsr(), a->to_vsr());
  13.661 +    xxmrgld(c->to_vsr(), d->to_vsr(), c->to_vsr());
  13.662 +    xxmrgld(e->to_vsr(), f->to_vsr(), e->to_vsr());
  13.663 +    xxmrgld(g->to_vsr(), h->to_vsr(), g->to_vsr());
  13.664 +#else
  13.665 +    xxmrgld(b_->to_vsr(), a->to_vsr(), b_->to_vsr());
  13.666 +    xxmrgld(d->to_vsr(), c->to_vsr(), d->to_vsr());
  13.667 +    xxmrgld(f->to_vsr(), e->to_vsr(), f->to_vsr());
  13.668 +    xxmrgld(h->to_vsr(), g->to_vsr(), h->to_vsr());
  13.669 +#endif
  13.670 +
  13.671 +    for (int n = start_idx; n < total_hs; n += 2) {
  13.672 +      VectorRegister h_cur = hs[n];
  13.673 +      VectorRegister ini_cur = inis[n/2];
  13.674 +
  13.675 +      vaddudm(h_cur, ini_cur, h_cur);
  13.676 +    }
  13.677 +
  13.678 +    for (int n = start_idx; n < total_hs; n += 2) {
  13.679 +      VectorRegister h_cur = hs[n];
  13.680 +
  13.681 +      mfvrd  (tmp, h_cur);
  13.682 +#if defined(VM_LITTLE_ENDIAN)
  13.683 +      std    (tmp, 8*n + 8, state);
  13.684 +#else
  13.685 +      std    (tmp, 8*n - 8, state);
  13.686 +#endif
  13.687 +      vsldoi (aux, h_cur, h_cur, 8);
  13.688 +      mfvrd  (tmp, aux);
  13.689 +      std    (tmp, 8*n + 0, state);
  13.690 +    }
  13.691 +
  13.692 +    b      (after_state_save_aligned);
  13.693 +  }
  13.694 +
  13.695 +  bind(state_save_aligned);
  13.696 +  {
  13.697 +    for (int n = 0; n < total_hs; n += 2) {
  13.698 +#if defined(VM_LITTLE_ENDIAN)
  13.699 +      VectorRegister h_cur = hs[n];
  13.700 +      VectorRegister h_next = hs[n+1];
  13.701 +#else
  13.702 +      VectorRegister h_cur = hs[n+1];
  13.703 +      VectorRegister h_next = hs[n];
  13.704 +#endif
  13.705 +      VectorRegister ini_cur = inis[n/2];
  13.706 +
  13.707 +      if (n/2 == 0) {
  13.708 +        lvx(ini_cur, state);
  13.709 +      } else {
  13.710 +        addi(addr, state, (n/2) * 16);
  13.711 +        lvx(ini_cur, addr);
  13.712 +      }
  13.713 +      xxmrgld(h_cur->to_vsr(), h_next->to_vsr(), h_cur->to_vsr());
  13.714 +    }
  13.715 +
  13.716 +    for (int n = start_idx; n < total_hs; n += 2) {
  13.717 +      VectorRegister h_cur = hs[n];
  13.718 +      VectorRegister ini_cur = inis[n/2];
  13.719 +
  13.720 +      vaddudm(h_cur, ini_cur, h_cur);
  13.721 +    }
  13.722 +
  13.723 +    for (int n = start_idx; n < total_hs; n += 2) {
  13.724 +      VectorRegister h_cur = hs[n];
  13.725 +
  13.726 +      if (n/2 == 0) {
  13.727 +        stvx(h_cur, state);
  13.728 +      } else {
  13.729 +        addi(addr, state, (n/2) * 16);
  13.730 +        stvx(h_cur, addr);
  13.731 +      }
  13.732 +    }
  13.733 +  }
  13.734 +
  13.735 +  bind(after_state_save_aligned);
  13.736 +}
  13.737 +
  13.738 +// Use h_cnt to cycle through hs elements but also increment it at the end
  13.739 +void MacroAssembler::sha512_round(const VectorRegister* hs,
  13.740 +                                  const int total_hs, int& h_cnt,
  13.741 +                                  const VectorRegister kpw) {
  13.742 +
  13.743 +  // convenience registers: cycle from 0-7 downwards
  13.744 +  const VectorRegister a = hs[(total_hs + 0 - (h_cnt % total_hs)) % total_hs];
  13.745 +  const VectorRegister b = hs[(total_hs + 1 - (h_cnt % total_hs)) % total_hs];
  13.746 +  const VectorRegister c = hs[(total_hs + 2 - (h_cnt % total_hs)) % total_hs];
  13.747 +  const VectorRegister d = hs[(total_hs + 3 - (h_cnt % total_hs)) % total_hs];
  13.748 +  const VectorRegister e = hs[(total_hs + 4 - (h_cnt % total_hs)) % total_hs];
  13.749 +  const VectorRegister f = hs[(total_hs + 5 - (h_cnt % total_hs)) % total_hs];
  13.750 +  const VectorRegister g = hs[(total_hs + 6 - (h_cnt % total_hs)) % total_hs];
  13.751 +  const VectorRegister h = hs[(total_hs + 7 - (h_cnt % total_hs)) % total_hs];
  13.752 +  // temporaries
  13.753 +  const VectorRegister Ch   = VR20;
  13.754 +  const VectorRegister Maj  = VR21;
  13.755 +  const VectorRegister bsa  = VR22;
  13.756 +  const VectorRegister bse  = VR23;
  13.757 +  const VectorRegister tmp1 = VR24;
  13.758 +  const VectorRegister tmp2 = VR25;
  13.759 +
  13.760 +  vsel      (Ch,   g,    f,   e);
  13.761 +  vxor      (Maj,  a,    b);
  13.762 +  vshasigmad(bse,  e,    1,   0xf);
  13.763 +  vaddudm   (tmp2, Ch,   kpw);
  13.764 +  vaddudm   (tmp1, h,    bse);
  13.765 +  vsel      (Maj,  b,    c,   Maj);
  13.766 +  vaddudm   (tmp1, tmp1, tmp2);
  13.767 +  vshasigmad(bsa,  a,    1,   0);
  13.768 +  vaddudm   (tmp2, bsa,  Maj);
  13.769 +  vaddudm   (d,    d,    tmp1);
  13.770 +  vaddudm   (h,    tmp1, tmp2);
  13.771 +
  13.772 +  // advance vector pointer to the next iteration
  13.773 +  h_cnt++;
  13.774 +}
  13.775 +
  13.776 +void MacroAssembler::sha512_calc_2w(const VectorRegister w0,
  13.777 +                                    const VectorRegister w1,
  13.778 +                                    const VectorRegister w2,
  13.779 +                                    const VectorRegister w3,
  13.780 +                                    const VectorRegister w4,
  13.781 +                                    const VectorRegister w5,
  13.782 +                                    const VectorRegister w6,
  13.783 +                                    const VectorRegister w7,
  13.784 +                                    const VectorRegister kpw0,
  13.785 +                                    const VectorRegister kpw1,
  13.786 +                                    const Register j,
  13.787 +                                    const VectorRegister vRb,
  13.788 +                                    const Register k) {
  13.789 +  // Temporaries
  13.790 +  const VectorRegister VR_a = VR20;
  13.791 +  const VectorRegister VR_b = VR21;
  13.792 +  const VectorRegister VR_c = VR22;
  13.793 +  const VectorRegister VR_d = VR23;
  13.794 +
  13.795 +  // load to k[j]
  13.796 +  lvx        (VR_a, j,    k);
  13.797 +  // advance j
  13.798 +  addi       (j,    j,    16); // 16 bytes were read
  13.799 +
  13.800 +#if defined(VM_LITTLE_ENDIAN)
  13.801 +  // v6 = w[j-15], w[j-14]
  13.802 +  vperm      (VR_b, w1,   w0,  vRb);
  13.803 +  // v12 = w[j-7], w[j-6]
  13.804 +  vperm      (VR_c, w5,   w4,  vRb);
  13.805 +#else
  13.806 +  // v6 = w[j-15], w[j-14]
  13.807 +  vperm      (VR_b, w0,   w1,  vRb);
  13.808 +  // v12 = w[j-7], w[j-6]
  13.809 +  vperm      (VR_c, w4,   w5,  vRb);
  13.810 +#endif
  13.811 +
  13.812 +  // v6 = s0(w[j-15]) , s0(w[j-14])
  13.813 +  vshasigmad (VR_b, VR_b,    0,   0);
  13.814 +  // v5 = s1(w[j-2]) , s1(w[j-1])
  13.815 +  vshasigmad (VR_d, w7,      0,   0xf);
  13.816 +  // v6 = s0(w[j-15]) + w[j-7] , s0(w[j-14]) + w[j-6]
  13.817 +  vaddudm    (VR_b, VR_b, VR_c);
  13.818 +  // v8 = s1(w[j-2]) + w[j-16] , s1(w[j-1]) + w[j-15]
  13.819 +  vaddudm    (VR_d, VR_d, w0);
  13.820 +  // v9 = s0(w[j-15]) + w[j-7] + w[j-16] + s1(w[j-2]), // w[j]
  13.821 +  //      s0(w[j-14]) + w[j-6] + w[j-15] + s1(w[j-1]), // w[j+1]
  13.822 +  vaddudm    (VR_c, VR_d, VR_b);
  13.823 +  // Updating w0 to w7 to hold the new previous 16 values from w.
  13.824 +  vmr        (w0,   w1);
  13.825 +  vmr        (w1,   w2);
  13.826 +  vmr        (w2,   w3);
  13.827 +  vmr        (w3,   w4);
  13.828 +  vmr        (w4,   w5);
  13.829 +  vmr        (w5,   w6);
  13.830 +  vmr        (w6,   w7);
  13.831 +  vmr        (w7,   VR_c);
  13.832 +
  13.833 +#if defined(VM_LITTLE_ENDIAN)
  13.834 +  // store k + w to kpw0 (2 values at once)
  13.835 +  vaddudm    (kpw0, VR_c, VR_a);
  13.836 +  // kpw1 holds (k + w)[1]
  13.837 +  vsldoi     (kpw1, kpw0, kpw0, 8);
  13.838 +#else
  13.839 +  // store k + w to kpw0 (2 values at once)
  13.840 +  vaddudm    (kpw1, VR_c, VR_a);
  13.841 +  // kpw1 holds (k + w)[1]
  13.842 +  vsldoi     (kpw0, kpw1, kpw1, 8);
  13.843 +#endif
  13.844 +}
  13.845 +
  13.846 +void MacroAssembler::sha512_load_h_vec(const Register state,
  13.847 +                                       const VectorRegister* hs,
  13.848 +                                       const int total_hs) {
  13.849 +#if defined(VM_LITTLE_ENDIAN)
  13.850 +  VectorRegister a   = hs[0];
  13.851 +  VectorRegister g   = hs[6];
  13.852 +  int start_idx = 0;
  13.853 +#else
  13.854 +  VectorRegister a   = hs[1];
  13.855 +  VectorRegister g   = hs[7];
  13.856 +  int start_idx = 1;
  13.857 +#endif
  13.858 +
  13.859 +  Register addr      = R7;
  13.860 +  VectorRegister vRb = VR8;
  13.861 +  Register tmp       = R8;
  13.862 +  Label state_aligned, after_state_aligned;
  13.863 +
  13.864 +  andi_(tmp, state, 0xf);
  13.865 +  beq(CCR0, state_aligned);
  13.866 +
  13.867 +  // deal with unaligned addresses
  13.868 +  VectorRegister aux = VR9;
  13.869 +
  13.870 +  lvx(hs[start_idx], state);
  13.871 +  load_perm(vRb, state);
  13.872 +
  13.873 +  for (int n = start_idx + 2; n < total_hs; n += 2) {
  13.874 +    VectorRegister h_cur   = hs[n];
  13.875 +    VectorRegister h_prev2 = hs[n - 2];
  13.876 +    addi(addr, state, (n/2) * 16);
  13.877 +    lvx(h_cur, addr);
  13.878 +    vec_perm(h_prev2, h_cur, vRb);
  13.879 +  }
  13.880 +  addi(addr, state, (total_hs/2) * 16);
  13.881 +  lvx    (aux, addr);
  13.882 +  vec_perm(hs[total_hs - 2 + start_idx], aux, vRb);
  13.883 +  b      (after_state_aligned);
  13.884 +
  13.885 +  bind(state_aligned);
  13.886 +
  13.887 +  // deal with aligned addresses
  13.888 +  lvx(hs[start_idx], state);
  13.889 +
  13.890 +  for (int n = start_idx + 2; n < total_hs; n += 2) {
  13.891 +    VectorRegister h_cur = hs[n];
  13.892 +    addi(addr, state, (n/2) * 16);
  13.893 +    lvx(h_cur, addr);
  13.894 +  }
  13.895 +
  13.896 +  bind(after_state_aligned);
  13.897 +}
  13.898 +
  13.899 +static const uint64_t sha512_round_table[80] __attribute((aligned(16))) = {
  13.900 +  0x428a2f98d728ae22, 0x7137449123ef65cd,
  13.901 +  0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc,
  13.902 +  0x3956c25bf348b538, 0x59f111f1b605d019,
  13.903 +  0x923f82a4af194f9b, 0xab1c5ed5da6d8118,
  13.904 +  0xd807aa98a3030242, 0x12835b0145706fbe,
  13.905 +  0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
  13.906 +  0x72be5d74f27b896f, 0x80deb1fe3b1696b1,
  13.907 +  0x9bdc06a725c71235, 0xc19bf174cf692694,
  13.908 +  0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
  13.909 +  0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65,
  13.910 +  0x2de92c6f592b0275, 0x4a7484aa6ea6e483,
  13.911 +  0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
  13.912 +  0x983e5152ee66dfab, 0xa831c66d2db43210,
  13.913 +  0xb00327c898fb213f, 0xbf597fc7beef0ee4,
  13.914 +  0xc6e00bf33da88fc2, 0xd5a79147930aa725,
  13.915 +  0x06ca6351e003826f, 0x142929670a0e6e70,
  13.916 +  0x27b70a8546d22ffc, 0x2e1b21385c26c926,
  13.917 +  0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
  13.918 +  0x650a73548baf63de, 0x766a0abb3c77b2a8,
  13.919 +  0x81c2c92e47edaee6, 0x92722c851482353b,
  13.920 +  0xa2bfe8a14cf10364, 0xa81a664bbc423001,
  13.921 +  0xc24b8b70d0f89791, 0xc76c51a30654be30,
  13.922 +  0xd192e819d6ef5218, 0xd69906245565a910,
  13.923 +  0xf40e35855771202a, 0x106aa07032bbd1b8,
  13.924 +  0x19a4c116b8d2d0c8, 0x1e376c085141ab53,
  13.925 +  0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8,
  13.926 +  0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
  13.927 +  0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3,
  13.928 +  0x748f82ee5defb2fc, 0x78a5636f43172f60,
  13.929 +  0x84c87814a1f0ab72, 0x8cc702081a6439ec,
  13.930 +  0x90befffa23631e28, 0xa4506cebde82bde9,
  13.931 +  0xbef9a3f7b2c67915, 0xc67178f2e372532b,
  13.932 +  0xca273eceea26619c, 0xd186b8c721c0c207,
  13.933 +  0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178,
  13.934 +  0x06f067aa72176fba, 0x0a637dc5a2c898a6,
  13.935 +  0x113f9804bef90dae, 0x1b710b35131c471b,
  13.936 +  0x28db77f523047d84, 0x32caab7b40c72493,
  13.937 +  0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c,
  13.938 +  0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
  13.939 +  0x5fcb6fab3ad6faec, 0x6c44198c4a475817,
  13.940 +};
  13.941 +static const uint64_t *sha512_round_consts = sha512_round_table;
  13.942 +
  13.943 +//   R3_ARG1   - byte[]  Input string with padding but in Big Endian
  13.944 +//   R4_ARG2   - int[]   SHA.state (at first, the root of primes)
  13.945 +//   R5_ARG3   - int     offset
  13.946 +//   R6_ARG4   - int     limit
  13.947 +//
  13.948 +//   Internal Register usage:
  13.949 +//   R7 R8 R9  - volatile temporaries
  13.950 +//   VR0-VR7   - a-h
  13.951 +//   VR8       - vRb
  13.952 +//   VR9       - aux (highly volatile, use with care)
  13.953 +//   VR10-VR17 - w0-w7 | ini_a-ini_h
  13.954 +//   VR18      - vsp16 | kplusw0
  13.955 +//   VR19      - vsp32 | kplusw1
  13.956 +//   VR20-VR25 - sha512_calc_2w and sha512_round temporaries
  13.957 +void MacroAssembler::sha512(bool multi_block) {
  13.958 +  static const ssize_t buf_size = 128;
  13.959 +  static const uint8_t w_size = sizeof(sha512_round_table)/sizeof(uint64_t);
  13.960 +#ifdef AIX
  13.961 +  // malloc provides 16 byte alignment
  13.962 +  if (((uintptr_t)sha512_round_consts & 0xF) != 0) {
  13.963 +    uint64_t *new_round_consts = (uint64_t*)malloc(sizeof(sha512_round_table));
  13.964 +    guarantee(new_round_consts, "oom");
  13.965 +    memcpy(new_round_consts, sha512_round_consts, sizeof(sha512_round_table));
  13.966 +    sha512_round_consts = (const uint64_t*)new_round_consts;
  13.967 +  }
  13.968 +#endif
  13.969 +
  13.970 +  Register buf_in = R3_ARG1;
  13.971 +  Register state  = R4_ARG2;
  13.972 +  Register ofs    = R5_ARG3;
  13.973 +  Register limit  = R6_ARG4;
  13.974 +
  13.975 +  Label sha_loop, core_loop;
  13.976 +
  13.977 +  // Save non-volatile vector registers in the red zone
  13.978 +  static const VectorRegister nv[] = {
  13.979 +    VR20, VR21, VR22, VR23, VR24, VR25/*, VR26, VR27, VR28, VR29, VR30, VR31*/
  13.980 +  };
  13.981 +  static const uint8_t nv_size = sizeof(nv) / sizeof (VectorRegister);
  13.982 +
  13.983 +  for (int c = 0; c < nv_size; c++) {
  13.984 +    Register idx = R7;
  13.985 +    li  (idx, (c - (nv_size)) * 16);
  13.986 +    stvx(nv[c], idx, R1);
  13.987 +  }
  13.988 +
  13.989 +  // Load hash state to registers
  13.990 +  VectorRegister a = VR0;
  13.991 +  VectorRegister b = VR1;
  13.992 +  VectorRegister c = VR2;
  13.993 +  VectorRegister d = VR3;
  13.994 +  VectorRegister e = VR4;
  13.995 +  VectorRegister f = VR5;
  13.996 +  VectorRegister g = VR6;
  13.997 +  VectorRegister h = VR7;
  13.998 +  static const VectorRegister hs[] = {a, b, c, d, e, f, g, h};
  13.999 +  static const int total_hs = sizeof(hs)/sizeof(VectorRegister);
 13.1000 +  // counter for cycling through hs vector to avoid register moves between iterations
 13.1001 +  int h_cnt = 0;
 13.1002 +
 13.1003 +  // Load a-h registers from the memory pointed by state
 13.1004 +  sha512_load_h_vec(state, hs, total_hs);
 13.1005 +
 13.1006 +  Register k = R9;
 13.1007 +  assert(((uintptr_t)sha512_round_consts & 0xF) == 0, "k alignment");
 13.1008 +  load_const_optimized(k, (address)sha512_round_consts, R0);
 13.1009 +
 13.1010 +  if (multi_block) {
 13.1011 +    align(OptoLoopAlignment);
 13.1012 +  }
 13.1013 +  bind(sha_loop);
 13.1014 +
 13.1015 +  for (int n = 0; n < total_hs; n += 2) {
 13.1016 +#if defined(VM_LITTLE_ENDIAN)
 13.1017 +    VectorRegister h_cur = hs[n];
 13.1018 +    VectorRegister h_next = hs[n + 1];
 13.1019 +#else
 13.1020 +    VectorRegister h_cur = hs[n + 1];
 13.1021 +    VectorRegister h_next = hs[n];
 13.1022 +#endif
 13.1023 +    vsldoi (h_next, h_cur, h_cur, 8);
 13.1024 +  }
 13.1025 +
 13.1026 +  // Load 16 elements from w out of the loop.
 13.1027 +  // Order of the long values is Endianess specific.
 13.1028 +  VectorRegister w0 = VR10;
 13.1029 +  VectorRegister w1 = VR11;
 13.1030 +  VectorRegister w2 = VR12;
 13.1031 +  VectorRegister w3 = VR13;
 13.1032 +  VectorRegister w4 = VR14;
 13.1033 +  VectorRegister w5 = VR15;
 13.1034 +  VectorRegister w6 = VR16;
 13.1035 +  VectorRegister w7 = VR17;
 13.1036 +  static const VectorRegister ws[] = {w0, w1, w2, w3, w4, w5, w6, w7};
 13.1037 +  static const int total_ws = sizeof(ws)/sizeof(VectorRegister);
 13.1038 +
 13.1039 +  // Load 16 w into vectors and setup vsl for vperm
 13.1040 +  sha512_load_w_vec(buf_in, ws, total_ws);
 13.1041 +
 13.1042 +#if defined(VM_LITTLE_ENDIAN)
 13.1043 +  VectorRegister vsp16 = VR18;
 13.1044 +  VectorRegister vsp32 = VR19;
 13.1045 +  VectorRegister shiftarg = VR9;
 13.1046 +
 13.1047 +  vspltisw(vsp16,    8);
 13.1048 +  vspltisw(shiftarg, 1);
 13.1049 +  vsl     (vsp16,    vsp16, shiftarg);
 13.1050 +  vsl     (vsp32,    vsp16, shiftarg);
 13.1051 +
 13.1052 +  VectorRegister vsp8 = VR9;
 13.1053 +  vspltish(vsp8,     8);
 13.1054 +
 13.1055 +  // Convert input from Big Endian to Little Endian
 13.1056 +  for (int c = 0; c < total_ws; c++) {
 13.1057 +    VectorRegister w = ws[c];
 13.1058 +    vrlh  (w, w, vsp8);
 13.1059 +  }
 13.1060 +  for (int c = 0; c < total_ws; c++) {
 13.1061 +    VectorRegister w = ws[c];
 13.1062 +    vrlw  (w, w, vsp16);
 13.1063 +  }
 13.1064 +  for (int c = 0; c < total_ws; c++) {
 13.1065 +    VectorRegister w = ws[c];
 13.1066 +    vrld  (w, w, vsp32);
 13.1067 +  }
 13.1068 +#endif
 13.1069 +
 13.1070 +  Register Rb        = R10;
 13.1071 +  VectorRegister vRb = VR8;
 13.1072 +  li      (Rb, 8);
 13.1073 +  load_perm(vRb, Rb);
 13.1074 +
 13.1075 +  VectorRegister kplusw0 = VR18;
 13.1076 +  VectorRegister kplusw1 = VR19;
 13.1077 +
 13.1078 +  Register addr      = R7;
 13.1079 +
 13.1080 +  for (int n = 0; n < total_ws; n++) {
 13.1081 +    VectorRegister w = ws[n];
 13.1082 +
 13.1083 +    if (n == 0) {
 13.1084 +      lvx  (kplusw0, k);
 13.1085 +    } else {
 13.1086 +      addi (addr, k, n * 16);
 13.1087 +      lvx  (kplusw0, addr);
 13.1088 +    }
 13.1089 +#if defined(VM_LITTLE_ENDIAN)
 13.1090 +    vaddudm(kplusw0, kplusw0, w);
 13.1091 +    vsldoi (kplusw1, kplusw0, kplusw0, 8);
 13.1092 +#else
 13.1093 +    vaddudm(kplusw1, kplusw0, w);
 13.1094 +    vsldoi (kplusw0, kplusw1, kplusw1, 8);
 13.1095 +#endif
 13.1096 +
 13.1097 +    sha512_round(hs, total_hs, h_cnt, kplusw0);
 13.1098 +    sha512_round(hs, total_hs, h_cnt, kplusw1);
 13.1099 +  }
 13.1100 +
 13.1101 +  Register tmp       = R8;
 13.1102 +  li    (tmp, (w_size-16)/total_hs);
 13.1103 +  mtctr (tmp);
 13.1104 +  // j will be aligned to 4 for loading words.
 13.1105 +  // Whenever read, advance the pointer (e.g: when j is used in a function)
 13.1106 +  Register j = tmp;
 13.1107 +  li     (j, 8*16);
 13.1108 +
 13.1109 +  align(OptoLoopAlignment);
 13.1110 +  bind(core_loop);
 13.1111 +
 13.1112 +  // due to VectorRegister rotate, always iterate in multiples of total_hs
 13.1113 +  for (int n = 0; n < total_hs/2; n++) {
 13.1114 +    sha512_calc_2w(w0, w1, w2, w3, w4, w5, w6, w7, kplusw0, kplusw1, j, vRb, k);
 13.1115 +    sha512_round(hs, total_hs, h_cnt, kplusw0);
 13.1116 +    sha512_round(hs, total_hs, h_cnt, kplusw1);
 13.1117 +  }
 13.1118 +
 13.1119 +  bdnz   (core_loop);
 13.1120 +
 13.1121 +  sha512_update_sha_state(state, hs, total_hs);
 13.1122 +
 13.1123 +  if (multi_block) {
 13.1124 +    addi(buf_in, buf_in, buf_size);
 13.1125 +    addi(ofs, ofs, buf_size);
 13.1126 +    cmplw(CCR0, ofs, limit);
 13.1127 +    ble(CCR0, sha_loop);
 13.1128 +
 13.1129 +    // return ofs
 13.1130 +    mr(R3_RET, ofs);
 13.1131 +  }
 13.1132 +
 13.1133 +  // Restore non-volatile registers
 13.1134 +  for (int c = 0; c < nv_size; c++) {
 13.1135 +    Register idx = R7;
 13.1136 +    li  (idx, (c - (nv_size)) * 16);
 13.1137 +    lvx(nv[c], idx, R1);
 13.1138 +  }
 13.1139 +}
    14.1 --- a/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Sat Nov 09 20:15:27 2019 +0800
    14.2 +++ b/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Sat Nov 09 20:29:45 2019 +0800
    14.3 @@ -2224,7 +2224,7 @@
    14.4      return start;
    14.5    }
    14.6  
    14.7 -  // Arguments for generated stub (little endian only):
    14.8 +  // Arguments for generated stub:
    14.9    //   R3_ARG1   - source byte array address
   14.10    //   R4_ARG2   - destination byte array address
   14.11    //   R5_ARG3   - round key array
   14.12 @@ -2243,7 +2243,6 @@
   14.13      Register keylen         = R8;
   14.14      Register temp           = R9;
   14.15      Register keypos         = R10;
   14.16 -    Register hex            = R11;
   14.17      Register fifteen        = R12;
   14.18  
   14.19      VectorRegister vRet     = VR0;
   14.20 @@ -2263,164 +2262,170 @@
   14.21      VectorRegister vTmp3    = VR11;
   14.22      VectorRegister vTmp4    = VR12;
   14.23  
   14.24 -    VectorRegister vLow     = VR13;
   14.25 -    VectorRegister vHigh    = VR14;
   14.26 -
   14.27 -    __ li              (hex, 16);
   14.28      __ li              (fifteen, 15);
   14.29 -    __ vspltisb        (fSplt, 0x0f);
   14.30  
   14.31      // load unaligned from[0-15] to vsRet
   14.32      __ lvx             (vRet, from);
   14.33      __ lvx             (vTmp1, fifteen, from);
   14.34      __ lvsl            (fromPerm, from);
   14.35 +#ifdef VM_LITTLE_ENDIAN
   14.36 +    __ vspltisb        (fSplt, 0x0f);
   14.37      __ vxor            (fromPerm, fromPerm, fSplt);
   14.38 +#endif
   14.39      __ vperm           (vRet, vRet, vTmp1, fromPerm);
   14.40  
   14.41      // load keylen (44 or 52 or 60)
   14.42      __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
   14.43  
   14.44      // to load keys
   14.45 -    __ lvsr            (keyPerm, key);
   14.46 -    __ vxor            (vTmp2, vTmp2, vTmp2);
   14.47 +    __ load_perm       (keyPerm, key);
   14.48 +#ifdef VM_LITTLE_ENDIAN
   14.49      __ vspltisb        (vTmp2, -16);
   14.50      __ vrld            (keyPerm, keyPerm, vTmp2);
   14.51      __ vrld            (keyPerm, keyPerm, vTmp2);
   14.52      __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
   14.53 -
   14.54 -    // load the 1st round key to vKey1
   14.55 -    __ li              (keypos, 0);
   14.56 +#endif
   14.57 +
   14.58 +    // load the 1st round key to vTmp1
   14.59 +    __ lvx             (vTmp1, key);
   14.60 +    __ li              (keypos, 16);
   14.61      __ lvx             (vKey1, keypos, key);
   14.62 -    __ addi            (keypos, keypos, 16);
   14.63 +    __ vec_perm        (vTmp1, vKey1, keyPerm);
   14.64 +
   14.65 +    // 1st round
   14.66 +    __ vxor            (vRet, vRet, vTmp1);
   14.67 +
   14.68 +    // load the 2nd round key to vKey1
   14.69 +    __ li              (keypos, 32);
   14.70 +    __ lvx             (vKey2, keypos, key);
   14.71 +    __ vec_perm        (vKey1, vKey2, keyPerm);
   14.72 +
   14.73 +    // load the 3rd round key to vKey2
   14.74 +    __ li              (keypos, 48);
   14.75 +    __ lvx             (vKey3, keypos, key);
   14.76 +    __ vec_perm        (vKey2, vKey3, keyPerm);
   14.77 +
   14.78 +    // load the 4th round key to vKey3
   14.79 +    __ li              (keypos, 64);
   14.80 +    __ lvx             (vKey4, keypos, key);
   14.81 +    __ vec_perm        (vKey3, vKey4, keyPerm);
   14.82 +
   14.83 +    // load the 5th round key to vKey4
   14.84 +    __ li              (keypos, 80);
   14.85      __ lvx             (vTmp1, keypos, key);
   14.86 -    __ vperm           (vKey1, vTmp1, vKey1, keyPerm);
   14.87 -
   14.88 -    // 1st round
   14.89 -    __ vxor (vRet, vRet, vKey1);
   14.90 -
   14.91 -    // load the 2nd round key to vKey1
   14.92 -    __ addi            (keypos, keypos, 16);
   14.93 -    __ lvx             (vTmp2, keypos, key);
   14.94 -    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
   14.95 -
   14.96 -    // load the 3rd round key to vKey2
   14.97 -    __ addi            (keypos, keypos, 16);
   14.98 +    __ vec_perm        (vKey4, vTmp1, keyPerm);
   14.99 +
  14.100 +    // 2nd - 5th rounds
  14.101 +    __ vcipher         (vRet, vRet, vKey1);
  14.102 +    __ vcipher         (vRet, vRet, vKey2);
  14.103 +    __ vcipher         (vRet, vRet, vKey3);
  14.104 +    __ vcipher         (vRet, vRet, vKey4);
  14.105 +
  14.106 +    // load the 6th round key to vKey1
  14.107 +    __ li              (keypos, 96);
  14.108 +    __ lvx             (vKey2, keypos, key);
  14.109 +    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
  14.110 +
  14.111 +    // load the 7th round key to vKey2
  14.112 +    __ li              (keypos, 112);
  14.113 +    __ lvx             (vKey3, keypos, key);
  14.114 +    __ vec_perm        (vKey2, vKey3, keyPerm);
  14.115 +
  14.116 +    // load the 8th round key to vKey3
  14.117 +    __ li              (keypos, 128);
  14.118 +    __ lvx             (vKey4, keypos, key);
  14.119 +    __ vec_perm        (vKey3, vKey4, keyPerm);
  14.120 +
  14.121 +    // load the 9th round key to vKey4
  14.122 +    __ li              (keypos, 144);
  14.123      __ lvx             (vTmp1, keypos, key);
  14.124 -    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
  14.125 -
  14.126 -    // load the 4th round key to vKey3
  14.127 -    __ addi            (keypos, keypos, 16);
  14.128 -    __ lvx             (vTmp2, keypos, key);
  14.129 -    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
  14.130 -
  14.131 -    // load the 5th round key to vKey4
  14.132 -    __ addi            (keypos, keypos, 16);
  14.133 +    __ vec_perm        (vKey4, vTmp1, keyPerm);
  14.134 +
  14.135 +    // 6th - 9th rounds
  14.136 +    __ vcipher         (vRet, vRet, vKey1);
  14.137 +    __ vcipher         (vRet, vRet, vKey2);
  14.138 +    __ vcipher         (vRet, vRet, vKey3);
  14.139 +    __ vcipher         (vRet, vRet, vKey4);
  14.140 +
  14.141 +    // load the 10th round key to vKey1
  14.142 +    __ li              (keypos, 160);
  14.143 +    __ lvx             (vKey2, keypos, key);
  14.144 +    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
  14.145 +
  14.146 +    // load the 11th round key to vKey2
  14.147 +    __ li              (keypos, 176);
  14.148      __ lvx             (vTmp1, keypos, key);
  14.149 -    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
  14.150 -
  14.151 -    // 2nd - 5th rounds
  14.152 -    __ vcipher (vRet, vRet, vKey1);
  14.153 -    __ vcipher (vRet, vRet, vKey2);
  14.154 -    __ vcipher (vRet, vRet, vKey3);
  14.155 -    __ vcipher (vRet, vRet, vKey4);
  14.156 -
  14.157 -    // load the 6th round key to vKey1
  14.158 -    __ addi            (keypos, keypos, 16);
  14.159 -    __ lvx             (vTmp2, keypos, key);
  14.160 -    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
  14.161 -
  14.162 -    // load the 7th round key to vKey2
  14.163 -    __ addi            (keypos, keypos, 16);
  14.164 -    __ lvx             (vTmp1, keypos, key);
  14.165 -    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
  14.166 -
  14.167 -    // load the 8th round key to vKey3
  14.168 -    __ addi            (keypos, keypos, 16);
  14.169 -    __ lvx             (vTmp2, keypos, key);
  14.170 -    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
  14.171 -
  14.172 -    // load the 9th round key to vKey4
  14.173 -    __ addi            (keypos, keypos, 16);
  14.174 -    __ lvx             (vTmp1, keypos, key);
  14.175 -    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
  14.176 -
  14.177 -    // 6th - 9th rounds
  14.178 -    __ vcipher (vRet, vRet, vKey1);
  14.179 -    __ vcipher (vRet, vRet, vKey2);
  14.180 -    __ vcipher (vRet, vRet, vKey3);
  14.181 -    __ vcipher (vRet, vRet, vKey4);
  14.182 -
  14.183 -    // load the 10th round key to vKey1
  14.184 -    __ addi            (keypos, keypos, 16);
  14.185 -    __ lvx             (vTmp2, keypos, key);
  14.186 -    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
  14.187 -
  14.188 -    // load the 11th round key to vKey2
  14.189 -    __ addi            (keypos, keypos, 16);
  14.190 -    __ lvx             (vTmp1, keypos, key);
  14.191 -    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
  14.192 +    __ vec_perm        (vKey2, vTmp1, keyPerm);
  14.193  
  14.194      // if all round keys are loaded, skip next 4 rounds
  14.195      __ cmpwi           (CCR0, keylen, 44);
  14.196      __ beq             (CCR0, L_doLast);
  14.197  
  14.198      // 10th - 11th rounds
  14.199 -    __ vcipher (vRet, vRet, vKey1);
  14.200 -    __ vcipher (vRet, vRet, vKey2);
  14.201 +    __ vcipher         (vRet, vRet, vKey1);
  14.202 +    __ vcipher         (vRet, vRet, vKey2);
  14.203  
  14.204      // load the 12th round key to vKey1
  14.205 -    __ addi            (keypos, keypos, 16);
  14.206 -    __ lvx             (vTmp2, keypos, key);
  14.207 -    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
  14.208 +    __ li              (keypos, 192);
  14.209 +    __ lvx             (vKey2, keypos, key);
  14.210 +    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
  14.211  
  14.212      // load the 13th round key to vKey2
  14.213 -    __ addi            (keypos, keypos, 16);
  14.214 +    __ li              (keypos, 208);
  14.215      __ lvx             (vTmp1, keypos, key);
  14.216 -    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
  14.217 +    __ vec_perm        (vKey2, vTmp1, keyPerm);
  14.218  
  14.219      // if all round keys are loaded, skip next 2 rounds
  14.220      __ cmpwi           (CCR0, keylen, 52);
  14.221      __ beq             (CCR0, L_doLast);
  14.222  
  14.223      // 12th - 13th rounds
  14.224 -    __ vcipher (vRet, vRet, vKey1);
  14.225 -    __ vcipher (vRet, vRet, vKey2);
  14.226 +    __ vcipher         (vRet, vRet, vKey1);
  14.227 +    __ vcipher         (vRet, vRet, vKey2);
  14.228  
  14.229      // load the 14th round key to vKey1
  14.230 -    __ addi            (keypos, keypos, 16);
  14.231 -    __ lvx             (vTmp2, keypos, key);
  14.232 -    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
  14.233 +    __ li              (keypos, 224);
  14.234 +    __ lvx             (vKey2, keypos, key);
  14.235 +    __ vec_perm        (vKey1, vTmp1, vKey2, keyPerm);
  14.236  
  14.237      // load the 15th round key to vKey2
  14.238 -    __ addi            (keypos, keypos, 16);
  14.239 +    __ li              (keypos, 240);
  14.240      __ lvx             (vTmp1, keypos, key);
  14.241 -    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
  14.242 +    __ vec_perm        (vKey2, vTmp1, keyPerm);
  14.243  
  14.244      __ bind(L_doLast);
  14.245  
  14.246      // last two rounds
  14.247 -    __ vcipher (vRet, vRet, vKey1);
  14.248 -    __ vcipherlast (vRet, vRet, vKey2);
  14.249 -
  14.250 -    __ neg             (temp, to);
  14.251 -    __ lvsr            (toPerm, temp);
  14.252 -    __ vspltisb        (vTmp2, -1);
  14.253 -    __ vxor            (vTmp1, vTmp1, vTmp1);
  14.254 -    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
  14.255 -    __ vxor            (toPerm, toPerm, fSplt);
  14.256 +    __ vcipher         (vRet, vRet, vKey1);
  14.257 +    __ vcipherlast     (vRet, vRet, vKey2);
  14.258 +
  14.259 +    // store result (unaligned)
  14.260 +#ifdef VM_LITTLE_ENDIAN
  14.261 +    __ lvsl            (toPerm, to);
  14.262 +#else
  14.263 +    __ lvsr            (toPerm, to);
  14.264 +#endif
  14.265 +    __ vspltisb        (vTmp3, -1);
  14.266 +    __ vspltisb        (vTmp4, 0);
  14.267      __ lvx             (vTmp1, to);
  14.268 -    __ vperm           (vRet, vRet, vRet, toPerm);
  14.269 -    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
  14.270 -    __ lvx             (vTmp4, fifteen, to);
  14.271 +    __ lvx             (vTmp2, fifteen, to);
  14.272 +#ifdef VM_LITTLE_ENDIAN
  14.273 +    __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
  14.274 +    __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
  14.275 +#else
  14.276 +    __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
  14.277 +#endif
  14.278 +    __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
  14.279 +    __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
  14.280 +    __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
  14.281 +    __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
  14.282      __ stvx            (vTmp1, to);
  14.283 -    __ vsel            (vRet, vRet, vTmp4, vTmp2);
  14.284 -    __ stvx            (vRet, fifteen, to);
  14.285  
  14.286      __ blr();
  14.287       return start;
  14.288    }
  14.289  
  14.290 -  // Arguments for generated stub (little endian only):
  14.291 +  // Arguments for generated stub:
  14.292    //   R3_ARG1   - source byte array address
  14.293    //   R4_ARG2   - destination byte array address
  14.294    //   R5_ARG3   - K (key) in little endian int array
  14.295 @@ -2442,7 +2447,6 @@
  14.296      Register keylen         = R8;
  14.297      Register temp           = R9;
  14.298      Register keypos         = R10;
  14.299 -    Register hex            = R11;
  14.300      Register fifteen        = R12;
  14.301  
  14.302      VectorRegister vRet     = VR0;
  14.303 @@ -2463,30 +2467,30 @@
  14.304      VectorRegister vTmp3    = VR12;
  14.305      VectorRegister vTmp4    = VR13;
  14.306  
  14.307 -    VectorRegister vLow     = VR14;
  14.308 -    VectorRegister vHigh    = VR15;
  14.309 -
  14.310 -    __ li              (hex, 16);
  14.311      __ li              (fifteen, 15);
  14.312 -    __ vspltisb        (fSplt, 0x0f);
  14.313  
  14.314      // load unaligned from[0-15] to vsRet
  14.315      __ lvx             (vRet, from);
  14.316      __ lvx             (vTmp1, fifteen, from);
  14.317      __ lvsl            (fromPerm, from);
  14.318 +#ifdef VM_LITTLE_ENDIAN
  14.319 +    __ vspltisb        (fSplt, 0x0f);
  14.320      __ vxor            (fromPerm, fromPerm, fSplt);
  14.321 +#endif
  14.322      __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
  14.323  
  14.324      // load keylen (44 or 52 or 60)
  14.325      __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
  14.326  
  14.327      // to load keys
  14.328 -    __ lvsr            (keyPerm, key);
  14.329 +    __ load_perm       (keyPerm, key);
  14.330 +#ifdef VM_LITTLE_ENDIAN
  14.331      __ vxor            (vTmp2, vTmp2, vTmp2);
  14.332      __ vspltisb        (vTmp2, -16);
  14.333      __ vrld            (keyPerm, keyPerm, vTmp2);
  14.334      __ vrld            (keyPerm, keyPerm, vTmp2);
  14.335      __ vsldoi          (keyPerm, keyPerm, keyPerm, 8);
  14.336 +#endif
  14.337  
  14.338      __ cmpwi           (CCR0, keylen, 44);
  14.339      __ beq             (CCR0, L_do44);
  14.340 @@ -2494,32 +2498,32 @@
  14.341      __ cmpwi           (CCR0, keylen, 52);
  14.342      __ beq             (CCR0, L_do52);
  14.343  
  14.344 -    // load the 15th round key to vKey11
  14.345 +    // load the 15th round key to vKey1
  14.346      __ li              (keypos, 240);
  14.347 +    __ lvx             (vKey1, keypos, key);
  14.348 +    __ li              (keypos, 224);
  14.349 +    __ lvx             (vKey2, keypos, key);
  14.350 +    __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
  14.351 +
  14.352 +    // load the 14th round key to vKey2
  14.353 +    __ li              (keypos, 208);
  14.354 +    __ lvx             (vKey3, keypos, key);
  14.355 +    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
  14.356 +
  14.357 +    // load the 13th round key to vKey3
  14.358 +    __ li              (keypos, 192);
  14.359 +    __ lvx             (vKey4, keypos, key);
  14.360 +    __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
  14.361 +
  14.362 +    // load the 12th round key to vKey4
  14.363 +    __ li              (keypos, 176);
  14.364 +    __ lvx             (vKey5, keypos, key);
  14.365 +    __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
  14.366 +
  14.367 +    // load the 11th round key to vKey5
  14.368 +    __ li              (keypos, 160);
  14.369      __ lvx             (vTmp1, keypos, key);
  14.370 -    __ addi            (keypos, keypos, -16);
  14.371 -    __ lvx             (vTmp2, keypos, key);
  14.372 -    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
  14.373 -
  14.374 -    // load the 14th round key to vKey10
  14.375 -    __ addi            (keypos, keypos, -16);
  14.376 -    __ lvx             (vTmp1, keypos, key);
  14.377 -    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
  14.378 -
  14.379 -    // load the 13th round key to vKey10
  14.380 -    __ addi            (keypos, keypos, -16);
  14.381 -    __ lvx             (vTmp2, keypos, key);
  14.382 -    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
  14.383 -
  14.384 -    // load the 12th round key to vKey10
  14.385 -    __ addi            (keypos, keypos, -16);
  14.386 -    __ lvx             (vTmp1, keypos, key);
  14.387 -    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
  14.388 -
  14.389 -    // load the 11th round key to vKey10
  14.390 -    __ addi            (keypos, keypos, -16);
  14.391 -    __ lvx             (vTmp2, keypos, key);
  14.392 -    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
  14.393 +    __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
  14.394  
  14.395      // 1st - 5th rounds
  14.396      __ vxor            (vRet, vRet, vKey1);
  14.397 @@ -2532,22 +2536,22 @@
  14.398  
  14.399      __ bind            (L_do52);
  14.400  
  14.401 -    // load the 13th round key to vKey11
  14.402 +    // load the 13th round key to vKey1
  14.403      __ li              (keypos, 208);
  14.404 +    __ lvx             (vKey1, keypos, key);
  14.405 +    __ li              (keypos, 192);
  14.406 +    __ lvx             (vKey2, keypos, key);
  14.407 +    __ vec_perm        (vKey1, vKey2, vKey1, keyPerm);
  14.408 +
  14.409 +    // load the 12th round key to vKey2
  14.410 +    __ li              (keypos, 176);
  14.411 +    __ lvx             (vKey3, keypos, key);
  14.412 +    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
  14.413 +
  14.414 +    // load the 11th round key to vKey3
  14.415 +    __ li              (keypos, 160);
  14.416      __ lvx             (vTmp1, keypos, key);
  14.417 -    __ addi            (keypos, keypos, -16);
  14.418 -    __ lvx             (vTmp2, keypos, key);
  14.419 -    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
  14.420 -
  14.421 -    // load the 12th round key to vKey10
  14.422 -    __ addi            (keypos, keypos, -16);
  14.423 -    __ lvx             (vTmp1, keypos, key);
  14.424 -    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
  14.425 -
  14.426 -    // load the 11th round key to vKey10
  14.427 -    __ addi            (keypos, keypos, -16);
  14.428 -    __ lvx             (vTmp2, keypos, key);
  14.429 -    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
  14.430 +    __ vec_perm        (vKey3, vTmp1, vKey3, keyPerm);
  14.431  
  14.432      // 1st - 3rd rounds
  14.433      __ vxor            (vRet, vRet, vKey1);
  14.434 @@ -2558,42 +2562,42 @@
  14.435  
  14.436      __ bind            (L_do44);
  14.437  
  14.438 -    // load the 11th round key to vKey11
  14.439 +    // load the 11th round key to vKey1
  14.440      __ li              (keypos, 176);
  14.441 +    __ lvx             (vKey1, keypos, key);
  14.442 +    __ li              (keypos, 160);
  14.443      __ lvx             (vTmp1, keypos, key);
  14.444 -    __ addi            (keypos, keypos, -16);
  14.445 -    __ lvx             (vTmp2, keypos, key);
  14.446 -    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
  14.447 +    __ vec_perm        (vKey1, vTmp1, vKey1, keyPerm);
  14.448  
  14.449      // 1st round
  14.450      __ vxor            (vRet, vRet, vKey1);
  14.451  
  14.452      __ bind            (L_doLast);
  14.453  
  14.454 -    // load the 10th round key to vKey10
  14.455 -    __ addi            (keypos, keypos, -16);
  14.456 +    // load the 10th round key to vKey1
  14.457 +    __ li              (keypos, 144);
  14.458 +    __ lvx             (vKey2, keypos, key);
  14.459 +    __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
  14.460 +
  14.461 +    // load the 9th round key to vKey2
  14.462 +    __ li              (keypos, 128);
  14.463 +    __ lvx             (vKey3, keypos, key);
  14.464 +    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
  14.465 +
  14.466 +    // load the 8th round key to vKey3
  14.467 +    __ li              (keypos, 112);
  14.468 +    __ lvx             (vKey4, keypos, key);
  14.469 +    __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
  14.470 +
  14.471 +    // load the 7th round key to vKey4
  14.472 +    __ li              (keypos, 96);
  14.473 +    __ lvx             (vKey5, keypos, key);
  14.474 +    __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
  14.475 +
  14.476 +    // load the 6th round key to vKey5
  14.477 +    __ li              (keypos, 80);
  14.478      __ lvx             (vTmp1, keypos, key);
  14.479 -    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
  14.480 -
  14.481 -    // load the 9th round key to vKey10
  14.482 -    __ addi            (keypos, keypos, -16);
  14.483 -    __ lvx             (vTmp2, keypos, key);
  14.484 -    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
  14.485 -
  14.486 -    // load the 8th round key to vKey10
  14.487 -    __ addi            (keypos, keypos, -16);
  14.488 -    __ lvx             (vTmp1, keypos, key);
  14.489 -    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
  14.490 -
  14.491 -    // load the 7th round key to vKey10
  14.492 -    __ addi            (keypos, keypos, -16);
  14.493 -    __ lvx             (vTmp2, keypos, key);
  14.494 -    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
  14.495 -
  14.496 -    // load the 6th round key to vKey10
  14.497 -    __ addi            (keypos, keypos, -16);
  14.498 -    __ lvx             (vTmp1, keypos, key);
  14.499 -    __ vperm           (vKey5, vTmp2, vTmp1, keyPerm);
  14.500 +    __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
  14.501  
  14.502      // last 10th - 6th rounds
  14.503      __ vncipher        (vRet, vRet, vKey1);
  14.504 @@ -2602,30 +2606,29 @@
  14.505      __ vncipher        (vRet, vRet, vKey4);
  14.506      __ vncipher        (vRet, vRet, vKey5);
  14.507  
  14.508 -    // load the 5th round key to vKey10
  14.509 -    __ addi            (keypos, keypos, -16);
  14.510 -    __ lvx             (vTmp2, keypos, key);
  14.511 -    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
  14.512 -
  14.513 -    // load the 4th round key to vKey10
  14.514 -    __ addi            (keypos, keypos, -16);
  14.515 -    __ lvx             (vTmp1, keypos, key);
  14.516 -    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
  14.517 -
  14.518 -    // load the 3rd round key to vKey10
  14.519 -    __ addi            (keypos, keypos, -16);
  14.520 -    __ lvx             (vTmp2, keypos, key);
  14.521 -    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
  14.522 -
  14.523 -    // load the 2nd round key to vKey10
  14.524 -    __ addi            (keypos, keypos, -16);
  14.525 -    __ lvx             (vTmp1, keypos, key);
  14.526 -    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
  14.527 -
  14.528 -    // load the 1st round key to vKey10
  14.529 -    __ addi            (keypos, keypos, -16);
  14.530 -    __ lvx             (vTmp2, keypos, key);
  14.531 -    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
  14.532 +    // load the 5th round key to vKey1
  14.533 +    __ li              (keypos, 64);
  14.534 +    __ lvx             (vKey2, keypos, key);
  14.535 +    __ vec_perm        (vKey1, vKey2, vTmp1, keyPerm);
  14.536 +
  14.537 +    // load the 4th round key to vKey2
  14.538 +    __ li              (keypos, 48);
  14.539 +    __ lvx             (vKey3, keypos, key);
  14.540 +    __ vec_perm        (vKey2, vKey3, vKey2, keyPerm);
  14.541 +
  14.542 +    // load the 3rd round key to vKey3
  14.543 +    __ li              (keypos, 32);
  14.544 +    __ lvx             (vKey4, keypos, key);
  14.545 +    __ vec_perm        (vKey3, vKey4, vKey3, keyPerm);
  14.546 +
  14.547 +    // load the 2nd round key to vKey4
  14.548 +    __ li              (keypos, 16);
  14.549 +    __ lvx             (vKey5, keypos, key);
  14.550 +    __ vec_perm        (vKey4, vKey5, vKey4, keyPerm);
  14.551 +
  14.552 +    // load the 1st round key to vKey5
  14.553 +    __ lvx             (vTmp1, key);
  14.554 +    __ vec_perm        (vKey5, vTmp1, vKey5, keyPerm);
  14.555  
  14.556      // last 5th - 1th rounds
  14.557      __ vncipher        (vRet, vRet, vKey1);
  14.558 @@ -2634,24 +2637,54 @@
  14.559      __ vncipher        (vRet, vRet, vKey4);
  14.560      __ vncipherlast    (vRet, vRet, vKey5);
  14.561  
  14.562 -    __ neg             (temp, to);
  14.563 -    __ lvsr            (toPerm, temp);
  14.564 -    __ vspltisb        (vTmp2, -1);
  14.565 -    __ vxor            (vTmp1, vTmp1, vTmp1);
  14.566 -    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
  14.567 -    __ vxor            (toPerm, toPerm, fSplt);
  14.568 +    // store result (unaligned)
  14.569 +#ifdef VM_LITTLE_ENDIAN
  14.570 +    __ lvsl            (toPerm, to);
  14.571 +#else
  14.572 +    __ lvsr            (toPerm, to);
  14.573 +#endif
  14.574 +    __ vspltisb        (vTmp3, -1);
  14.575 +    __ vspltisb        (vTmp4, 0);
  14.576      __ lvx             (vTmp1, to);
  14.577 -    __ vperm           (vRet, vRet, vRet, toPerm);
  14.578 -    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
  14.579 -    __ lvx             (vTmp4, fifteen, to);
  14.580 +    __ lvx             (vTmp2, fifteen, to);
  14.581 +#ifdef VM_LITTLE_ENDIAN
  14.582 +    __ vperm           (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask
  14.583 +    __ vxor            (toPerm, toPerm, fSplt);       // swap bytes
  14.584 +#else
  14.585 +    __ vperm           (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask
  14.586 +#endif
  14.587 +    __ vperm           (vTmp4, vRet, vRet, toPerm);   // rotate data
  14.588 +    __ vsel            (vTmp2, vTmp4, vTmp2, vTmp3);
  14.589 +    __ vsel            (vTmp1, vTmp1, vTmp4, vTmp3);
  14.590 +    __ stvx            (vTmp2, fifteen, to);          // store this one first (may alias)
  14.591      __ stvx            (vTmp1, to);
  14.592 -    __ vsel            (vRet, vRet, vTmp4, vTmp2);
  14.593 -    __ stvx            (vRet, fifteen, to);
  14.594  
  14.595      __ blr();
  14.596       return start;
  14.597    }
  14.598  
  14.599 +  address generate_sha256_implCompress(bool multi_block, const char *name) {
  14.600 +    assert(UseSHA, "need SHA instructions");
  14.601 +    StubCodeMark mark(this, "StubRoutines", name);
  14.602 +    address start = __ function_entry();
  14.603 +
  14.604 +    __ sha256 (multi_block);
  14.605 +
  14.606 +    __ blr();
  14.607 +    return start;
  14.608 +  }
  14.609 +
  14.610 +  address generate_sha512_implCompress(bool multi_block, const char *name) {
  14.611 +    assert(UseSHA, "need SHA instructions");
  14.612 +    StubCodeMark mark(this, "StubRoutines", name);
  14.613 +    address start = __ function_entry();
  14.614 +
  14.615 +    __ sha512 (multi_block);
  14.616 +
  14.617 +    __ blr();
  14.618 +    return start;
  14.619 +  }
  14.620 +
  14.621    void generate_arraycopy_stubs() {
  14.622      // Note: the disjoint stubs must be generated first, some of
  14.623      // the conjoint stubs use them.
  14.624 @@ -2881,6 +2914,15 @@
  14.625        StubRoutines::_montgomerySquare
  14.626          = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
  14.627      }
  14.628 +
  14.629 +    if (UseSHA256Intrinsics) {
  14.630 +      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
  14.631 +      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
  14.632 +    }
  14.633 +    if (UseSHA512Intrinsics) {
  14.634 +      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
  14.635 +      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
  14.636 +    }
  14.637    }
  14.638  
  14.639   public:
    15.1 --- a/src/cpu/ppc/vm/stubRoutines_ppc_64.hpp	Sat Nov 09 20:15:27 2019 +0800
    15.2 +++ b/src/cpu/ppc/vm/stubRoutines_ppc_64.hpp	Sat Nov 09 20:29:45 2019 +0800
    15.3 @@ -34,7 +34,7 @@
    15.4  
    15.5  enum platform_dependent_constants {
    15.6    code_size1 = 20000,          // simply increase if too small (assembler will crash if too small)
    15.7 -  code_size2 = 20000           // simply increase if too small (assembler will crash if too small)
    15.8 +  code_size2 = 24000           // simply increase if too small (assembler will crash if too small)
    15.9  };
   15.10  
   15.11  // CRC32 Intrinsics.
    16.1 --- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Sat Nov 09 20:15:27 2019 +0800
    16.2 +++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Sat Nov 09 20:29:45 2019 +0800
    16.3 @@ -110,7 +110,7 @@
    16.4    // Create and print feature-string.
    16.5    char buf[(num_features+1) * 16]; // Max 16 chars per feature.
    16.6    jio_snprintf(buf, sizeof(buf),
    16.7 -               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s",
    16.8 +               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
    16.9                 (has_fsqrt()   ? " fsqrt"   : ""),
   16.10                 (has_isel()    ? " isel"    : ""),
   16.11                 (has_lxarxeh() ? " lxarxeh" : ""),
   16.12 @@ -124,7 +124,8 @@
   16.13                 (has_vcipher() ? " aes"     : ""),
   16.14                 (has_vpmsumb() ? " vpmsumb" : ""),
   16.15                 (has_mfdscr()  ? " mfdscr"  : ""),
   16.16 -               (has_vsx()     ? " vsx"     : "")
   16.17 +               (has_vsx()     ? " vsx"     : ""),
   16.18 +               (has_vshasig() ? " sha"     : "")
   16.19                 // Make sure number of %s matches num_features!
   16.20                );
   16.21    _features_str = strdup(buf);
   16.22 @@ -173,7 +174,6 @@
   16.23    }
   16.24  
   16.25    // The AES intrinsic stubs require AES instruction support.
   16.26 -#if defined(VM_LITTLE_ENDIAN)
   16.27    if (has_vcipher()) {
   16.28      if (FLAG_IS_DEFAULT(UseAES)) {
   16.29        UseAES = true;
   16.30 @@ -194,29 +194,43 @@
   16.31      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   16.32    }
   16.33  
   16.34 -#else
   16.35 -  if (UseAES) {
   16.36 -    warning("AES instructions are not available on this CPU");
   16.37 -    FLAG_SET_DEFAULT(UseAES, false);
   16.38 -  }
   16.39 -  if (UseAESIntrinsics) {
   16.40 -    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
   16.41 -      warning("AES intrinsics are not available on this CPU");
   16.42 -    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   16.43 -  }
   16.44 -#endif
   16.45 -
   16.46 -  if (UseSHA) {
   16.47 -    warning("SHA instructions are not available on this CPU");
   16.48 +  if (has_vshasig()) {
   16.49 +    if (FLAG_IS_DEFAULT(UseSHA)) {
   16.50 +      UseSHA = true;
   16.51 +    }
   16.52 +  } else if (UseSHA) {
   16.53 +    if (!FLAG_IS_DEFAULT(UseSHA))
   16.54 +      warning("SHA instructions are not available on this CPU");
   16.55      FLAG_SET_DEFAULT(UseSHA, false);
   16.56    }
   16.57 -  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
   16.58 -    warning("SHA intrinsics are not available on this CPU");
   16.59 +
   16.60 +  if (UseSHA1Intrinsics) {
   16.61 +    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
   16.62      FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
   16.63 +  }
   16.64 +
   16.65 +  if (UseSHA && has_vshasig()) {
   16.66 +    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
   16.67 +      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
   16.68 +    }
   16.69 +  } else if (UseSHA256Intrinsics) {
   16.70 +    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
   16.71      FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
   16.72 +  }
   16.73 +
   16.74 +  if (UseSHA && has_vshasig()) {
   16.75 +    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
   16.76 +      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
   16.77 +    }
   16.78 +  } else if (UseSHA512Intrinsics) {
   16.79 +    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
   16.80      FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
   16.81    }
   16.82  
   16.83 +  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
   16.84 +    FLAG_SET_DEFAULT(UseSHA, false);
   16.85 +  }
   16.86 +
   16.87    if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
   16.88      UseMontgomeryMultiplyIntrinsic = true;
   16.89    }
   16.90 @@ -503,6 +517,7 @@
   16.91    a->vpmsumb(VR0, VR1, VR2);                   // code[12] -> vpmsumb
   16.92    a->mfdscr(R0);                               // code[13] -> mfdscr
   16.93    a->lxvd2x(VSR0, R3_ARG1);                    // code[14] -> vsx
   16.94 +  a->vshasigmaw(VR0, VR1, 1, 0xF);             // code[15] -> vshasig
   16.95    a->blr();
   16.96  
   16.97    // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
   16.98 @@ -551,6 +566,7 @@
   16.99    if (code[feature_cntr++]) features |= vpmsumb_m;
  16.100    if (code[feature_cntr++]) features |= mfdscr_m;
  16.101    if (code[feature_cntr++]) features |= vsx_m;
  16.102 +  if (code[feature_cntr++]) features |= vshasig_m;
  16.103  
  16.104    // Print the detection code.
  16.105    if (PrintAssembly) {
    17.1 --- a/src/cpu/ppc/vm/vm_version_ppc.hpp	Sat Nov 09 20:15:27 2019 +0800
    17.2 +++ b/src/cpu/ppc/vm/vm_version_ppc.hpp	Sat Nov 09 20:29:45 2019 +0800
    17.3 @@ -47,6 +47,7 @@
    17.4      vpmsumb,
    17.5      mfdscr,
    17.6      vsx,
    17.7 +    vshasig,
    17.8      num_features // last entry to count features
    17.9    };
   17.10    enum Feature_Flag_Set {
   17.11 @@ -63,6 +64,7 @@
   17.12      dcba_m                = (1 << dcba   ),
   17.13      lqarx_m               = (1 << lqarx  ),
   17.14      vcipher_m             = (1 << vcipher),
   17.15 +    vshasig_m             = (1 << vshasig),
   17.16      vpmsumb_m             = (1 << vpmsumb),
   17.17      mfdscr_m              = (1 << mfdscr ),
   17.18      vsx_m                 = (1 << vsx    ),
   17.19 @@ -99,6 +101,7 @@
   17.20    static bool has_vpmsumb() { return (_features & vpmsumb_m) != 0; }
   17.21    static bool has_mfdscr()  { return (_features & mfdscr_m) != 0; }
   17.22    static bool has_vsx()     { return (_features & vsx_m) != 0; }
   17.23 +  static bool has_vshasig() { return (_features & vshasig_m) != 0; }
   17.24  
   17.25    static const char* cpu_features() { return _features_str; }
   17.26  
    18.1 --- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Sat Nov 09 20:15:27 2019 +0800
    18.2 +++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Sat Nov 09 20:29:45 2019 +0800
    18.3 @@ -33,7 +33,7 @@
    18.4  
    18.5  enum platform_dependent_constants {
    18.6    code_size1 = 19000,          // simply increase if too small (assembler will crash if too small)
    18.7 -  code_size2 = 23000           // simply increase if too small (assembler will crash if too small)
    18.8 +  code_size2 = 24000           // simply increase if too small (assembler will crash if too small)
    18.9  };
   18.10  
   18.11  class x86 {
    19.1 --- a/src/cpu/x86/vm/x86_64.ad	Sat Nov 09 20:15:27 2019 +0800
    19.2 +++ b/src/cpu/x86/vm/x86_64.ad	Sat Nov 09 20:29:45 2019 +0800
    19.3 @@ -3740,6 +3740,23 @@
    19.4    %}
    19.5  %}
    19.6  
    19.7 +// Indirect Memory Plus Positive Index Register Plus Offset Operand
    19.8 +operand indPosIndexOffset(any_RegP reg, immL32 off, rRegI idx)
    19.9 +%{
   19.10 +  constraint(ALLOC_IN_RC(ptr_reg));
   19.11 +  predicate(n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0);
   19.12 +  match(AddP (AddP reg (ConvI2L idx)) off);
   19.13 +
   19.14 +  op_cost(10);
   19.15 +  format %{"[$reg + $off + $idx]" %}
   19.16 +  interface(MEMORY_INTER) %{
   19.17 +    base($reg);
   19.18 +    index($idx);
   19.19 +    scale(0x0);
   19.20 +    disp($off);
   19.21 +  %}
   19.22 +%}
   19.23 +
   19.24  // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
   19.25  operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
   19.26  %{
   19.27 @@ -3891,6 +3908,23 @@
   19.28    %}
   19.29  %}
   19.30  
   19.31 +// Indirect Memory Times Plus Positive Index Register Plus Offset Operand
   19.32 +operand indPosIndexOffsetNarrow(rRegN reg, immL32 off, rRegI idx)
   19.33 +%{
   19.34 +  constraint(ALLOC_IN_RC(ptr_reg));
   19.35 +  predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->as_Type()->type()->is_long()->_lo >= 0);
   19.36 +  match(AddP (AddP (DecodeN reg) (ConvI2L idx)) off);
   19.37 +
   19.38 +  op_cost(10);
   19.39 +  format %{"[$reg + $off + $idx]" %}
   19.40 +  interface(MEMORY_INTER) %{
   19.41 +    base($reg);
   19.42 +    index($idx);
   19.43 +    scale(0x0);
   19.44 +    disp($off);
   19.45 +  %}
   19.46 +%}
   19.47 +
   19.48  // Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
   19.49  operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
   19.50  %{
   19.51 @@ -4082,11 +4116,11 @@
   19.52  // case of this is memory operands.
   19.53  
   19.54  opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
   19.55 -               indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
   19.56 +               indIndexScale, indIndexScaleOffset, indPosIndexOffset, indPosIndexScaleOffset,
   19.57                 indCompressedOopOffset,
   19.58                 indirectNarrow, indOffset8Narrow, indOffset32Narrow,
   19.59                 indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
   19.60 -               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
   19.61 +               indIndexScaleOffsetNarrow, indPosIndexOffsetNarrow, indPosIndexScaleOffsetNarrow);
   19.62  
   19.63  //----------PIPELINE-----------------------------------------------------------
   19.64  // Rules which define the behavior of the target architectures pipeline.
   19.65 @@ -5120,6 +5154,17 @@
   19.66    ins_pipe(ialu_reg_reg_fat);
   19.67  %}
   19.68  
   19.69 +instruct leaPPosIdxOff(rRegP dst, indPosIndexOffset mem)
   19.70 +%{
   19.71 +  match(Set dst mem);
   19.72 +
   19.73 +  ins_cost(110);
   19.74 +  format %{ "leaq    $dst, $mem\t# ptr posidxoff" %}
   19.75 +  opcode(0x8D);
   19.76 +  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
   19.77 +  ins_pipe(ialu_reg_reg_fat);
   19.78 +%}
   19.79 +
   19.80  instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset mem)
   19.81  %{
   19.82    match(Set dst mem);
   19.83 @@ -5204,6 +5249,18 @@
   19.84    ins_pipe(ialu_reg_reg_fat);
   19.85  %}
   19.86  
   19.87 +instruct leaPPosIdxOffNarrow(rRegP dst, indPosIndexOffsetNarrow mem)
   19.88 +%{
   19.89 +  predicate(Universe::narrow_oop_shift() == 0);
   19.90 +  match(Set dst mem);
   19.91 +
   19.92 +  ins_cost(110);
   19.93 +  format %{ "leaq    $dst, $mem\t# ptr posidxoffnarrow" %}
   19.94 +  opcode(0x8D);
   19.95 +  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
   19.96 +  ins_pipe(ialu_reg_reg_fat);
   19.97 +%}
   19.98 +
   19.99  instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem)
  19.100  %{
  19.101    predicate(Universe::narrow_oop_shift() == 0);
    20.1 --- a/src/os/aix/vm/os_aix.cpp	Sat Nov 09 20:15:27 2019 +0800
    20.2 +++ b/src/os/aix/vm/os_aix.cpp	Sat Nov 09 20:29:45 2019 +0800
    20.3 @@ -4184,8 +4184,7 @@
    20.4  
    20.5    /* Scan the directory */
    20.6    bool result = true;
    20.7 -  char buf[sizeof(struct dirent) + MAX_PATH];
    20.8 -  while (result && (ptr = ::readdir(dir)) != NULL) {
    20.9 +  while (result && (ptr = readdir(dir)) != NULL) {
   20.10      if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
   20.11        result = false;
   20.12      }
    21.1 --- a/src/os/aix/vm/os_aix.inline.hpp	Sat Nov 09 20:15:27 2019 +0800
    21.2 +++ b/src/os/aix/vm/os_aix.inline.hpp	Sat Nov 09 20:29:45 2019 +0800
    21.3 @@ -92,19 +92,6 @@
    21.4  
    21.5  inline const int os::default_file_open_flags() { return 0;}
    21.6  
    21.7 -inline DIR* os::opendir(const char* dirname)
    21.8 -{
    21.9 -  assert(dirname != NULL, "just checking");
   21.10 -  return ::opendir(dirname);
   21.11 -}
   21.12 -
   21.13 -inline int os::readdir_buf_size(const char *path)
   21.14 -{
   21.15 -  // according to aix sys/limits, NAME_MAX must be retrieved at runtime. */
   21.16 -  const long my_NAME_MAX = pathconf(path, _PC_NAME_MAX);
   21.17 -  return my_NAME_MAX + sizeof(dirent) + 1;
   21.18 -}
   21.19 -
   21.20  inline jlong os::lseek(int fd, jlong offset, int whence) {
   21.21    return (jlong) ::lseek64(fd, offset, whence);
   21.22  }
   21.23 @@ -121,28 +108,6 @@
   21.24    return ::ftruncate64(fd, length);
   21.25  }
   21.26  
   21.27 -inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
   21.28 -{
   21.29 -  dirent* p;
   21.30 -  int status;
   21.31 -  assert(dirp != NULL, "just checking");
   21.32 -
   21.33 -  // NOTE: Linux readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX
   21.34 -  // version. Here is the doc for this function:
   21.35 -  // http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html
   21.36 -
   21.37 -  if((status = ::readdir_r(dirp, dbuf, &p)) != 0) {
   21.38 -    errno = status;
   21.39 -    return NULL;
   21.40 -  } else
   21.41 -    return p;
   21.42 -}
   21.43 -
   21.44 -inline int os::closedir(DIR *dirp) {
   21.45 -  assert(dirp != NULL, "argument is NULL");
   21.46 -  return ::closedir(dirp);
   21.47 -}
   21.48 -
   21.49  // macros for restartable system calls
   21.50  
   21.51  #define RESTARTABLE(_cmd, _result) do { \
    22.1 --- a/src/os/aix/vm/perfMemory_aix.cpp	Sat Nov 09 20:15:27 2019 +0800
    22.2 +++ b/src/os/aix/vm/perfMemory_aix.cpp	Sat Nov 09 20:29:45 2019 +0800
    22.3 @@ -612,9 +612,8 @@
    22.4    // to determine the user name for the process id.
    22.5    //
    22.6    struct dirent* dentry;
    22.7 -  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
    22.8    errno = 0;
    22.9 -  while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
   22.10 +  while ((dentry = os::readdir(tmpdirp)) != NULL) {
   22.11  
   22.12      // check if the directory entry is a hsperfdata file
   22.13      if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) {
   22.14 @@ -648,9 +647,8 @@
   22.15      }
   22.16  
   22.17      struct dirent* udentry;
   22.18 -    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
   22.19      errno = 0;
   22.20 -    while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
   22.21 +    while ((udentry = os::readdir(subdirp)) != NULL) {
   22.22  
   22.23        if (filename_to_pid(udentry->d_name) == vmid) {
   22.24          struct stat statbuf;
   22.25 @@ -694,11 +692,9 @@
   22.26        }
   22.27      }
   22.28      os::closedir(subdirp);
   22.29 -    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
   22.30      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   22.31    }
   22.32    os::closedir(tmpdirp);
   22.33 -  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
   22.34  
   22.35    return(oldest_user);
   22.36  }
   22.37 @@ -774,10 +770,8 @@
   22.38    // loop under these conditions is dependent upon the implementation of
   22.39    // opendir/readdir.
   22.40    struct dirent* entry;
   22.41 -  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   22.42 -
   22.43    errno = 0;
   22.44 -  while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
   22.45 +  while ((entry = os::readdir(dirp)) != NULL) {
   22.46  
   22.47      pid_t pid = filename_to_pid(entry->d_name);
   22.48  
   22.49 @@ -816,7 +810,6 @@
   22.50    // Close the directory and reset the current working directory.
   22.51    close_directory_secure_cwd(dirp, saved_cwd_fd);
   22.52  
   22.53 -  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   22.54  }
   22.55  
   22.56  // Make the user specific temporary directory. Returns true if
    23.1 --- a/src/os/bsd/vm/os_bsd.cpp	Sat Nov 09 20:15:27 2019 +0800
    23.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Sat Nov 09 20:29:45 2019 +0800
    23.3 @@ -3957,8 +3957,7 @@
    23.4  
    23.5    /* Scan the directory */
    23.6    bool result = true;
    23.7 -  char buf[sizeof(struct dirent) + MAX_PATH];
    23.8 -  while (result && (ptr = ::readdir(dir)) != NULL) {
    23.9 +  while (result && (ptr = readdir(dir)) != NULL) {
   23.10      if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
   23.11        result = false;
   23.12      }
    24.1 --- a/src/os/bsd/vm/os_bsd.inline.hpp	Sat Nov 09 20:15:27 2019 +0800
    24.2 +++ b/src/os/bsd/vm/os_bsd.inline.hpp	Sat Nov 09 20:29:45 2019 +0800
    24.3 @@ -95,17 +95,6 @@
    24.4  
    24.5  inline const int os::default_file_open_flags() { return 0;}
    24.6  
    24.7 -inline DIR* os::opendir(const char* dirname)
    24.8 -{
    24.9 -  assert(dirname != NULL, "just checking");
   24.10 -  return ::opendir(dirname);
   24.11 -}
   24.12 -
   24.13 -inline int os::readdir_buf_size(const char *path)
   24.14 -{
   24.15 -  return NAME_MAX + sizeof(dirent) + 1;
   24.16 -}
   24.17 -
   24.18  inline jlong os::lseek(int fd, jlong offset, int whence) {
   24.19    return (jlong) ::lseek(fd, offset, whence);
   24.20  }
   24.21 @@ -122,28 +111,6 @@
   24.22    return ::ftruncate(fd, length);
   24.23  }
   24.24  
   24.25 -inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
   24.26 -{
   24.27 -  dirent* p;
   24.28 -  int status;
   24.29 -  assert(dirp != NULL, "just checking");
   24.30 -
   24.31 -  // NOTE: Bsd readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX
   24.32 -  // version. Here is the doc for this function:
   24.33 -  // http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html
   24.34 -
   24.35 -  if((status = ::readdir_r(dirp, dbuf, &p)) != 0) {
   24.36 -    errno = status;
   24.37 -    return NULL;
   24.38 -  } else
   24.39 -    return p;
   24.40 -}
   24.41 -
   24.42 -inline int os::closedir(DIR *dirp) {
   24.43 -  assert(dirp != NULL, "argument is NULL");
   24.44 -  return ::closedir(dirp);
   24.45 -}
   24.46 -
   24.47  // macros for restartable system calls
   24.48  
   24.49  #define RESTARTABLE(_cmd, _result) do { \
    25.1 --- a/src/os/bsd/vm/perfMemory_bsd.cpp	Sat Nov 09 20:15:27 2019 +0800
    25.2 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Sat Nov 09 20:29:45 2019 +0800
    25.3 @@ -533,9 +533,8 @@
    25.4    // to determine the user name for the process id.
    25.5    //
    25.6    struct dirent* dentry;
    25.7 -  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
    25.8    errno = 0;
    25.9 -  while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
   25.10 +  while ((dentry = os::readdir(tmpdirp)) != NULL) {
   25.11  
   25.12      // check if the directory entry is a hsperfdata file
   25.13      if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) {
   25.14 @@ -557,9 +556,8 @@
   25.15      }
   25.16  
   25.17      struct dirent* udentry;
   25.18 -    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
   25.19      errno = 0;
   25.20 -    while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
   25.21 +    while ((udentry = os::readdir(subdirp)) != NULL) {
   25.22  
   25.23        if (filename_to_pid(udentry->d_name) == vmid) {
   25.24          struct stat statbuf;
   25.25 @@ -603,11 +601,9 @@
   25.26        }
   25.27      }
   25.28      os::closedir(subdirp);
   25.29 -    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
   25.30      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   25.31    }
   25.32    os::closedir(tmpdirp);
   25.33 -  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
   25.34  
   25.35    return(oldest_user);
   25.36  }
   25.37 @@ -686,10 +682,8 @@
   25.38    // opendir/readdir.
   25.39    //
   25.40    struct dirent* entry;
   25.41 -  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   25.42 -
   25.43    errno = 0;
   25.44 -  while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
   25.45 +  while ((entry = os::readdir(dirp)) != NULL) {
   25.46  
   25.47      pid_t pid = filename_to_pid(entry->d_name);
   25.48  
   25.49 @@ -729,7 +723,6 @@
   25.50    // close the directory and reset the current working directory
   25.51    close_directory_secure_cwd(dirp, saved_cwd_fd);
   25.52  
   25.53 -  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   25.54  }
   25.55  
   25.56  // make the user specific temporary directory. Returns true if
    26.1 --- a/src/os/linux/vm/os_linux.cpp	Sat Nov 09 20:15:27 2019 +0800
    26.2 +++ b/src/os/linux/vm/os_linux.cpp	Sat Nov 09 20:29:45 2019 +0800
    26.3 @@ -5522,8 +5522,7 @@
    26.4  
    26.5    /* Scan the directory */
    26.6    bool result = true;
    26.7 -  char buf[sizeof(struct dirent) + MAX_PATH];
    26.8 -  while (result && (ptr = ::readdir(dir)) != NULL) {
    26.9 +  while (result && (ptr = readdir(dir)) != NULL) {
   26.10      if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
   26.11        result = false;
   26.12      }
    27.1 --- a/src/os/linux/vm/os_linux.inline.hpp	Sat Nov 09 20:15:27 2019 +0800
    27.2 +++ b/src/os/linux/vm/os_linux.inline.hpp	Sat Nov 09 20:29:45 2019 +0800
    27.3 @@ -87,17 +87,6 @@
    27.4  
    27.5  inline const int os::default_file_open_flags() { return 0;}
    27.6  
    27.7 -inline DIR* os::opendir(const char* dirname)
    27.8 -{
    27.9 -  assert(dirname != NULL, "just checking");
   27.10 -  return ::opendir(dirname);
   27.11 -}
   27.12 -
   27.13 -inline int os::readdir_buf_size(const char *path)
   27.14 -{
   27.15 -  return NAME_MAX + sizeof(dirent) + 1;
   27.16 -}
   27.17 -
   27.18  inline jlong os::lseek(int fd, jlong offset, int whence) {
   27.19    return (jlong) ::lseek64(fd, offset, whence);
   27.20  }
   27.21 @@ -114,28 +103,6 @@
   27.22    return ::ftruncate64(fd, length);
   27.23  }
   27.24  
   27.25 -inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
   27.26 -{
   27.27 -  dirent* p;
   27.28 -  int status;
   27.29 -  assert(dirp != NULL, "just checking");
   27.30 -
   27.31 -  // NOTE: Linux readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX
   27.32 -  // version. Here is the doc for this function:
   27.33 -  // http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html
   27.34 -
   27.35 -  if((status = ::readdir_r(dirp, dbuf, &p)) != 0) {
   27.36 -    errno = status;
   27.37 -    return NULL;
   27.38 -  } else
   27.39 -    return p;
   27.40 -}
   27.41 -
   27.42 -inline int os::closedir(DIR *dirp) {
   27.43 -  assert(dirp != NULL, "argument is NULL");
   27.44 -  return ::closedir(dirp);
   27.45 -}
   27.46 -
   27.47  // macros for restartable system calls
   27.48  
   27.49  #define RESTARTABLE(_cmd, _result) do { \
    28.1 --- a/src/os/linux/vm/perfMemory_linux.cpp	Sat Nov 09 20:15:27 2019 +0800
    28.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp	Sat Nov 09 20:29:45 2019 +0800
    28.3 @@ -533,9 +533,8 @@
    28.4    // to determine the user name for the process id.
    28.5    //
    28.6    struct dirent* dentry;
    28.7 -  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
    28.8    errno = 0;
    28.9 -  while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
   28.10 +  while ((dentry = os::readdir(tmpdirp)) != NULL) {
   28.11  
   28.12      // check if the directory entry is a hsperfdata file
   28.13      if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) {
   28.14 @@ -569,9 +568,8 @@
   28.15      }
   28.16  
   28.17      struct dirent* udentry;
   28.18 -    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
   28.19      errno = 0;
   28.20 -    while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
   28.21 +    while ((udentry = os::readdir(subdirp)) != NULL) {
   28.22  
   28.23        if (filename_to_pid(udentry->d_name) == vmid) {
   28.24          struct stat statbuf;
   28.25 @@ -615,11 +613,9 @@
   28.26        }
   28.27      }
   28.28      os::closedir(subdirp);
   28.29 -    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
   28.30      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   28.31    }
   28.32    os::closedir(tmpdirp);
   28.33 -  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
   28.34  
   28.35    return(oldest_user);
   28.36  }
   28.37 @@ -698,10 +694,8 @@
   28.38    // opendir/readdir.
   28.39    //
   28.40    struct dirent* entry;
   28.41 -  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   28.42 -
   28.43    errno = 0;
   28.44 -  while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
   28.45 +  while ((entry = os::readdir(dirp)) != NULL) {
   28.46  
   28.47      pid_t pid = filename_to_pid(entry->d_name);
   28.48  
   28.49 @@ -738,8 +732,6 @@
   28.50  
   28.51    // close the directory and reset the current working directory
   28.52    close_directory_secure_cwd(dirp, saved_cwd_fd);
   28.53 -
   28.54 -  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   28.55  }
   28.56  
   28.57  // make the user specific temporary directory. Returns true if
    29.1 --- a/src/os/posix/vm/os_posix.cpp	Sat Nov 09 20:15:27 2019 +0800
    29.2 +++ b/src/os/posix/vm/os_posix.cpp	Sat Nov 09 20:29:45 2019 +0800
    29.3 @@ -302,6 +302,21 @@
    29.4    return ::fdopen(fd, mode);
    29.5  }
    29.6  
    29.7 +DIR* os::opendir(const char* dirname) {
    29.8 +  assert(dirname != NULL, "just checking");
    29.9 +  return ::opendir(dirname);
   29.10 +}
   29.11 +
   29.12 +struct dirent* os::readdir(DIR* dirp) {
   29.13 +  assert(dirp != NULL, "just checking");
   29.14 +  return ::readdir(dirp);
   29.15 +}
   29.16 +
   29.17 +int os::closedir(DIR *dirp) {
   29.18 +  assert(dirp != NULL, "just checking");
   29.19 +  return ::closedir(dirp);
   29.20 +}
   29.21 +
   29.22  // Builds a platform dependent Agent_OnLoad_<lib_name> function name
   29.23  // which is used to find statically linked in agents.
   29.24  // Parameters:
    30.1 --- a/src/os/solaris/vm/os_solaris.cpp	Sat Nov 09 20:15:27 2019 +0800
    30.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Sat Nov 09 20:29:45 2019 +0800
    30.3 @@ -5163,9 +5163,7 @@
    30.4  
    30.5    /* Scan the directory */
    30.6    bool result = true;
    30.7 -  char buf[sizeof(struct dirent) + MAX_PATH];
    30.8 -  struct dirent *dbuf = (struct dirent *) buf;
    30.9 -  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
   30.10 +  while (result && (ptr = readdir(dir)) != NULL) {
   30.11      if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
   30.12        result = false;
   30.13      }
    31.1 --- a/src/os/solaris/vm/os_solaris.inline.hpp	Sat Nov 09 20:15:27 2019 +0800
    31.2 +++ b/src/os/solaris/vm/os_solaris.inline.hpp	Sat Nov 09 20:29:45 2019 +0800
    31.3 @@ -71,37 +71,6 @@
    31.4  }
    31.5  inline void os::dll_unload(void *lib) { ::dlclose(lib); }
    31.6  
    31.7 -inline DIR* os::opendir(const char* dirname) {
    31.8 -  assert(dirname != NULL, "just checking");
    31.9 -  return ::opendir(dirname);
   31.10 -}
   31.11 -
   31.12 -inline int os::readdir_buf_size(const char *path) {
   31.13 -  int size = pathconf(path, _PC_NAME_MAX);
   31.14 -  return (size < 0 ? MAXPATHLEN : size) + sizeof(dirent) + 1;
   31.15 -}
   31.16 -
   31.17 -inline struct dirent* os::readdir(DIR* dirp, dirent* dbuf) {
   31.18 -  assert(dirp != NULL, "just checking");
   31.19 -#if defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
   31.20 -  dirent* p;
   31.21 -  int status;
   31.22 -
   31.23 -  if((status = ::readdir_r(dirp, dbuf, &p)) != 0) {
   31.24 -    errno = status;
   31.25 -    return NULL;
   31.26 -  } else
   31.27 -    return p;
   31.28 -#else  // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
   31.29 -  return ::readdir_r(dirp, dbuf);
   31.30 -#endif // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64
   31.31 -}
   31.32 -
   31.33 -inline int os::closedir(DIR *dirp) {
   31.34 -  assert(dirp != NULL, "argument is NULL");
   31.35 -  return ::closedir(dirp);
   31.36 -}
   31.37 -
   31.38  //////////////////////////////////////////////////////////////////////////////
   31.39  ////////////////////////////////////////////////////////////////////////////////
   31.40  
    32.1 --- a/src/os/solaris/vm/perfMemory_solaris.cpp	Sat Nov 09 20:15:27 2019 +0800
    32.2 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Sat Nov 09 20:29:45 2019 +0800
    32.3 @@ -524,9 +524,8 @@
    32.4    // to determine the user name for the process id.
    32.5    //
    32.6    struct dirent* dentry;
    32.7 -  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
    32.8    errno = 0;
    32.9 -  while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
   32.10 +  while ((dentry = os::readdir(tmpdirp)) != NULL) {
   32.11  
   32.12      // check if the directory entry is a hsperfdata file
   32.13      if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) {
   32.14 @@ -560,9 +559,8 @@
   32.15      }
   32.16  
   32.17      struct dirent* udentry;
   32.18 -    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
   32.19      errno = 0;
   32.20 -    while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
   32.21 +    while ((udentry = os::readdir(subdirp)) != NULL) {
   32.22  
   32.23        if (filename_to_pid(udentry->d_name) == vmid) {
   32.24          struct stat statbuf;
   32.25 @@ -606,11 +604,9 @@
   32.26        }
   32.27      }
   32.28      os::closedir(subdirp);
   32.29 -    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
   32.30      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   32.31    }
   32.32    os::closedir(tmpdirp);
   32.33 -  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
   32.34  
   32.35    return(oldest_user);
   32.36  }
   32.37 @@ -737,10 +733,8 @@
   32.38    // opendir/readdir.
   32.39    //
   32.40    struct dirent* entry;
   32.41 -  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   32.42 -
   32.43    errno = 0;
   32.44 -  while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
   32.45 +  while ((entry = os::readdir(dirp)) != NULL) {
   32.46  
   32.47      pid_t pid = filename_to_pid(entry->d_name);
   32.48  
   32.49 @@ -780,7 +774,6 @@
   32.50    // close the directory and reset the current working directory
   32.51    close_directory_secure_cwd(dirp, saved_cwd_fd);
   32.52  
   32.53 -  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   32.54  }
   32.55  
   32.56  // make the user specific temporary directory. Returns true if
    33.1 --- a/src/os/windows/vm/os_windows.cpp	Sat Nov 09 20:15:27 2019 +0800
    33.2 +++ b/src/os/windows/vm/os_windows.cpp	Sat Nov 09 20:29:45 2019 +0800
    33.3 @@ -1172,14 +1172,12 @@
    33.4      return dirp;
    33.5  }
    33.6  
    33.7 -/* parameter dbuf unused on Windows */
    33.8 -
    33.9  struct dirent *
   33.10 -os::readdir(DIR *dirp, dirent *dbuf)
   33.11 +os::readdir(DIR *dirp)
   33.12  {
   33.13      assert(dirp != NULL, "just checking");      // hotspot change
   33.14      if (dirp->handle == INVALID_HANDLE_VALUE) {
   33.15 -        return 0;
   33.16 +        return NULL;
   33.17      }
   33.18  
   33.19      strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
   33.20 @@ -1187,7 +1185,7 @@
   33.21      if (!FindNextFile(dirp->handle, &dirp->find_data)) {
   33.22          if (GetLastError() == ERROR_INVALID_HANDLE) {
   33.23              errno = EBADF;
   33.24 -            return 0;
   33.25 +            return NULL;
   33.26          }
   33.27          FindClose(dirp->handle);
   33.28          dirp->handle = INVALID_HANDLE_VALUE;
    34.1 --- a/src/os/windows/vm/os_windows.inline.hpp	Sat Nov 09 20:15:27 2019 +0800
    34.2 +++ b/src/os/windows/vm/os_windows.inline.hpp	Sat Nov 09 20:29:45 2019 +0800
    34.3 @@ -65,14 +65,6 @@
    34.4    return true;
    34.5  }
    34.6  
    34.7 -inline int os::readdir_buf_size(const char *path)
    34.8 -{
    34.9 -  /* As Windows doesn't use the directory entry buffer passed to
   34.10 -     os::readdir() this can be as short as possible */
   34.11 -
   34.12 -  return 1;
   34.13 -}
   34.14 -
   34.15  // Bang the shadow pages if they need to be touched to be mapped.
   34.16  inline void os::bang_stack_shadow_pages() {
   34.17    // Write to each page of our new frame to force OS mapping.
    35.1 --- a/src/os/windows/vm/perfMemory_windows.cpp	Sat Nov 09 20:15:27 2019 +0800
    35.2 +++ b/src/os/windows/vm/perfMemory_windows.cpp	Sat Nov 09 20:29:45 2019 +0800
    35.3 @@ -316,9 +316,8 @@
    35.4    // to determine the user name for the process id.
    35.5    //
    35.6    struct dirent* dentry;
    35.7 -  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
    35.8    errno = 0;
    35.9 -  while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
   35.10 +  while ((dentry = os::readdir(tmpdirp)) != NULL) {
   35.11  
   35.12      // check if the directory entry is a hsperfdata file
   35.13      if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) {
   35.14 @@ -351,9 +350,8 @@
   35.15      }
   35.16  
   35.17      struct dirent* udentry;
   35.18 -    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
   35.19      errno = 0;
   35.20 -    while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
   35.21 +    while ((udentry = os::readdir(subdirp)) != NULL) {
   35.22  
   35.23        if (filename_to_pid(udentry->d_name) == vmid) {
   35.24          struct stat statbuf;
   35.25 @@ -405,11 +403,9 @@
   35.26        }
   35.27      }
   35.28      os::closedir(subdirp);
   35.29 -    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
   35.30      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
   35.31    }
   35.32    os::closedir(tmpdirp);
   35.33 -  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
   35.34  
   35.35    return(latest_user);
   35.36  }
   35.37 @@ -639,9 +635,8 @@
   35.38    // opendir/readdir.
   35.39    //
   35.40    struct dirent* entry;
   35.41 -  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
   35.42    errno = 0;
   35.43 -  while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
   35.44 +  while ((entry = os::readdir(dirp)) != NULL) {
   35.45  
   35.46      int pid = filename_to_pid(entry->d_name);
   35.47  
   35.48 @@ -682,7 +677,6 @@
   35.49      errno = 0;
   35.50    }
   35.51    os::closedir(dirp);
   35.52 -  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   35.53  }
   35.54  
   35.55  // create a file mapping object with the requested name, and size
    36.1 --- a/src/share/vm/c1/c1_Optimizer.cpp	Sat Nov 09 20:15:27 2019 +0800
    36.2 +++ b/src/share/vm/c1/c1_Optimizer.cpp	Sat Nov 09 20:29:45 2019 +0800
    36.3 @@ -175,6 +175,12 @@
    36.4    for_each_phi_fun(t_block, phi, return; );
    36.5    for_each_phi_fun(f_block, phi, return; );
    36.6  
    36.7 +  // Only replace safepoint gotos if state_before information is available (if is a safepoint)
    36.8 +  bool is_safepoint = if_->is_safepoint();
    36.9 +  if (!is_safepoint && (t_goto->is_safepoint() || f_goto->is_safepoint())) {
   36.10 +    return;
   36.11 +  }
   36.12 +
   36.13    // 2) substitute conditional expression
   36.14    //    with an IfOp followed by a Goto
   36.15    // cut if_ away and get node before
   36.16 @@ -203,7 +209,7 @@
   36.17  
   36.18    // append Goto to successor
   36.19    ValueStack* state_before = if_->state_before();
   36.20 -  Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
   36.21 +  Goto* goto_ = new Goto(sux, state_before, is_safepoint);
   36.22  
   36.23    // prepare state for Goto
   36.24    ValueStack* goto_state = if_state;
    37.1 --- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Sat Nov 09 20:15:27 2019 +0800
    37.2 +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Sat Nov 09 20:29:45 2019 +0800
    37.3 @@ -1170,45 +1170,43 @@
    37.4    }
    37.5  }
    37.6  
    37.7 -bool BCEscapeAnalyzer::do_analysis() {
    37.8 +void BCEscapeAnalyzer::do_analysis() {
    37.9    Arena* arena = CURRENT_ENV->arena();
   37.10    // identify basic blocks
   37.11    _methodBlocks = _method->get_method_blocks();
   37.12  
   37.13    iterate_blocks(arena);
   37.14 -  // TEMPORARY
   37.15 -  return true;
   37.16  }
   37.17  
   37.18  vmIntrinsics::ID BCEscapeAnalyzer::known_intrinsic() {
   37.19    vmIntrinsics::ID iid = method()->intrinsic_id();
   37.20 -
   37.21    if (iid == vmIntrinsics::_getClass ||
   37.22        iid ==  vmIntrinsics::_fillInStackTrace ||
   37.23 -      iid == vmIntrinsics::_hashCode)
   37.24 +      iid == vmIntrinsics::_hashCode) {
   37.25      return iid;
   37.26 -  else
   37.27 +  } else {
   37.28      return vmIntrinsics::_none;
   37.29 +  }
   37.30  }
   37.31  
   37.32 -bool BCEscapeAnalyzer::compute_escape_for_intrinsic(vmIntrinsics::ID iid) {
   37.33 +void BCEscapeAnalyzer::compute_escape_for_intrinsic(vmIntrinsics::ID iid) {
   37.34    ArgumentMap arg;
   37.35    arg.clear();
   37.36    switch (iid) {
   37.37 -  case vmIntrinsics::_getClass:
   37.38 -    _return_local = false;
   37.39 -    break;
   37.40 -  case vmIntrinsics::_fillInStackTrace:
   37.41 -    arg.set(0); // 'this'
   37.42 -    set_returned(arg);
   37.43 -    break;
   37.44 -  case vmIntrinsics::_hashCode:
   37.45 -    // initialized state is correct
   37.46 -    break;
   37.47 +    case vmIntrinsics::_getClass:
   37.48 +      _return_local = false;
   37.49 +      _return_allocated = false;
   37.50 +      break;
   37.51 +    case vmIntrinsics::_fillInStackTrace:
   37.52 +      arg.set(0); // 'this'
   37.53 +      set_returned(arg);
   37.54 +      break;
   37.55 +    case vmIntrinsics::_hashCode:
   37.56 +      // initialized state is correct
   37.57 +      break;
   37.58    default:
   37.59      assert(false, "unexpected intrinsic");
   37.60    }
   37.61 -  return true;
   37.62  }
   37.63  
   37.64  void BCEscapeAnalyzer::initialize() {
   37.65 @@ -1279,7 +1277,7 @@
   37.66    vmIntrinsics::ID iid = known_intrinsic();
   37.67  
   37.68    // check if method can be analyzed
   37.69 -  if (iid ==  vmIntrinsics::_none && (method()->is_abstract() || method()->is_native() || !method()->holder()->is_initialized()
   37.70 +  if (iid == vmIntrinsics::_none && (method()->is_abstract() || method()->is_native() || !method()->holder()->is_initialized()
   37.71        || _level > MaxBCEAEstimateLevel
   37.72        || method()->code_size() > MaxBCEAEstimateSize)) {
   37.73      if (BCEATraceLevel >= 1) {
   37.74 @@ -1312,8 +1310,6 @@
   37.75      tty->print_cr(" (%d bytes)", method()->code_size());
   37.76    }
   37.77  
   37.78 -  bool success;
   37.79 -
   37.80    initialize();
   37.81  
   37.82    // Do not scan method if it has no object parameters and
   37.83 @@ -1329,9 +1325,9 @@
   37.84    }
   37.85  
   37.86    if (iid != vmIntrinsics::_none)
   37.87 -    success = compute_escape_for_intrinsic(iid);
   37.88 +    compute_escape_for_intrinsic(iid);
   37.89    else {
   37.90 -    success = do_analysis();
   37.91 +    do_analysis();
   37.92    }
   37.93  
   37.94    // don't store interprocedural escape information if it introduces
    38.1 --- a/src/share/vm/ci/bcEscapeAnalyzer.hpp	Sat Nov 09 20:15:27 2019 +0800
    38.2 +++ b/src/share/vm/ci/bcEscapeAnalyzer.hpp	Sat Nov 09 20:29:45 2019 +0800
    38.3 @@ -101,8 +101,8 @@
    38.4    void clear_escape_info();
    38.5    void compute_escape_info();
    38.6    vmIntrinsics::ID known_intrinsic();
    38.7 -  bool compute_escape_for_intrinsic(vmIntrinsics::ID iid);
    38.8 -  bool do_analysis();
    38.9 +  void compute_escape_for_intrinsic(vmIntrinsics::ID iid);
   38.10 +  void do_analysis();
   38.11  
   38.12    void read_escape_info();
   38.13  
    39.1 --- a/src/share/vm/ci/ciStreams.cpp	Sat Nov 09 20:15:27 2019 +0800
    39.2 +++ b/src/share/vm/ci/ciStreams.cpp	Sat Nov 09 20:29:45 2019 +0800
    39.3 @@ -1,5 +1,5 @@
    39.4  /*
    39.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    39.6 + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
    39.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.8   *
    39.9   * This code is free software; you can redistribute it and/or modify it
   39.10 @@ -361,14 +361,14 @@
   39.11  ciMethod* ciBytecodeStream::get_method(bool& will_link, ciSignature* *declared_signature_result) {
   39.12    VM_ENTRY_MARK;
   39.13    ciEnv* env = CURRENT_ENV;
   39.14 -  constantPoolHandle cpool(_method->get_Method()->constants());
   39.15 +  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   39.16    ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
   39.17    will_link = m->is_loaded();
   39.18  
   39.19    // Use the MethodType stored in the CP cache to create a signature
   39.20    // with correct types (in respect to class loaders).
   39.21    if (has_method_type()) {
   39.22 -    ciSymbol*     sig_sym     = env->get_symbol(cpool->symbol_at(get_method_signature_index()));
   39.23 +    ciSymbol*     sig_sym     = env->get_symbol(cpool->symbol_at(get_method_signature_index(cpool)));
   39.24      ciKlass*      pool_holder = env->get_klass(cpool->pool_holder());
   39.25      ciMethodType* method_type = get_method_type();
   39.26      ciSignature* declared_signature = new (env->arena()) ciSignature(pool_holder, sig_sym, method_type);
   39.27 @@ -465,9 +465,8 @@
   39.28  // Get the constant pool index of the signature of the method
   39.29  // referenced by the current bytecode.  Used for generating
   39.30  // deoptimization information.
   39.31 -int ciBytecodeStream::get_method_signature_index() {
   39.32 +int ciBytecodeStream::get_method_signature_index(const constantPoolHandle& cpool) {
   39.33    GUARDED_VM_ENTRY(
   39.34 -    ConstantPool* cpool = _holder->get_instanceKlass()->constants();
   39.35      const int method_index = get_method_index();
   39.36      const int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
   39.37      return cpool->signature_ref_index_at(name_and_type_index);
    40.1 --- a/src/share/vm/ci/ciStreams.hpp	Sat Nov 09 20:15:27 2019 +0800
    40.2 +++ b/src/share/vm/ci/ciStreams.hpp	Sat Nov 09 20:29:45 2019 +0800
    40.3 @@ -1,5 +1,5 @@
    40.4  /*
    40.5 - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    40.6 + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
    40.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    40.8   *
    40.9   * This code is free software; you can redistribute it and/or modify it
   40.10 @@ -264,7 +264,7 @@
   40.11    ciMethodType* get_method_type();
   40.12    ciKlass*      get_declared_method_holder();
   40.13    int           get_method_holder_index();
   40.14 -  int           get_method_signature_index();
   40.15 +  int           get_method_signature_index(const constantPoolHandle& cpool);
   40.16  
   40.17    // Get the resolved references arrays from the constant pool
   40.18    ciObjArray* get_resolved_references();
    41.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Nov 09 20:15:27 2019 +0800
    41.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Nov 09 20:29:45 2019 +0800
    41.3 @@ -2790,6 +2790,7 @@
    41.4        // Previous workers starting region is valid
    41.5        // so let's iterate from there
    41.6        start_ind = (cs_size * (worker_i - 1)) / active_workers;
    41.7 +      OrderAccess::loadload();
    41.8        result = _worker_cset_start_region[worker_i - 1];
    41.9      }
   41.10  
    42.1 --- a/src/share/vm/oops/instanceKlass.cpp	Sat Nov 09 20:15:27 2019 +0800
    42.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Sat Nov 09 20:29:45 2019 +0800
    42.3 @@ -147,7 +147,7 @@
    42.4        len = name->utf8_length();                                 \
    42.5      }                                                            \
    42.6      HOTSPOT_CLASS_INITIALIZATION_##type(                         \
    42.7 -      data, len, (clss)->class_loader(), thread_type);           \
    42.8 +      data, len, (void *)(clss)->class_loader(), thread_type); \
    42.9    }
   42.10  
   42.11  #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
   42.12 @@ -160,7 +160,7 @@
   42.13        len = name->utf8_length();                                 \
   42.14      }                                                            \
   42.15      HOTSPOT_CLASS_INITIALIZATION_##type(                         \
   42.16 -      data, len, (clss)->class_loader(), thread_type, wait);     \
   42.17 +      data, len, (void *)(clss)->class_loader(), thread_type, wait); \
   42.18    }
   42.19  #endif /* USDT2 */
   42.20  
    43.1 --- a/src/share/vm/oops/instanceKlass.hpp	Sat Nov 09 20:15:27 2019 +0800
    43.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Sat Nov 09 20:29:45 2019 +0800
    43.3 @@ -225,6 +225,7 @@
    43.4    // _is_marked_dependent can be set concurrently, thus cannot be part of the
    43.5    // _misc_flags.
    43.6    bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
    43.7 +  bool            _is_being_redefined;   // used for locking redefinition
    43.8    bool            _has_unloaded_dependent;
    43.9  
   43.10    enum {
   43.11 @@ -667,6 +668,10 @@
   43.12      _nonstatic_oop_map_size = words;
   43.13    }
   43.14  
   43.15 +  // Redefinition locking.  Class can only be redefined by one thread at a time.
   43.16 +  bool is_being_redefined() const          { return _is_being_redefined; }
   43.17 +  void set_is_being_redefined(bool value)  { _is_being_redefined = value; }
   43.18 +
   43.19    // RedefineClasses() support for previous versions:
   43.20    void add_previous_version(instanceKlassHandle ikh, int emcp_method_count);
   43.21  
    44.1 --- a/src/share/vm/oops/klass.hpp	Sat Nov 09 20:15:27 2019 +0800
    44.2 +++ b/src/share/vm/oops/klass.hpp	Sat Nov 09 20:29:45 2019 +0800
    44.3 @@ -348,10 +348,11 @@
    44.4      _lh_header_size_mask        = right_n_bits(BitsPerByte),  // shifted mask
    44.5      _lh_array_tag_bits          = 2,
    44.6      _lh_array_tag_shift         = BitsPerInt - _lh_array_tag_bits,
    44.7 -    _lh_array_tag_type_value    = ~0x00,  // 0xC0000000 >> 30
    44.8      _lh_array_tag_obj_value     = ~0x01   // 0x80000000 >> 30
    44.9    };
   44.10  
   44.11 +  static const unsigned int _lh_array_tag_type_value = 0Xffffffff; // ~0x00,  // 0xC0000000 >> 30
   44.12 +
   44.13    static int layout_helper_size_in_bytes(jint lh) {
   44.14      assert(lh > (jint)_lh_neutral_value, "must be instance");
   44.15      return (int) lh & ~_lh_instance_slow_path_bit;
    45.1 --- a/src/share/vm/oops/klassVtable.cpp	Sat Nov 09 20:15:27 2019 +0800
    45.2 +++ b/src/share/vm/oops/klassVtable.cpp	Sat Nov 09 20:29:45 2019 +0800
    45.3 @@ -663,6 +663,7 @@
    45.4    Method* super_method = NULL;
    45.5    InstanceKlass *holder = NULL;
    45.6    Method* recheck_method =  NULL;
    45.7 +  bool found_pkg_prvt_method = false;
    45.8    while (k != NULL) {
    45.9      // lookup through the hierarchy for a method with matching name and sign.
   45.10      super_method = InstanceKlass::cast(k)->lookup_method(name, signature);
   45.11 @@ -684,12 +685,31 @@
   45.12          return false;
   45.13        // else keep looking for transitive overrides
   45.14        }
   45.15 +      // If we get here then one of the super classes has a package private method
   45.16 +      // that will not get overridden because it is in a different package.  But,
   45.17 +      // that package private method does "override" any matching methods in super
   45.18 +      // interfaces, so there will be no miranda vtable entry created.  So, set flag
   45.19 +      // to TRUE for use below, in case there are no methods in super classes that
   45.20 +      // this target method overrides.
   45.21 +      assert(super_method->is_package_private(), "super_method must be package private");
   45.22 +      assert(!superk->is_same_class_package(classloader(), classname),
   45.23 +             "Must be different packages");
   45.24 +      found_pkg_prvt_method = true;
   45.25      }
   45.26  
   45.27      // Start with lookup result and continue to search up
   45.28      k = superk->super(); // haven't found an override match yet; continue to look
   45.29    }
   45.30  
   45.31 +  // If found_pkg_prvt_method is set, then the ONLY matching method in the
   45.32 +  // superclasses is package private in another package. That matching method will
   45.33 +  // prevent a miranda vtable entry from being created. Because the target method can not
   45.34 +  // override the package private method in another package, then it needs to be the root
   45.35 +  // for its own vtable entry.
   45.36 +  if (found_pkg_prvt_method) {
   45.37 +     return true;
   45.38 +  }
   45.39 +
   45.40    // if the target method is public or protected it may have a matching
   45.41    // miranda method in the super, whose entry it should re-use.
   45.42    // Actually, to handle cases that javac would not generate, we need
   45.43 @@ -697,7 +717,7 @@
   45.44    InstanceKlass *sk = InstanceKlass::cast(super);
   45.45    if (sk->has_miranda_methods()) {
   45.46      if (sk->lookup_method_in_all_interfaces(name, signature, Klass::find_defaults) != NULL) {
   45.47 -      return false;  // found a matching miranda; we do not need a new entry
   45.48 +      return false; // found a matching miranda; we do not need a new entry
   45.49      }
   45.50    }
   45.51    return true; // found no match; we need a new entry
    46.1 --- a/src/share/vm/opto/connode.cpp	Sat Nov 09 20:15:27 2019 +0800
    46.2 +++ b/src/share/vm/opto/connode.cpp	Sat Nov 09 20:29:45 2019 +0800
    46.3 @@ -1,5 +1,5 @@
    46.4  /*
    46.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    46.6 + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
    46.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.8   *
    46.9   * This code is free software; you can redistribute it and/or modify it
   46.10 @@ -1083,7 +1083,11 @@
   46.11      assert(rxlo == (int)rxlo && rxhi == (int)rxhi, "x should not overflow");
   46.12      assert(rylo == (int)rylo && ryhi == (int)ryhi, "y should not overflow");
   46.13      Node* cx = phase->C->constrained_convI2L(phase, x, TypeInt::make(rxlo, rxhi, widen), NULL);
   46.14 +    Node *hook = new (phase->C) Node(1);
   46.15 +    hook->init_req(0, cx);  // Add a use to cx to prevent him from dying
   46.16      Node* cy = phase->C->constrained_convI2L(phase, y, TypeInt::make(rylo, ryhi, widen), NULL);
   46.17 +    hook->del_req(0);  // Just yank bogus edge
   46.18 +    hook->destruct();
   46.19      switch (op) {
   46.20      case Op_AddI:  return new (phase->C) AddLNode(cx, cy);
   46.21      case Op_SubI:  return new (phase->C) SubLNode(cx, cy);
    47.1 --- a/src/share/vm/opto/graphKit.cpp	Sat Nov 09 20:15:27 2019 +0800
    47.2 +++ b/src/share/vm/opto/graphKit.cpp	Sat Nov 09 20:29:45 2019 +0800
    47.3 @@ -1787,12 +1787,13 @@
    47.4  // A better answer would be to separate out card marks from other memory.
    47.5  // For now, return the input memory state, so that it can be reused
    47.6  // after the call, if this call has restricted memory effects.
    47.7 -Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
    47.8 +Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
    47.9    // Set fixed predefined input arguments
   47.10    Node* memory = reset_memory();
   47.11 +  Node* m = narrow_mem == NULL ? memory : narrow_mem;
   47.12    call->init_req( TypeFunc::Control,   control()  );
   47.13    call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
   47.14 -  call->init_req( TypeFunc::Memory,    memory     ); // may gc ptrs
   47.15 +  call->init_req( TypeFunc::Memory,    m          ); // may gc ptrs
   47.16    call->init_req( TypeFunc::FramePtr,  frameptr() );
   47.17    call->init_req( TypeFunc::ReturnAdr, top()      );
   47.18    return memory;
   47.19 @@ -2382,9 +2383,7 @@
   47.20    } else {
   47.21      assert(!wide_out, "narrow in => narrow out");
   47.22      Node* narrow_mem = memory(adr_type);
   47.23 -    prev_mem = reset_memory();
   47.24 -    map()->set_memory(narrow_mem);
   47.25 -    set_predefined_input_for_runtime_call(call);
   47.26 +    prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
   47.27    }
   47.28  
   47.29    // Hook each parm in order.  Stop looking at the first NULL.
    48.1 --- a/src/share/vm/opto/graphKit.hpp	Sat Nov 09 20:15:27 2019 +0800
    48.2 +++ b/src/share/vm/opto/graphKit.hpp	Sat Nov 09 20:29:45 2019 +0800
    48.3 @@ -700,7 +700,7 @@
    48.4    void  set_predefined_output_for_runtime_call(Node* call,
    48.5                                                 Node* keep_mem,
    48.6                                                 const TypePtr* hook_mem);
    48.7 -  Node* set_predefined_input_for_runtime_call(SafePointNode* call);
    48.8 +  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);
    48.9  
   48.10    // Replace the call with the current state of the kit.  Requires
   48.11    // that the call was generated with separate io_projs so that
    49.1 --- a/src/share/vm/opto/ifnode.cpp	Sat Nov 09 20:15:27 2019 +0800
    49.2 +++ b/src/share/vm/opto/ifnode.cpp	Sat Nov 09 20:29:45 2019 +0800
    49.3 @@ -601,7 +601,7 @@
    49.4      if( din4->is_Call() &&      // Handle a slow-path call on either arm
    49.5          (din4 = din4->in(0)) )
    49.6        din4 = din4->in(0);
    49.7 -    if( din3 == din4 && din3->is_If() )
    49.8 +    if (din3 != NULL && din3 == din4 && din3->is_If()) // Regions not degraded to a copy
    49.9        return din3;              // Skip around diamonds
   49.10    }
   49.11  
    50.1 --- a/src/share/vm/opto/library_call.cpp	Sat Nov 09 20:15:27 2019 +0800
    50.2 +++ b/src/share/vm/opto/library_call.cpp	Sat Nov 09 20:29:45 2019 +0800
    50.3 @@ -1,5 +1,5 @@
    50.4  /*
    50.5 - * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
    50.6 + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
    50.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    50.8   *
    50.9   * This code is free software; you can redistribute it and/or modify it
   50.10 @@ -3803,7 +3803,7 @@
   50.11    }
   50.12    // Now test the correct condition.
   50.13    jint  nval = (obj_array
   50.14 -                ? ((jint)Klass::_lh_array_tag_type_value
   50.15 +                ? (jint)(Klass::_lh_array_tag_type_value
   50.16                     <<    Klass::_lh_array_tag_shift)
   50.17                  : Klass::_lh_neutral_value);
   50.18    Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval)));
   50.19 @@ -6759,10 +6759,18 @@
   50.20    if (state == NULL) return false;
   50.21  
   50.22    // Call the stub.
   50.23 -  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
   50.24 -                                 OptoRuntime::digestBase_implCompressMB_Type(),
   50.25 -                                 stubAddr, stubName, TypePtr::BOTTOM,
   50.26 -                                 src_start, state, ofs, limit);
   50.27 +  Node *call;
   50.28 +  if (CCallingConventionRequiresIntsAsLongs) {
   50.29 +    call = make_runtime_call(RC_LEAF|RC_NO_FP,
   50.30 +                             OptoRuntime::digestBase_implCompressMB_Type(),
   50.31 +                             stubAddr, stubName, TypePtr::BOTTOM,
   50.32 +                             src_start, state, ofs XTOP, limit XTOP);
   50.33 +  } else {
   50.34 +    call = make_runtime_call(RC_LEAF|RC_NO_FP,
   50.35 +                             OptoRuntime::digestBase_implCompressMB_Type(),
   50.36 +                             stubAddr, stubName, TypePtr::BOTTOM,
   50.37 +                             src_start, state, ofs, limit);
   50.38 +  }
   50.39    // return ofs (int)
   50.40    Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
   50.41    set_result(result);
    51.1 --- a/src/share/vm/opto/loopPredicate.cpp	Sat Nov 09 20:15:27 2019 +0800
    51.2 +++ b/src/share/vm/opto/loopPredicate.cpp	Sat Nov 09 20:29:45 2019 +0800
    51.3 @@ -611,7 +611,11 @@
    51.4    const TypeInt* idx_type = TypeInt::INT;
    51.5    if ((stride > 0) == (scale > 0) == upper) {
    51.6      if (TraceLoopPredicate) {
    51.7 -      predString->print(limit->is_Con() ? "(%d " : "(limit ", con_limit);
    51.8 +      if (limit->is_Con()) {
    51.9 +        predString->print("(%d ", con_limit);
   51.10 +      } else {
   51.11 +        predString->print("(limit ");
   51.12 +      }
   51.13        predString->print("- %d) ", stride);
   51.14      }
   51.15      // Check if (limit - stride) may overflow
   51.16 @@ -639,7 +643,11 @@
   51.17      register_new_node(max_idx_expr, ctrl);
   51.18    } else {
   51.19      if (TraceLoopPredicate) {
   51.20 -      predString->print(init->is_Con() ? "%d " : "init ", con_init);
   51.21 +      if (init->is_Con()) {
   51.22 +        predString->print("%d ", con_init);
   51.23 +      } else {
   51.24 +        predString->print("init ");
   51.25 +      }
   51.26      }
   51.27      idx_type = _igvn.type(init)->isa_int();
   51.28      max_idx_expr = init;
   51.29 @@ -675,7 +683,11 @@
   51.30  
   51.31    if (offset && (!offset->is_Con() || con_offset != 0)){
   51.32      if (TraceLoopPredicate) {
   51.33 -      predString->print(offset->is_Con() ? "+ %d " : "+ offset", con_offset);
   51.34 +      if (offset->is_Con()) {
   51.35 +        predString->print("+ %d ", con_offset);
   51.36 +      } else {
   51.37 +        predString->print("+ offset");
   51.38 +      }
   51.39      }
   51.40      // Check if (max_idx_expr + offset) may overflow
   51.41      const TypeInt* offset_type = _igvn.type(offset)->isa_int();
    52.1 --- a/src/share/vm/opto/loopTransform.cpp	Sat Nov 09 20:15:27 2019 +0800
    52.2 +++ b/src/share/vm/opto/loopTransform.cpp	Sat Nov 09 20:29:45 2019 +0800
    52.3 @@ -1537,13 +1537,20 @@
    52.4  
    52.5  //------------------------------adjust_limit-----------------------------------
    52.6  // Helper function for add_constraint().
    52.7 -Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
    52.8 +Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl, bool round_up) {
    52.9    // Compute "I :: (limit-offset)/scale"
   52.10    Node *con = new (C) SubINode(rc_limit, offset);
   52.11    register_new_node(con, pre_ctrl);
   52.12    Node *X = new (C) DivINode(0, con, scale);
   52.13    register_new_node(X, pre_ctrl);
   52.14  
   52.15 +  // When the absolute value of scale is greater than one, the integer
   52.16 +  // division may round limit down so add one to the limit.
   52.17 +  if (round_up) {
   52.18 +    X = new (C) AddINode(X, _igvn.intcon(1));
   52.19 +    register_new_node(X, pre_ctrl);
   52.20 +  }
   52.21 +
   52.22    // Adjust loop limit
   52.23    loop_limit = (stride_con > 0)
   52.24                 ? (Node*)(new (C) MinINode(loop_limit, X))
   52.25 @@ -1584,7 +1591,7 @@
   52.26      // (upper_limit-offset) may overflow or underflow.
   52.27      // But it is fine since main loop will either have
   52.28      // less iterations or will be skipped in such case.
   52.29 -    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
   52.30 +    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl, false);
   52.31  
   52.32      // The underflow limit: low_limit <= scale*I+offset.
   52.33      // For pre-loop compute
   52.34 @@ -1620,7 +1627,8 @@
   52.35        // max(pre_limit, original_limit) is used in do_range_check().
   52.36      }
   52.37      // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
   52.38 -    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
   52.39 +    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl,
   52.40 +                              scale_con > 1 && stride_con > 0);
   52.41  
   52.42    } else { // stride_con*scale_con < 0
   52.43      // For negative stride*scale pre-loop checks for overflow and
   52.44 @@ -1646,7 +1654,8 @@
   52.45      Node *plus_one = new (C) AddINode(offset, one);
   52.46      register_new_node( plus_one, pre_ctrl );
   52.47      // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
   52.48 -    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
   52.49 +    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl,
   52.50 +                              scale_con < -1 && stride_con > 0);
   52.51  
   52.52      if (low_limit->get_int() == -max_jint) {
   52.53        if (!RangeLimitCheck) return;
   52.54 @@ -1681,7 +1690,8 @@
   52.55      //       I > (low_limit-(offset+1))/scale
   52.56      //   )
   52.57  
   52.58 -    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
   52.59 +    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl,
   52.60 +                               false);
   52.61    }
   52.62  }
   52.63  
    53.1 --- a/src/share/vm/opto/loopnode.hpp	Sat Nov 09 20:15:27 2019 +0800
    53.2 +++ b/src/share/vm/opto/loopnode.hpp	Sat Nov 09 20:29:45 2019 +0800
    53.3 @@ -959,7 +959,7 @@
    53.4    // loop.  Scale_con, offset and limit are all loop invariant.
    53.5    void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
    53.6    // Helper function for add_constraint().
    53.7 -  Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl );
    53.8 +  Node* adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl, bool round_up);
    53.9  
   53.10    // Partially peel loop up through last_peel node.
   53.11    bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
    54.1 --- a/src/share/vm/opto/matcher.cpp	Sat Nov 09 20:15:27 2019 +0800
    54.2 +++ b/src/share/vm/opto/matcher.cpp	Sat Nov 09 20:29:45 2019 +0800
    54.3 @@ -2051,6 +2051,12 @@
    54.4          // Node is shared and has no reason to clone.  Flag it as shared.
    54.5          // This causes it to match into a register for the sharing.
    54.6          set_shared(n);       // Flag as shared and
    54.7 +        if (n->is_DecodeNarrowPtr()) {
    54.8 +          // Oop field/array element loads must be shared but since
    54.9 +          // they are shared through a DecodeN they may appear to have
   54.10 +          // a single use so force sharing here.
   54.11 +          set_shared(n->in(1));
   54.12 +        }
   54.13          mstack.pop();        // remove node from stack
   54.14          continue;
   54.15        }
   54.16 @@ -2173,13 +2179,6 @@
   54.17            continue; // for(int i = ...)
   54.18          }
   54.19  
   54.20 -        if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
   54.21 -          // Bases used in addresses must be shared but since
   54.22 -          // they are shared through a DecodeN they may appear
   54.23 -          // to have a single use so force sharing here.
   54.24 -          set_shared(m->in(AddPNode::Base)->in(1));
   54.25 -        }
   54.26 -
   54.27          // if 'n' and 'm' are part of a graph for BMI instruction, clone this node.
   54.28  #ifdef X86
   54.29          if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
    55.1 --- a/src/share/vm/opto/memnode.cpp	Sat Nov 09 20:15:27 2019 +0800
    55.2 +++ b/src/share/vm/opto/memnode.cpp	Sat Nov 09 20:29:45 2019 +0800
    55.3 @@ -1359,6 +1359,14 @@
    55.4          Node* in = mem->in(i);
    55.5          Node*  m = optimize_memory_chain(in, t_oop, this, phase);
    55.6          if (m == mem) {
    55.7 +          if (i == 1) {
    55.8 +            // if the first edge was a loop, check second edge too.
    55.9 +            // If both are replaceable - we are in an infinite loop
   55.10 +            Node *n = optimize_memory_chain(mem->in(2), t_oop, this, phase);
   55.11 +            if (n == mem) {
   55.12 +              break;
   55.13 +            }
   55.14 +          }
   55.15            set_req(Memory, mem->in(cnt - i));
   55.16            return this; // made change
   55.17          }
    56.1 --- a/src/share/vm/opto/reg_split.cpp	Sat Nov 09 20:15:27 2019 +0800
    56.2 +++ b/src/share/vm/opto/reg_split.cpp	Sat Nov 09 20:29:45 2019 +0800
    56.3 @@ -1171,9 +1171,8 @@
    56.4                (deflrg._direct_conflict || deflrg._must_spill)) ||
    56.5               // Check for LRG being up in a register and we are inside a high
    56.6               // pressure area.  Spill it down immediately.
    56.7 -             (defup && is_high_pressure(b,&deflrg,insidx))) ) {
    56.8 +             (defup && is_high_pressure(b,&deflrg,insidx) && !n->is_SpillCopy())) ) {
    56.9            assert( !n->rematerialize(), "" );
   56.10 -          assert( !n->is_SpillCopy(), "" );
   56.11            // Do a split at the def site.
   56.12            maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
   56.13            // If it wasn't split bail
    57.1 --- a/src/share/vm/opto/runtime.cpp	Sat Nov 09 20:15:27 2019 +0800
    57.2 +++ b/src/share/vm/opto/runtime.cpp	Sat Nov 09 20:29:45 2019 +0800
    57.3 @@ -1,5 +1,5 @@
    57.4  /*
    57.5 - * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
    57.6 + * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
    57.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    57.8   *
    57.9   * This code is free software; you can redistribute it and/or modify it
   57.10 @@ -938,12 +938,24 @@
   57.11    // create input type (domain)
   57.12    int num_args = 4;
   57.13    int argcnt = num_args;
   57.14 +  if(CCallingConventionRequiresIntsAsLongs) {
   57.15 +    argcnt += 2;
   57.16 +  }
   57.17    const Type** fields = TypeTuple::fields(argcnt);
   57.18    int argp = TypeFunc::Parms;
   57.19 -  fields[argp++] = TypePtr::NOTNULL; // buf
   57.20 -  fields[argp++] = TypePtr::NOTNULL; // state
   57.21 -  fields[argp++] = TypeInt::INT;     // ofs
   57.22 -  fields[argp++] = TypeInt::INT;     // limit
   57.23 +  if(CCallingConventionRequiresIntsAsLongs) {
   57.24 +    fields[argp++] = TypePtr::NOTNULL; // buf
   57.25 +    fields[argp++] = TypePtr::NOTNULL; // state
   57.26 +    fields[argp++] = TypeLong::LONG;   // ofs
   57.27 +    fields[argp++] = Type::HALF;
   57.28 +    fields[argp++] = TypeLong::LONG;   // limit
   57.29 +    fields[argp++] = Type::HALF;
   57.30 +  } else {
   57.31 +    fields[argp++] = TypePtr::NOTNULL; // buf
   57.32 +    fields[argp++] = TypePtr::NOTNULL; // state
   57.33 +    fields[argp++] = TypeInt::INT;     // ofs
   57.34 +    fields[argp++] = TypeInt::INT;     // limit
   57.35 +  }
   57.36    assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
   57.37    const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
   57.38  
    58.1 --- a/src/share/vm/opto/superword.cpp	Sat Nov 09 20:15:27 2019 +0800
    58.2 +++ b/src/share/vm/opto/superword.cpp	Sat Nov 09 20:29:45 2019 +0800
    58.3 @@ -482,7 +482,9 @@
    58.4    if (init_nd->is_Con() && p.invar() == NULL) {
    58.5      int init = init_nd->bottom_type()->is_int()->get_con();
    58.6      int init_offset = init * p.scale_in_bytes() + offset;
    58.7 -    assert(init_offset >= 0, "positive offset from object start");
    58.8 +    if (init_offset < 0) { // negative offset from object start?
    58.9 +      return false;        // may happen in dead loop
   58.10 +    }
   58.11      if (vw % span == 0) {
   58.12        // If vm is a multiple of span, we use formula (1).
   58.13        if (span > 0) {
    59.1 --- a/src/share/vm/prims/jvmtiEnvBase.hpp	Sat Nov 09 20:15:27 2019 +0800
    59.2 +++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Sat Nov 09 20:29:45 2019 +0800
    59.3 @@ -32,6 +32,7 @@
    59.4  #include "runtime/fieldDescriptor.hpp"
    59.5  #include "runtime/frame.hpp"
    59.6  #include "runtime/handles.inline.hpp"
    59.7 +#include "runtime/orderAccess.hpp"
    59.8  #include "runtime/thread.hpp"
    59.9  #include "runtime/vm_operations.hpp"
   59.10  #include "utilities/growableArray.hpp"
   59.11 @@ -97,7 +98,7 @@
   59.12    const void *_env_local_storage;     // per env agent allocated data.
   59.13    jvmtiEventCallbacks _event_callbacks;
   59.14    jvmtiExtEventCallbacks _ext_event_callbacks;
   59.15 -  JvmtiTagMap* _tag_map;
   59.16 +  JvmtiTagMap* volatile _tag_map;
   59.17    JvmtiEnvEventEnable _env_event_enable;
   59.18    jvmtiCapabilities _current_capabilities;
   59.19    jvmtiCapabilities _prohibited_capabilities;
   59.20 @@ -251,6 +252,13 @@
   59.21      return _tag_map;
   59.22    }
   59.23  
   59.24 +  JvmtiTagMap* tag_map_acquire() {
   59.25 +    return (JvmtiTagMap*)OrderAccess::load_ptr_acquire(&_tag_map);
   59.26 +  }
   59.27 +
   59.28 +  void release_set_tag_map(JvmtiTagMap* tag_map) {
   59.29 +    OrderAccess::release_store_ptr(&_tag_map, tag_map);
   59.30 +  }
   59.31  
   59.32    // return true if event is enabled globally or for any thread
   59.33    // True only if there is a callback for it.
    60.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Sat Nov 09 20:15:27 2019 +0800
    60.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Sat Nov 09 20:29:45 2019 +0800
    60.3 @@ -67,6 +67,43 @@
    60.4    _res = JVMTI_ERROR_NONE;
    60.5  }
    60.6  
    60.7 +static inline InstanceKlass* get_ik(jclass def) {
    60.8 +  oop mirror = JNIHandles::resolve_non_null(def);
    60.9 +  return InstanceKlass::cast(java_lang_Class::as_Klass(mirror));
   60.10 +}
   60.11 +
   60.12 +// If any of the classes are being redefined, wait
   60.13 +// Parallel constant pool merging leads to indeterminate constant pools.
   60.14 +void VM_RedefineClasses::lock_classes() {
   60.15 +  MutexLocker ml(RedefineClasses_lock);
   60.16 +  bool has_redefined;
   60.17 +  do {
   60.18 +    has_redefined = false;
   60.19 +    // Go through classes each time until none are being redefined.
   60.20 +    for (int i = 0; i < _class_count; i++) {
   60.21 +      if (get_ik(_class_defs[i].klass)->is_being_redefined()) {
   60.22 +        RedefineClasses_lock->wait();
   60.23 +        has_redefined = true;
   60.24 +        break;  // for loop
   60.25 +      }
   60.26 +    }
   60.27 +  } while (has_redefined);
   60.28 +  for (int i = 0; i < _class_count; i++) {
   60.29 +    get_ik(_class_defs[i].klass)->set_is_being_redefined(true);
   60.30 +  }
   60.31 +  RedefineClasses_lock->notify_all();
   60.32 +}
   60.33 +
   60.34 +void VM_RedefineClasses::unlock_classes() {
   60.35 +  MutexLocker ml(RedefineClasses_lock);
   60.36 +  for (int i = 0; i < _class_count; i++) {
   60.37 +    assert(get_ik(_class_defs[i].klass)->is_being_redefined(),
   60.38 +           "should be being redefined to get here");
   60.39 +    get_ik(_class_defs[i].klass)->set_is_being_redefined(false);
   60.40 +  }
   60.41 +  RedefineClasses_lock->notify_all();
   60.42 +}
   60.43 +
   60.44  bool VM_RedefineClasses::doit_prologue() {
   60.45    if (_class_count == 0) {
   60.46      _res = JVMTI_ERROR_NONE;
   60.47 @@ -89,12 +126,21 @@
   60.48        _res = JVMTI_ERROR_NULL_POINTER;
   60.49        return false;
   60.50      }
   60.51 +
   60.52 +    oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass);
   60.53 +    // classes for primitives and arrays cannot be redefined
   60.54 +    // check here so following code can assume these classes are InstanceKlass
   60.55 +    if (!is_modifiable_class(mirror)) {
   60.56 +      _res = JVMTI_ERROR_UNMODIFIABLE_CLASS;
   60.57 +      return false;
   60.58 +    }
   60.59    }
   60.60  
   60.61    // Start timer after all the sanity checks; not quite accurate, but
   60.62    // better than adding a bunch of stop() calls.
   60.63    RC_TIMER_START(_timer_vm_op_prologue);
   60.64  
   60.65 +  lock_classes();
   60.66    // We first load new class versions in the prologue, because somewhere down the
   60.67    // call chain it is required that the current thread is a Java thread.
   60.68    _res = load_new_class_versions(Thread::current());
   60.69 @@ -105,12 +151,18 @@
   60.70          ClassLoaderData* cld = _scratch_classes[i]->class_loader_data();
   60.71          // Free the memory for this class at class unloading time.  Not before
   60.72          // because CMS might think this is still live.
   60.73 +        InstanceKlass* ik = get_ik(_class_defs[i].klass);
   60.74 +        if (ik->get_cached_class_file() == ((InstanceKlass*)_scratch_classes[i])->get_cached_class_file()) {
   60.75 +          // Don't double-free cached_class_file copied from the original class if error.
   60.76 +          ((InstanceKlass*)_scratch_classes[i])->set_cached_class_file(NULL);
   60.77 +        }
   60.78          cld->add_to_deallocate_list((InstanceKlass*)_scratch_classes[i]);
   60.79        }
   60.80      }
   60.81      // Free os::malloc allocated memory in load_new_class_version.
   60.82      os::free(_scratch_classes);
   60.83      RC_TIMER_STOP(_timer_vm_op_prologue);
   60.84 +    unlock_classes();
   60.85      return false;
   60.86    }
   60.87  
   60.88 @@ -170,6 +222,8 @@
   60.89  }
   60.90  
   60.91  void VM_RedefineClasses::doit_epilogue() {
   60.92 +  unlock_classes();
   60.93 +
   60.94    // Free os::malloc allocated memory.
   60.95    os::free(_scratch_classes);
   60.96  
   60.97 @@ -961,14 +1015,7 @@
   60.98      // versions are deleted. Constant pools are deallocated while merging
   60.99      // constant pools
  60.100      HandleMark hm(THREAD);
  60.101 -
  60.102 -    oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass);
  60.103 -    // classes for primitives cannot be redefined
  60.104 -    if (!is_modifiable_class(mirror)) {
  60.105 -      return JVMTI_ERROR_UNMODIFIABLE_CLASS;
  60.106 -    }
  60.107 -    Klass* the_class_oop = java_lang_Class::as_Klass(mirror);
  60.108 -    instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop);
  60.109 +    instanceKlassHandle the_class(THREAD, get_ik(_class_defs[i].klass));
  60.110      Symbol*  the_class_sym = the_class->name();
  60.111  
  60.112      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
  60.113 @@ -3855,22 +3902,19 @@
  60.114    HandleMark hm(THREAD);   // make sure handles from this call are freed
  60.115    RC_TIMER_START(_timer_rsc_phase1);
  60.116  
  60.117 -  instanceKlassHandle scratch_class(scratch_class_oop);
  60.118 -
  60.119 -  oop the_class_mirror = JNIHandles::resolve_non_null(the_jclass);
  60.120 -  Klass* the_class_oop = java_lang_Class::as_Klass(the_class_mirror);
  60.121 -  instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop);
  60.122 +  instanceKlassHandle scratch_class(THREAD, scratch_class_oop);
  60.123 +  instanceKlassHandle the_class(THREAD, get_ik(the_jclass));
  60.124  
  60.125    // Remove all breakpoints in methods of this class
  60.126    JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
  60.127 -  jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop);
  60.128 +  jvmti_breakpoints.clearall_in_class_at_safepoint(the_class());
  60.129  
  60.130    // Deoptimize all compiled code that depends on this class
  60.131    flush_dependent_code(the_class, THREAD);
  60.132  
  60.133    _old_methods = the_class->methods();
  60.134    _new_methods = scratch_class->methods();
  60.135 -  _the_class_oop = the_class_oop;
  60.136 +  _the_class_oop = the_class();
  60.137    compute_added_deleted_matching_methods();
  60.138    update_jmethod_ids();
  60.139  
  60.140 @@ -3980,12 +4024,12 @@
  60.141    // with them was cached on the scratch class, move to the_class.
  60.142    // Note: we still want to do this if nothing needed caching since it
  60.143    // should get cleared in the_class too.
  60.144 -  if (the_class->get_cached_class_file_bytes() == 0) {
  60.145 +  if (the_class->get_cached_class_file() == 0) {
  60.146      // the_class doesn't have a cache yet so copy it
  60.147      the_class->set_cached_class_file(scratch_class->get_cached_class_file());
  60.148    }
  60.149 -  else if (scratch_class->get_cached_class_file_bytes() !=
  60.150 -           the_class->get_cached_class_file_bytes()) {
  60.151 +  else if (scratch_class->get_cached_class_file() !=
  60.152 +           the_class->get_cached_class_file()) {
  60.153      // The same class can be present twice in the scratch classes list or there
  60.154      // are multiple concurrent RetransformClasses calls on different threads.
  60.155      // In such cases we have to deallocate scratch_class cached_class_file.
  60.156 @@ -4094,14 +4138,14 @@
  60.157    RC_TRACE_WITH_THREAD(0x00000001, THREAD,
  60.158      ("redefined name=%s, count=%d (avail_mem=" UINT64_FORMAT "K)",
  60.159      the_class->external_name(),
  60.160 -    java_lang_Class::classRedefinedCount(the_class_mirror),
  60.161 +    java_lang_Class::classRedefinedCount(the_class->java_mirror()),
  60.162      os::available_memory() >> 10));
  60.163  
  60.164    {
  60.165      ResourceMark rm(THREAD);
  60.166      Events::log_redefinition(THREAD, "redefined class name=%s, count=%d",
  60.167                               the_class->external_name(),
  60.168 -                             java_lang_Class::classRedefinedCount(the_class_mirror));
  60.169 +                             java_lang_Class::classRedefinedCount(the_class->java_mirror()));
  60.170  
  60.171    }
  60.172    RC_TIMER_STOP(_timer_rsc_phase2);
    61.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.hpp	Sat Nov 09 20:15:27 2019 +0800
    61.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp	Sat Nov 09 20:29:45 2019 +0800
    61.3 @@ -490,6 +490,10 @@
    61.4  
    61.5    void flush_dependent_code(instanceKlassHandle k_h, TRAPS);
    61.6  
    61.7 +  // lock classes to redefine since constant pool merging isn't thread safe.
    61.8 +  void lock_classes();
    61.9 +  void unlock_classes();
   61.10 +
   61.11    static void dump_methods();
   61.12  
   61.13    // Check that there are no old or obsolete methods
    62.1 --- a/src/share/vm/prims/jvmtiTagMap.cpp	Sat Nov 09 20:15:27 2019 +0800
    62.2 +++ b/src/share/vm/prims/jvmtiTagMap.cpp	Sat Nov 09 20:29:45 2019 +0800
    62.3 @@ -430,7 +430,7 @@
    62.4    _hashmap = new JvmtiTagHashmap();
    62.5  
    62.6    // finally add us to the environment
    62.7 -  ((JvmtiEnvBase *)env)->set_tag_map(this);
    62.8 +  ((JvmtiEnvBase *)env)->release_set_tag_map(this);
    62.9  }
   62.10  
   62.11  
   62.12 @@ -499,7 +499,7 @@
   62.13  // returns the tag map for the given environments. If the tag map
   62.14  // doesn't exist then it is created.
   62.15  JvmtiTagMap* JvmtiTagMap::tag_map_for(JvmtiEnv* env) {
   62.16 -  JvmtiTagMap* tag_map = ((JvmtiEnvBase*)env)->tag_map();
   62.17 +  JvmtiTagMap* tag_map = ((JvmtiEnvBase*)env)->tag_map_acquire();
   62.18    if (tag_map == NULL) {
   62.19      MutexLocker mu(JvmtiThreadState_lock);
   62.20      tag_map = ((JvmtiEnvBase*)env)->tag_map();
   62.21 @@ -3282,7 +3282,7 @@
   62.22    if (JvmtiEnv::environments_might_exist()) {
   62.23      JvmtiEnvIterator it;
   62.24      for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
   62.25 -      JvmtiTagMap* tag_map = env->tag_map();
   62.26 +      JvmtiTagMap* tag_map = env->tag_map_acquire();
   62.27        if (tag_map != NULL && !tag_map->is_empty()) {
   62.28          tag_map->do_weak_oops(is_alive, f);
   62.29        }
    63.1 --- a/src/share/vm/runtime/arguments.cpp	Sat Nov 09 20:15:27 2019 +0800
    63.2 +++ b/src/share/vm/runtime/arguments.cpp	Sat Nov 09 20:29:45 2019 +0800
    63.3 @@ -556,8 +556,7 @@
    63.4  
    63.5    /* Scan the directory for jars/zips, appending them to path. */
    63.6    struct dirent *entry;
    63.7 -  char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal);
    63.8 -  while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
    63.9 +  while ((entry = os::readdir(dir)) != NULL) {
   63.10      const char* name = entry->d_name;
   63.11      const char* ext = name + strlen(name) - 4;
   63.12      bool isJarOrZip = ext > name &&
   63.13 @@ -571,7 +570,6 @@
   63.14        FREE_C_HEAP_ARRAY(char, jarpath, mtInternal);
   63.15      }
   63.16    }
   63.17 -  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   63.18    os::closedir(dir);
   63.19    return path;
   63.20  }
   63.21 @@ -3485,14 +3483,12 @@
   63.22    if (dir == NULL) return false;
   63.23  
   63.24    struct dirent *entry;
   63.25 -  char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal);
   63.26    bool hasJarFile = false;
   63.27 -  while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
   63.28 +  while (!hasJarFile && (entry = os::readdir(dir)) != NULL) {
   63.29      const char* name = entry->d_name;
   63.30      const char* ext = name + strlen(name) - 4;
   63.31      hasJarFile = ext > name && (os::file_name_strcmp(ext, ".jar") == 0);
   63.32    }
   63.33 -  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   63.34    os::closedir(dir);
   63.35    return hasJarFile ;
   63.36  }
   63.37 @@ -3574,8 +3570,7 @@
   63.38    if (dir != NULL) {
   63.39      int num_ext_jars = 0;
   63.40      struct dirent *entry;
   63.41 -    char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(extDir), mtInternal);
   63.42 -    while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
   63.43 +    while ((entry = os::readdir(dir)) != NULL) {
   63.44        const char* name = entry->d_name;
   63.45        const char* ext = name + strlen(name) - 4;
   63.46        if (ext > name && (os::file_name_strcmp(ext, ".jar") == 0)) {
   63.47 @@ -3594,7 +3589,6 @@
   63.48          }
   63.49        }
   63.50      }
   63.51 -    FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
   63.52      os::closedir(dir);
   63.53      if (num_ext_jars > 0) {
   63.54        nonEmptyDirs += 1;
    64.1 --- a/src/share/vm/runtime/java.cpp	Sat Nov 09 20:15:27 2019 +0800
    64.2 +++ b/src/share/vm/runtime/java.cpp	Sat Nov 09 20:29:45 2019 +0800
    64.3 @@ -51,6 +51,7 @@
    64.4  #include "runtime/arguments.hpp"
    64.5  #include "runtime/biasedLocking.hpp"
    64.6  #include "runtime/compilationPolicy.hpp"
    64.7 +#include "runtime/deoptimization.hpp"
    64.8  #include "runtime/fprofiler.hpp"
    64.9  #include "runtime/init.hpp"
   64.10  #include "runtime/interfaceSupport.hpp"
    65.1 --- a/src/share/vm/runtime/mutexLocker.cpp	Sat Nov 09 20:15:27 2019 +0800
    65.2 +++ b/src/share/vm/runtime/mutexLocker.cpp	Sat Nov 09 20:29:45 2019 +0800
    65.3 @@ -125,6 +125,7 @@
    65.4  Mutex*   Management_lock              = NULL;
    65.5  Monitor* Service_lock                 = NULL;
    65.6  Monitor* PeriodicTask_lock            = NULL;
    65.7 +Monitor* RedefineClasses_lock         = NULL;
    65.8  
    65.9  #ifdef INCLUDE_TRACE
   65.10  Mutex*   JfrStacktrace_lock           = NULL;
   65.11 @@ -279,6 +280,7 @@
   65.12    def(ProfileVM_lock               , Monitor, special,   false); // used for profiling of the VMThread
   65.13    def(CompileThread_lock           , Monitor, nonleaf+5,   false );
   65.14    def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
   65.15 +  def(RedefineClasses_lock         , Monitor, nonleaf+5,   true);
   65.16  
   65.17  #ifdef INCLUDE_TRACE
   65.18    def(JfrMsg_lock                  , Monitor, leaf,        true);
    66.1 --- a/src/share/vm/runtime/mutexLocker.hpp	Sat Nov 09 20:15:27 2019 +0800
    66.2 +++ b/src/share/vm/runtime/mutexLocker.hpp	Sat Nov 09 20:29:45 2019 +0800
    66.3 @@ -141,6 +141,7 @@
    66.4  extern Mutex*   Management_lock;                 // a lock used to serialize JVM management
    66.5  extern Monitor* Service_lock;                    // a lock used for service thread operation
    66.6  extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
    66.7 +extern Monitor* RedefineClasses_lock;            // locks classes from parallel redefinition
    66.8  
    66.9  #ifdef INCLUDE_TRACE
   66.10  extern Mutex*   JfrStacktrace_lock;              // used to guard access to the JFR stacktrace table
    67.1 --- a/src/share/vm/runtime/os.hpp	Sat Nov 09 20:15:27 2019 +0800
    67.2 +++ b/src/share/vm/runtime/os.hpp	Sat Nov 09 20:29:45 2019 +0800
    67.3 @@ -567,8 +567,7 @@
    67.4  
    67.5    // Reading directories.
    67.6    static DIR*           opendir(const char* dirname);
    67.7 -  static int            readdir_buf_size(const char *path);
    67.8 -  static struct dirent* readdir(DIR* dirp, dirent* dbuf);
    67.9 +  static struct dirent* readdir(DIR* dirp);
   67.10    static int            closedir(DIR* dirp);
   67.11  
   67.12    // Dynamic library extension
    68.1 --- a/src/share/vm/runtime/stubRoutines.cpp	Sat Nov 09 20:15:27 2019 +0800
    68.2 +++ b/src/share/vm/runtime/stubRoutines.cpp	Sat Nov 09 20:29:45 2019 +0800
    68.3 @@ -174,6 +174,9 @@
    68.4      }
    68.5      CodeBuffer buffer(_code1);
    68.6      StubGenerator_generate(&buffer, false);
    68.7 +    // When new stubs added we need to make sure there is some space left
    68.8 +    // to catch situation when we should increase size again.
    68.9 +    assert(buffer.insts_remaining() > 200, "increase code_size1");
   68.10    }
   68.11  }
   68.12  
   68.13 @@ -226,6 +229,9 @@
   68.14      }
   68.15      CodeBuffer buffer(_code2);
   68.16      StubGenerator_generate(&buffer, true);
   68.17 +    // When new stubs added we need to make sure there is some space left
   68.18 +    // to catch situation when we should increase size again.
   68.19 +    assert(buffer.insts_remaining() > 200, "increase code_size2");
   68.20    }
   68.21  
   68.22  #ifdef ASSERT
    69.1 --- a/src/share/vm/services/classLoadingService.cpp	Sat Nov 09 20:15:27 2019 +0800
    69.2 +++ b/src/share/vm/services/classLoadingService.cpp	Sat Nov 09 20:29:45 2019 +0800
    69.3 @@ -1,5 +1,5 @@
    69.4  /*
    69.5 - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
    69.6 + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
    69.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.8   *
    69.9   * This code is free software; you can redistribute it and/or modify it
   69.10 @@ -69,7 +69,7 @@
   69.11        len = name->utf8_length();                    \
   69.12      }                                               \
   69.13      HOTSPOT_CLASS_##type( /* type = unloaded, loaded */ \
   69.14 -      data, len, (clss)->class_loader(), (shared)); \
   69.15 +      data, len, (void *)(clss)->class_loader(), (shared)); \
   69.16    }
   69.17  
   69.18  #endif /* USDT2 */
    70.1 --- a/src/share/vm/utilities/taskqueue.hpp	Sat Nov 09 20:15:27 2019 +0800
    70.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Sat Nov 09 20:29:45 2019 +0800
    70.3 @@ -29,6 +29,7 @@
    70.4  #include "memory/allocation.inline.hpp"
    70.5  #include "runtime/mutex.hpp"
    70.6  #include "runtime/orderAccess.inline.hpp"
    70.7 +#include "utilities/globalDefinitions.hpp"
    70.8  #include "utilities/stack.hpp"
    70.9  
   70.10  // Simple TaskQueue stats that are collected by default in debug builds.
   70.11 @@ -668,7 +669,9 @@
   70.12  private:
   70.13    int _n_threads;
   70.14    TaskQueueSetSuper* _queue_set;
   70.15 +  char _pad_before[DEFAULT_CACHE_LINE_SIZE];
   70.16    int _offered_termination;
   70.17 +  char _pad_after[DEFAULT_CACHE_LINE_SIZE];
   70.18  
   70.19  #ifdef TRACESPINNING
   70.20    static uint _total_yields;
    71.1 --- a/test/Makefile	Sat Nov 09 20:15:27 2019 +0800
    71.2 +++ b/test/Makefile	Sat Nov 09 20:29:45 2019 +0800
    71.3 @@ -119,11 +119,11 @@
    71.4  
    71.5  # Root of all test results
    71.6  ifdef ALT_OUTPUTDIR
    71.7 -  ABS_BUILD_ROOT = $(ALT_OUTPUTDIR)/$(PLATFORM)-$(ARCH)
    71.8 +  ABS_BUILD_ROOT = $(ALT_OUTPUTDIR)
    71.9  else
   71.10    ABS_BUILD_ROOT = $(TEST_ROOT)/../build/$(PLATFORM)-$(ARCH)
   71.11  endif
   71.12 -ABS_TEST_OUTPUT_DIR = $(ABS_BUILD_ROOT)/testoutput
   71.13 +ABS_TEST_OUTPUT_DIR = $(ABS_BUILD_ROOT)/testoutput/$(UNIQUE_DIR)
   71.14  
   71.15  # Expect JPRT to set PRODUCT_HOME (the product or jdk in this case to test)
   71.16  ifndef PRODUCT_HOME
   71.17 @@ -267,7 +267,7 @@
   71.18  # Only run automatic tests
   71.19  JTREG_BASIC_OPTIONS += -a
   71.20  # Report details on all failed or error tests, times too
   71.21 -JTREG_BASIC_OPTIONS += -v:fail,error,time
   71.22 +JTREG_BASIC_OPTIONS += -v:fail,error,summary
   71.23  # Retain all files for failing tests
   71.24  JTREG_BASIC_OPTIONS += -retain:fail,error
   71.25  # Ignore tests are not run and completely silent about it
    72.1 --- a/test/TEST.groups	Sat Nov 09 20:15:27 2019 +0800
    72.2 +++ b/test/TEST.groups	Sat Nov 09 20:29:45 2019 +0800
    72.3 @@ -124,7 +124,6 @@
    72.4   -:needs_jre \
    72.5   -:needs_jdk
    72.6  
    72.7 -
    72.8  # When called from top level the test suites use the hotspot_ prefix
    72.9  hotspot_wbapitest = \
   72.10    sanity/
   72.11 @@ -147,6 +146,11 @@
   72.12    :hotspot_gc \
   72.13    :hotspot_runtime \
   72.14    :hotspot_serviceability
   72.15 +
   72.16 +# Right now tier1 runs all hotspot tests
   72.17 +hotspot_tier1 = \
   72.18 +  :jdk
   72.19 +
   72.20  # Tests that require compact3 API's
   72.21  #
   72.22  needs_compact3 = \
    73.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    73.2 +++ b/test/compiler/c1/TestGotoIf.jasm	Sat Nov 09 20:29:45 2019 +0800
    73.3 @@ -0,0 +1,171 @@
    73.4 +/*
    73.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
    73.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.7 + *
    73.8 + * This code is free software; you can redistribute it and/or modify it
    73.9 + * under the terms of the GNU General Public License version 2 only, as
   73.10 + * published by the Free Software Foundation.
   73.11 + *
   73.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   73.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   73.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   73.15 + * version 2 for more details (a copy is included in the LICENSE file that
   73.16 + * accompanied this code).
   73.17 + *
   73.18 + * You should have received a copy of the GNU General Public License version
   73.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   73.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   73.21 + *
   73.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   73.23 + * or visit www.oracle.com if you need additional information or have any
   73.24 + * questions.
   73.25 + *
   73.26 + */
   73.27 +
   73.28 +public class compiler/c1/TestGotoIf version 52:0 {
   73.29 +    public Field f1:"I";
   73.30 +    public Field f2:"I";
   73.31 +    public static Field i:"I";
   73.32 +
   73.33 +    Method "<init>":"()V" stack 1 locals 1 {
   73.34 +        aload_0;
   73.35 +        invokespecial Method java/lang/Object."<init>":"()V";
   73.36 +        return;
   73.37 +    }
   73.38 +
   73.39 +    public Method test1:"()I" stack 3 locals 1 {
   73.40 +        aload_0;
   73.41 +        getfield Field f1:"I";
   73.42 +        aload_0;
   73.43 +        getfield Field f2:"I";
   73.44 +        iconst_1;
   73.45 +        isub;
   73.46 +        // Without the fix, if got eliminated by CEE
   73.47 +        if_icmpgt Null;
   73.48 +        iconst_1;
   73.49 +      Return: stack_frame_type stack1;
   73.50 +      stack_map int;
   73.51 +        ireturn;
   73.52 +      Null: stack_frame_type same;
   73.53 +        iconst_0;
   73.54 +        goto Return; // Backbranch (t_goto) with safepoint
   73.55 +    }
   73.56 +
   73.57 +    public Method test2:"()I" stack 3 locals 1 {
   73.58 +        aload_0;
   73.59 +        getfield Field f1:"I";
   73.60 +        aload_0;
   73.61 +        getfield Field f2:"I";
   73.62 +        iconst_1;
   73.63 +        isub;
   73.64 +        goto Skip;
   73.65 +      Return: stack_frame_type full;
   73.66 +      stack_map int;
   73.67 +        ireturn;
   73.68 +      Skip: stack_frame_type full;
   73.69 +      stack_map int, int;
   73.70 +        // Without the fix, if got eliminated by CEE
   73.71 +        if_icmpgt Null;
   73.72 +        iconst_1;
   73.73 +        goto Return; // Backbranch (f_goto) with safepoint
   73.74 +      Null: stack_frame_type full;
   73.75 +      stack_map;
   73.76 +        iconst_0;
   73.77 +        goto Return; // Backbranch (t_goto) with safepoint
   73.78 +    }
   73.79 +
   73.80 +    public Method test3:"()I" stack 3 locals 1 {
   73.81 +        aload_0;
   73.82 +        getfield Field f1:"I";
   73.83 +        aload_0;
   73.84 +        getfield Field f2:"I";
   73.85 +        iconst_1;
   73.86 +        isub;
   73.87 +        goto Skip;
   73.88 +      Return: stack_frame_type full;
   73.89 +      stack_map int;
   73.90 +        ireturn;
   73.91 +      Null: stack_frame_type full;
   73.92 +      stack_map;
   73.93 +        iconst_0;
   73.94 +        goto Return; // Backbranch (t_goto) with safepoint
   73.95 +      Skip: stack_frame_type full;
   73.96 +      stack_map int, int;
   73.97 +        // If will be eliminated by CEE
   73.98 +        if_icmpgt Null; // Backbranch (if) with safepoint
   73.99 +        iconst_1;
  73.100 +        goto Return; // Backbranch (f_goto) with safepoint
  73.101 +    }
  73.102 +
  73.103 +    public Method test4:"()I" stack 3 locals 1 {
  73.104 +        aload_0;
  73.105 +        getfield Field f1:"I";
  73.106 +        aload_0;
  73.107 +        getfield Field f2:"I";
  73.108 +        iconst_1;
  73.109 +        isub;
  73.110 +        goto Skip;
  73.111 +      Null: stack_frame_type full;
  73.112 +      stack_map;
  73.113 +        iconst_0;
  73.114 +      Return: stack_frame_type full;
  73.115 +      stack_map int;
  73.116 +        ireturn; 
  73.117 +      Skip: stack_frame_type full;
  73.118 +      stack_map int, int;
  73.119 +        // If will be eliminated by CEE
  73.120 +        if_icmpgt Null; // Backbranch (if) with safepoint
  73.121 +        iconst_1;
  73.122 +        goto Return; // Backbranch (f_goto) with safepoint
  73.123 +    }
  73.124 +
  73.125 +    public Method test5:"()I" stack 3 locals 2 {
  73.126 +        aload_0;
  73.127 +        getfield Field f1:"I";
  73.128 +        aload_0;
  73.129 +        getfield Field f2:"I";
  73.130 +        iconst_1;
  73.131 +        isub;
  73.132 +        goto Skip;
  73.133 +      Null: stack_frame_type full;
  73.134 +      stack_map;
  73.135 +        iconst_0;
  73.136 +        goto Return;
  73.137 +      Skip: stack_frame_type full;
  73.138 +      stack_map int, int;
  73.139 +        // If will be eliminated by CEE
  73.140 +        if_icmpgt Null; // Backbranch (if) with safepoint
  73.141 +        iconst_1;
  73.142 +      Return: stack_frame_type full;
  73.143 +      stack_map int;
  73.144 +        ireturn; 
  73.145 +    }
  73.146 +
  73.147 +    public Method test6:"()I" stack 4 locals 1 {
  73.148 +        getstatic Field i:"I";
  73.149 +      Loop: stack_frame_type full;
  73.150 +      stack_map int;
  73.151 +        // Decrement i and exit loop if < 0
  73.152 +        iconst_0;
  73.153 +        getstatic Field i:"I";
  73.154 +        iconst_1;
  73.155 +        isub;
  73.156 +        dup;
  73.157 +        putstatic Field i:"I";
  73.158 +        if_icmpgt Exit;
  73.159 +
  73.160 +        iconst_1;
  73.161 +        // Without the fix, if got eliminated by CEE
  73.162 +        if_icmpgt Null;
  73.163 +        iconst_1;
  73.164 +        goto Loop; // Backbranch (f_goto) with safepoint
  73.165 +      Null: stack_frame_type same;
  73.166 +        iconst_0;
  73.167 +        goto Loop; // Backbranch (t_goto) with safepoint
  73.168 +
  73.169 +      Exit: stack_frame_type full;
  73.170 +      stack_map int;
  73.171 +        iconst_0;
  73.172 +        ireturn; 
  73.173 +    }
  73.174 +}
    74.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    74.2 +++ b/test/compiler/c1/TestGotoIfMain.java	Sat Nov 09 20:29:45 2019 +0800
    74.3 @@ -0,0 +1,46 @@
    74.4 +/*
    74.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
    74.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.7 + *
    74.8 + * This code is free software; you can redistribute it and/or modify it
    74.9 + * under the terms of the GNU General Public License version 2 only, as
   74.10 + * published by the Free Software Foundation.
   74.11 + *
   74.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   74.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   74.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   74.15 + * version 2 for more details (a copy is included in the LICENSE file that
   74.16 + * accompanied this code).
   74.17 + *
   74.18 + * You should have received a copy of the GNU General Public License version
   74.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   74.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   74.21 + *
   74.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   74.23 + * or visit www.oracle.com if you need additional information or have any
   74.24 + * questions.
   74.25 + */
   74.26 +
   74.27 +/*
   74.28 + * @test
   74.29 + * @bug 8218721
   74.30 + * @compile TestGotoIf.jasm
   74.31 + * @run main/othervm -XX:TieredStopAtLevel=1 -Xcomp
   74.32 + *                   -XX:CompileCommand=compileonly,compiler.c1.TestGotoIf::test*
   74.33 + *                   compiler.c1.TestGotoIfMain
   74.34 + */
   74.35 +
   74.36 +package compiler.c1;
   74.37 +
   74.38 +public class TestGotoIfMain {
   74.39 +    public static void main(String[] args) {
   74.40 +        TestGotoIf test = new TestGotoIf();
   74.41 +        test.i = 5;
   74.42 +        test.test1();
   74.43 +        test.test2();
   74.44 +        test.test3();
   74.45 +        test.test4();
   74.46 +        test.test5();
   74.47 +        test.test6();
   74.48 +    }
   74.49 +}
    75.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    75.2 +++ b/test/compiler/c2/Test8217359.java	Sat Nov 09 20:29:45 2019 +0800
    75.3 @@ -0,0 +1,74 @@
    75.4 +/*
    75.5 + * Copyright (c) 2019, Huawei Technologies Co. Ltd. All rights reserved.
    75.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.7 + *
    75.8 + * This code is free software; you can redistribute it and/or modify it
    75.9 + * under the terms of the GNU General Public License version 2 only, as
   75.10 + * published by the Free Software Foundation.
   75.11 + *
   75.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   75.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   75.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   75.15 + * version 2 for more details (a copy is included in the LICENSE file that
   75.16 + * accompanied this code).
   75.17 + *
   75.18 + * You should have received a copy of the GNU General Public License version
   75.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   75.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   75.21 + *
   75.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   75.23 + * or visit www.oracle.com if you need additional information or have any
   75.24 + * questions.
   75.25 + */
   75.26 +
   75.27 +/**
   75.28 + * @test
   75.29 + * @bug 8217359
   75.30 + * @summary C2 compiler triggers SIGSEGV after transformation in ConvI2LNode::Ideal
   75.31 + *
   75.32 + * @run main/othervm -XX:-TieredCompilation
   75.33 + *      -XX:CompileCommand=compileonly,compiler.c2.Test8217359::test
   75.34 + *      compiler.c2.Test8217359
   75.35 + */
   75.36 +
   75.37 +package compiler.c2;
   75.38 +
   75.39 +public class Test8217359 {
   75.40 +
   75.41 +    public static int ival = 0;
   75.42 +    public static long lsum = 0;
   75.43 +    public static long lval = 0;
   75.44 +
   75.45 +    public static void test() {
   75.46 +        short s = -25632;
   75.47 +        float f = 0.512F, f1 = 2.556F;
   75.48 +        int i6 = 32547, i7 = 9, i8 = -9, i9 = 36, i10 = -223;
   75.49 +
   75.50 +        for (i6 = 4; i6 < 182; i6++) {
   75.51 +            i8 = 1;
   75.52 +            while (++i8 < 17) {
   75.53 +                f1 = 1;
   75.54 +                do {
   75.55 +                    i7 += (182 + (f1 * f1));
   75.56 +                } while (++f1 < 1);
   75.57 +
   75.58 +                Test8217359.ival += (i8 | Test8217359.ival);
   75.59 +            }
   75.60 +        }
   75.61 +
   75.62 +        for (i9 = 9; i9 < 100; ++i9) {
   75.63 +            i10 -= i6;
   75.64 +            i10 >>= s;
   75.65 +            i7 += (((i9 * i10) + i6) - Test8217359.lval);
   75.66 +        }
   75.67 +
   75.68 +        lsum += i6 + i7 + i8;
   75.69 +    }
   75.70 +
   75.71 +    public static void main(String[] args) {
   75.72 +        for (int i = 0; i < 16000; i++) {
   75.73 +            test();
   75.74 +        }
   75.75 +    }
   75.76 +
   75.77 +}
    76.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    76.2 +++ b/test/compiler/c2/TestIfWithDeadRegion.java	Sat Nov 09 20:29:45 2019 +0800
    76.3 @@ -0,0 +1,57 @@
    76.4 +/*
    76.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
    76.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.7 + *
    76.8 + * This code is free software; you can redistribute it and/or modify it
    76.9 + * under the terms of the GNU General Public License version 2 only, as
   76.10 + * published by the Free Software Foundation.
   76.11 + *
   76.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   76.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   76.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   76.15 + * version 2 for more details (a copy is included in the LICENSE file that
   76.16 + * accompanied this code).
   76.17 + *
   76.18 + * You should have received a copy of the GNU General Public License version
   76.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   76.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   76.21 + *
   76.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   76.23 + * or visit www.oracle.com if you need additional information or have any
   76.24 + * questions.
   76.25 + */
   76.26 +
   76.27 +/**
   76.28 + * @test
   76.29 + * @bug 8219807
   76.30 + * @summary Test IfNode::up_one_dom() with dead regions.
   76.31 + * @compile -XDstringConcat=inline TestIfWithDeadRegion.java
   76.32 + * @run main/othervm -XX:CompileCommand=compileonly,compiler.c2.TestIfWithDeadRegion::test
   76.33 + *                   compiler.c2.TestIfWithDeadRegion
   76.34 + */
   76.35 +
   76.36 +package compiler.c2;
   76.37 +
   76.38 +import java.util.function.Supplier;
   76.39 +
   76.40 +public class TestIfWithDeadRegion {
   76.41 +
   76.42 +    static String msg;
   76.43 +
   76.44 +    static String getString(String s, int i) {
   76.45 +        String current = s + String.valueOf(i);
   76.46 +        System.nanoTime();
   76.47 +        return current;
   76.48 +    }
   76.49 +
   76.50 +    static void test(Supplier<String> supplier) {
   76.51 +        msg = supplier.get();
   76.52 +    }
   76.53 +
   76.54 +    public static void main(String[] args) {
   76.55 +        for (int i = 0; i < 20_000; ++i) {
   76.56 +            test(() -> getString("Test1", 42));
   76.57 +            test(() -> getString("Test2", 42));
   76.58 +        }
   76.59 +    }
   76.60 +}
    77.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    77.2 +++ b/test/compiler/escapeAnalysis/TestGetClass.java	Sat Nov 09 20:29:45 2019 +0800
    77.3 @@ -0,0 +1,52 @@
    77.4 +/*
    77.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
    77.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    77.7 + *
    77.8 + * This code is free software; you can redistribute it and/or modify it
    77.9 + * under the terms of the GNU General Public License version 2 only, as
   77.10 + * published by the Free Software Foundation.
   77.11 + *
   77.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   77.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   77.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   77.15 + * version 2 for more details (a copy is included in the LICENSE file that
   77.16 + * accompanied this code).
   77.17 + *
   77.18 + * You should have received a copy of the GNU General Public License version
   77.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   77.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   77.21 + *
   77.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   77.23 + * or visit www.oracle.com if you need additional information or have any
   77.24 + * questions.
   77.25 + */
   77.26 +
   77.27 +/*
   77.28 + * @test
   77.29 + * @bug 8218201
   77.30 + * @summary BCEscapeAnalyzer assigns wrong escape state to getClass return value.
   77.31 + * @run main/othervm -XX:-TieredCompilation -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:DisableIntrinsic=_getClass
   77.32 + *                   -XX:CompileCommand=quiet -XX:CompileCommand=compileonly,compiler.escapeAnalysis.TestGetClass::test
   77.33 + *                   -XX:+PrintCompilation compiler.escapeAnalysis.TestGetClass
   77.34 + */
   77.35 +
   77.36 +package compiler.escapeAnalysis;
   77.37 +
   77.38 +public class TestGetClass {
   77.39 +    static Object obj = new Object();
   77.40 +
   77.41 +    public static boolean test() {
   77.42 +        if (obj.getClass() == Object.class) {
   77.43 +            synchronized (obj) {
   77.44 +                return true;
   77.45 +            }
   77.46 +        }
   77.47 +        return false;
   77.48 +    }
   77.49 +
   77.50 +    public static void main(String[] args) {
   77.51 +        if (!test()) {
   77.52 +            throw new RuntimeException("Test failed");
   77.53 +        }
   77.54 +    }
   77.55 +}
    78.1 --- a/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Sat Nov 09 20:15:27 2019 +0800
    78.2 +++ b/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Sat Nov 09 20:29:45 2019 +0800
    78.3 @@ -36,7 +36,8 @@
    78.4      public GenericTestCaseForOtherCPU(String optionName) {
    78.5          // Execute the test case on any CPU except SPARC and X86
    78.6          super(optionName, new NotPredicate(new OrPredicate(Platform::isSparc,
    78.7 -                new OrPredicate(Platform::isX64, Platform::isX86))));
    78.8 +                new OrPredicate(Platform::isPPC,
    78.9 +                new OrPredicate(Platform::isX64, Platform::isX86)))));
   78.10      }
   78.11  
   78.12      @Override
    79.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    79.2 +++ b/test/compiler/loopopts/superword/TestNegBaseOffset.java	Sat Nov 09 20:29:45 2019 +0800
    79.3 @@ -0,0 +1,59 @@
    79.4 +/*
    79.5 + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
    79.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    79.7 + *
    79.8 + * This code is free software; you can redistribute it and/or modify it
    79.9 + * under the terms of the GNU General Public License version 2 only, as
   79.10 + * published by the Free Software Foundation.
   79.11 + *
   79.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   79.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   79.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   79.15 + * version 2 for more details (a copy is included in the LICENSE file that
   79.16 + * accompanied this code).
   79.17 + *
   79.18 + * You should have received a copy of the GNU General Public License version
   79.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   79.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   79.21 + *
   79.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   79.23 + * or visit www.oracle.com if you need additional information or have any
   79.24 + * questions.
   79.25 + */
   79.26 +
   79.27 +/*
   79.28 + * @test
   79.29 + * @bug 8202948
   79.30 + * @summary Test skipping vector packs with negative base offset.
   79.31 + * @comment Test fails only with -Xcomp when profiling data is not present.
   79.32 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions
   79.33 + *                   -Xcomp -XX:-TieredCompilation -XX:CICompilerCount=1
   79.34 + *                   -XX:CompileOnly=compiler/loopopts/superword/TestNegBaseOffset
   79.35 + *                   compiler.loopopts.superword.TestNegBaseOffset
   79.36 + */
   79.37 +
   79.38 +package compiler.loopopts.superword;
   79.39 +
   79.40 +public class TestNegBaseOffset {
   79.41 +    public static final int N = 400;
   79.42 +    public static int iFld=10;
   79.43 +    public static int iArr[]=new int[N];
   79.44 +
   79.45 +    public static void mainTest() {
   79.46 +        int i0=1, i2;
   79.47 +        while (++i0 < 339) {
   79.48 +            if ((i0 % 2) == 0) {
   79.49 +                for (i2 = 2; i2 > i0; i2 -= 3) {
   79.50 +                    iArr[i2 - 1] &= iFld;
   79.51 +                }
   79.52 +            }
   79.53 +        }
   79.54 +    }
   79.55 +
   79.56 +    public static void main(String[] strArr) {
   79.57 +        for (int i = 0; i < 10; i++) {
   79.58 +            mainTest();
   79.59 +        }
   79.60 +    }
   79.61 +}
   79.62 +
    80.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    80.2 +++ b/test/compiler/rangechecks/RangeCheckEliminationScaleNotOne.java	Sat Nov 09 20:29:45 2019 +0800
    80.3 @@ -0,0 +1,100 @@
    80.4 +/*
    80.5 + * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
    80.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.7 + *
    80.8 + * This code is free software; you can redistribute it and/or modify it
    80.9 + * under the terms of the GNU General Public License version 2 only, as
   80.10 + * published by the Free Software Foundation.
   80.11 + *
   80.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   80.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   80.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   80.15 + * version 2 for more details (a copy is included in the LICENSE file that
   80.16 + * accompanied this code).
   80.17 + *
   80.18 + * You should have received a copy of the GNU General Public License version
   80.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   80.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   80.21 + *
   80.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   80.23 + * or visit www.oracle.com if you need additional information or have any
   80.24 + * questions.
   80.25 + */
   80.26 +
   80.27 +/*
   80.28 + * @test
   80.29 + * @bug 8215265
   80.30 + * @summary C2: range check elimination may allow illegal out of bound access
   80.31 + *
   80.32 + * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-UseLoopPredicate RangeCheckEliminationScaleNotOne
   80.33 + *
   80.34 + */
   80.35 +
   80.36 +import java.util.Arrays;
   80.37 +
   80.38 +public class RangeCheckEliminationScaleNotOne {
   80.39 +    public static void main(String[] args) {
   80.40 +        {
   80.41 +            int[] array = new int[199];
   80.42 +            boolean[] flags = new boolean[100];
   80.43 +            Arrays.fill(flags, true);
   80.44 +            flags[0] = false;
   80.45 +            flags[1] = false;
   80.46 +            for (int i = 0; i < 20_000; i++) {
   80.47 +                test1(100, array, 0, flags);
   80.48 +            }
   80.49 +            boolean ex = false;
   80.50 +            try {
   80.51 +                test1(100, array, -5, flags);
   80.52 +            } catch (ArrayIndexOutOfBoundsException aie) {
   80.53 +                ex = true;
   80.54 +            }
   80.55 +            if (!ex) {
   80.56 +                throw new RuntimeException("no AIOOB exception");
   80.57 +            }
   80.58 +        }
   80.59 +
   80.60 +        {
   80.61 +            int[] array = new int[199];
   80.62 +            boolean[] flags = new boolean[100];
   80.63 +            Arrays.fill(flags, true);
   80.64 +            flags[0] = false;
   80.65 +            flags[1] = false;
   80.66 +            for (int i = 0; i < 20_000; i++) {
   80.67 +                test2(100, array, 198, flags);
   80.68 +            }
   80.69 +            boolean ex = false;
   80.70 +            try {
   80.71 +                test2(100, array, 203, flags);
   80.72 +            } catch (ArrayIndexOutOfBoundsException aie) {
   80.73 +                ex = true;
   80.74 +            }
   80.75 +            if (!ex) {
   80.76 +                throw new RuntimeException("no AIOOB exception");
   80.77 +            }
   80.78 +        }
   80.79 +    }
   80.80 +
   80.81 +    private static int test1(int stop, int[] array, int offset, boolean[] flags) {
   80.82 +        if (array == null) {}
   80.83 +        int res = 0;
   80.84 +        for (int i = 0; i < stop; i++) {
   80.85 +            if (flags[i]) {
   80.86 +                res += array[2 * i + offset];
   80.87 +            }
   80.88 +        }
   80.89 +        return res;
   80.90 +    }
   80.91 +
   80.92 +
   80.93 +    private static int test2(int stop, int[] array, int offset, boolean[] flags) {
   80.94 +        if (array == null) {}
   80.95 +        int res = 0;
   80.96 +        for (int i = 0; i < stop; i++) {
   80.97 +            if (flags[i]) {
   80.98 +                res += array[-2 * i + offset];
   80.99 +            }
  80.100 +        }
  80.101 +        return res;
  80.102 +    }
  80.103 +}
    81.1 --- a/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Sat Nov 09 20:15:27 2019 +0800
    81.2 +++ b/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Sat Nov 09 20:29:45 2019 +0800
    81.3 @@ -63,12 +63,20 @@
    81.4                      null);
    81.5  
    81.6      public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE
    81.7 -            = new CPUSpecificPredicate("sparc.*", new String[] { "sha256" },
    81.8 -                    null);
    81.9 +            = new OrPredicate(new CPUSpecificPredicate("sparc.*",   new String[] { "sha256" },
   81.10 +                                                       null),
   81.11 +              new OrPredicate(new CPUSpecificPredicate("ppc64.*",   new String[] { "sha"    },
   81.12 +                                                       null),
   81.13 +                              new CPUSpecificPredicate("ppc64le.*", new String[] { "sha"    },
   81.14 +                                                       null)));
   81.15  
   81.16      public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE
   81.17 -            = new CPUSpecificPredicate("sparc.*", new String[] { "sha512" },
   81.18 -                    null);
   81.19 +            = new OrPredicate(new CPUSpecificPredicate("sparc.*",   new String[] { "sha512" },
   81.20 +                                                       null),
   81.21 +              new OrPredicate(new CPUSpecificPredicate("ppc64.*",   new String[] { "sha"    },
   81.22 +                                                       null),
   81.23 +                              new CPUSpecificPredicate("ppc64le.*", new String[] { "sha"    },
   81.24 +                                                       null)));
   81.25  
   81.26      public static final BooleanSupplier ANY_SHA_INSTRUCTION_AVAILABLE
   81.27              = new OrPredicate(IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE,
    82.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    82.2 +++ b/test/runtime/RedefineTests/RedefineDoubleDelete.java	Sat Nov 09 20:29:45 2019 +0800
    82.3 @@ -0,0 +1,85 @@
    82.4 +/*
    82.5 + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
    82.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.7 + *
    82.8 + * This code is free software; you can redistribute it and/or modify it
    82.9 + * under the terms of the GNU General Public License version 2 only, as
   82.10 + * published by the Free Software Foundation.
   82.11 + *
   82.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   82.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   82.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   82.15 + * version 2 for more details (a copy is included in the LICENSE file that
   82.16 + * accompanied this code).
   82.17 + *
   82.18 + * You should have received a copy of the GNU General Public License version
   82.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   82.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   82.21 + *
   82.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   82.23 + * or visit www.oracle.com if you need additional information or have any
   82.24 + * questions.
   82.25 + */
   82.26 +
   82.27 +/*
   82.28 + * @test
   82.29 + * @bug 8178870
   82.30 + * @library /testlibrary
   82.31 + * @summary Redefine class with CFLH twice to test deleting the cached_class_file
   82.32 + * @build RedefineClassHelper
   82.33 + * @run main RedefineClassHelper
   82.34 + * @run main/othervm -javaagent:redefineagent.jar RedefineDoubleDelete
   82.35 + */
   82.36 +
   82.37 +public class RedefineDoubleDelete {
   82.38 +
   82.39 +    // Class gets a redefinition error because it adds a data member
   82.40 +    public static String newB =
   82.41 +                "class RedefineDoubleDelete$B {" +
   82.42 +                "   int count1 = 0;" +
   82.43 +                "}";
   82.44 +
   82.45 +    public static String newerB =
   82.46 +                "class RedefineDoubleDelete$B { " +
   82.47 +                "   int faa() { System.out.println(\"baa\"); return 2; }" +
   82.48 +                "}";
   82.49 +
   82.50 +    // The ClassFileLoadHook for this class turns foo into faa and prints out faa.
   82.51 +    static class B {
   82.52 +      int faa() { System.out.println("foo"); return 1; }
   82.53 +    }
   82.54 +
   82.55 +    public static void main(String args[]) throws Exception {
   82.56 +
   82.57 +        B b = new B();
   82.58 +        int val = b.faa();
   82.59 +        if (val != 1) {
   82.60 +            throw new RuntimeException("return value wrong " + val);
   82.61 +        }
   82.62 +
   82.63 +        // Redefine B twice to get cached_class_file in both B scratch classes
   82.64 +        try {
   82.65 +            RedefineClassHelper.redefineClass(B.class, newB);
   82.66 +        } catch (java.lang.UnsupportedOperationException e) {
   82.67 +            // this is expected
   82.68 +        }
   82.69 +        try {
   82.70 +            RedefineClassHelper.redefineClass(B.class, newB);
   82.71 +        } catch (java.lang.UnsupportedOperationException e) {
   82.72 +            // this is expected
   82.73 +        }
   82.74 +
   82.75 +        // Do a full GC.
   82.76 +        System.gc();
   82.77 +
   82.78 +        // Redefine with a compatible class
   82.79 +        RedefineClassHelper.redefineClass(B.class, newerB);
   82.80 +        val = b.faa();
   82.81 +        if (val != 2) {
   82.82 +            throw new RuntimeException("return value wrong " + val);
   82.83 +        }
   82.84 +
   82.85 +        // Do another full GC to clean things up.
   82.86 +        System.gc();
   82.87 +    }
   82.88 +}
    83.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    83.2 +++ b/test/runtime/RedefineTests/libRedefineDoubleDelete.c	Sat Nov 09 20:29:45 2019 +0800
    83.3 @@ -0,0 +1,164 @@
    83.4 +/*
    83.5 + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
    83.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    83.7 + *
    83.8 + * This code is free software; you can redistribute it and/or modify it
    83.9 + * under the terms of the GNU General Public License version 2 only, as
   83.10 + * published by the Free Software Foundation.
   83.11 + *
   83.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   83.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   83.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   83.15 + * version 2 for more details (a copy is included in the LICENSE file that
   83.16 + * accompanied this code).
   83.17 + *
   83.18 + * You should have received a copy of the GNU General Public License version
   83.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   83.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   83.21 + *
   83.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   83.23 + * or visit www.oracle.com if you need additional information or have any
   83.24 + * questions.
   83.25 + */
   83.26 +
   83.27 +#include <stdio.h>
   83.28 +#include <string.h>
   83.29 +#include "jvmti.h"
   83.30 +
   83.31 +#ifdef __cplusplus
   83.32 +extern "C" {
   83.33 +#endif
   83.34 +
   83.35 +#ifndef JNI_ENV_ARG
   83.36 +
   83.37 +#ifdef __cplusplus
   83.38 +#define JNI_ENV_ARG(x, y) y
   83.39 +#define JNI_ENV_PTR(x) x
   83.40 +#else
   83.41 +#define JNI_ENV_ARG(x,y) x, y
   83.42 +#define JNI_ENV_PTR(x) (*x)
   83.43 +#endif
   83.44 +
   83.45 +#endif
   83.46 +
   83.47 +#define TranslateError(err) "JVMTI error"
   83.48 +
   83.49 +static jvmtiEnv *jvmti = NULL;
   83.50 +
   83.51 +static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
   83.52 +
   83.53 +JNIEXPORT
   83.54 +jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
   83.55 +    return Agent_Initialize(jvm, options, reserved);
   83.56 +}
   83.57 +
   83.58 +JNIEXPORT
   83.59 +jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
   83.60 +    return Agent_Initialize(jvm, options, reserved);
   83.61 +}
   83.62 +
   83.63 +JNIEXPORT
   83.64 +jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
   83.65 +    return JNI_VERSION_1_8;
   83.66 +}
   83.67 +
   83.68 +
   83.69 +static jint newClassDataLen = 0;
   83.70 +static unsigned char* newClassData = NULL;
   83.71 +
   83.72 +static jint
   83.73 +getBytecodes(jvmtiEnv *jvmti_env,
   83.74 +             jint class_data_len, const unsigned char* class_data) {
   83.75 +    int i;
   83.76 +    jint res;
   83.77 +
   83.78 +    newClassDataLen = class_data_len;
   83.79 +    res = (*jvmti_env)->Allocate(jvmti_env, newClassDataLen, &newClassData);
   83.80 +    if (res != JNI_OK) {
   83.81 +        printf("    Unable to allocate bytes\n");
   83.82 +        return JNI_ERR;
   83.83 +    }
   83.84 +    for (i = 0; i < newClassDataLen; i++) {
   83.85 +        newClassData[i] = class_data[i];
   83.86 +        // Rewrite oo in class to aa
   83.87 +        if (i > 0 && class_data[i] == 'o' && class_data[i-1] == 'o') {
   83.88 +            newClassData[i] = newClassData[i-1] = 'a';
   83.89 +        }
   83.90 +    }
   83.91 +    printf("  ... copied bytecode: %d bytes\n", (int)newClassDataLen);
   83.92 +    return JNI_OK;
   83.93 +}
   83.94 +
   83.95 +
   83.96 +static void JNICALL
   83.97 +Callback_ClassFileLoadHook(jvmtiEnv *jvmti_env, JNIEnv *env,
   83.98 +                           jclass class_being_redefined,
   83.99 +                           jobject loader, const char* name, jobject protection_domain,
  83.100 +                           jint class_data_len, const unsigned char* class_data,
  83.101 +                           jint *new_class_data_len, unsigned char** new_class_data) {
  83.102 +    if (name != NULL && strcmp(name, "RedefineDoubleDelete$B") == 0) {
  83.103 +        if (newClassData == NULL) {
  83.104 +            jint res = getBytecodes(jvmti_env, class_data_len, class_data);
  83.105 +            if (res == JNI_ERR) {
  83.106 +              printf(">>>    ClassFileLoadHook event: class name %s FAILED\n", name);
  83.107 +              return;
  83.108 +            }
  83.109 +            // Only change for first CFLH event.
  83.110 +            *new_class_data_len = newClassDataLen;
  83.111 +            *new_class_data = newClassData;
  83.112 +        }
  83.113 +        printf(">>>    ClassFileLoadHook event: class name %s\n", name);
  83.114 +    }
  83.115 +}
  83.116 +
  83.117 +static
  83.118 +jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
  83.119 +    jint res, size;
  83.120 +    jvmtiCapabilities caps;
  83.121 +    jvmtiEventCallbacks callbacks;
  83.122 +    jvmtiError err;
  83.123 +
  83.124 +    res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
  83.125 +        JVMTI_VERSION_1_2);
  83.126 +    if (res != JNI_OK || jvmti == NULL) {
  83.127 +        printf("    Error: wrong result of a valid call to GetEnv!\n");
  83.128 +        return JNI_ERR;
  83.129 +    }
  83.130 +
  83.131 +    printf("Enabling following capabilities: can_generate_all_class_hook_events, "
  83.132 +           "can_retransform_classes, can_redefine_classes");
  83.133 +    memset(&caps, 0, sizeof(caps));
  83.134 +    caps.can_generate_all_class_hook_events = 1;
  83.135 +    caps.can_retransform_classes = 1;
  83.136 +    caps.can_redefine_classes = 1;
  83.137 +    printf("\n");
  83.138 +
  83.139 +    err = (*jvmti)->AddCapabilities(jvmti, &caps);
  83.140 +    if (err != JVMTI_ERROR_NONE) {
  83.141 +        printf("    Error in AddCapabilites: %s (%d)\n", TranslateError(err), err);
  83.142 +        return JNI_ERR;
  83.143 +    }
  83.144 +
  83.145 +    size = (jint)sizeof(callbacks);
  83.146 +
  83.147 +    memset(&callbacks, 0, sizeof(callbacks));
  83.148 +    callbacks.ClassFileLoadHook = Callback_ClassFileLoadHook;
  83.149 +
  83.150 +    err = (*jvmti)->SetEventCallbacks(jvmti, &callbacks, size);
  83.151 +    if (err != JVMTI_ERROR_NONE) {
  83.152 +        printf("    Error in SetEventCallbacks: %s (%d)\n", TranslateError(err), err);
  83.153 +        return JNI_ERR;
  83.154 +    }
  83.155 +
  83.156 +    err = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL);
  83.157 +    if (err != JVMTI_ERROR_NONE) {
  83.158 +        printf("    Error in SetEventNotificationMode: %s (%d)\n", TranslateError(err), err);
  83.159 +        return JNI_ERR;
  83.160 +    }
  83.161 +
  83.162 +    return JNI_OK;
  83.163 +}
  83.164 +
  83.165 +#ifdef __cplusplus
  83.166 +}
  83.167 +#endif
    84.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    84.2 +++ b/test/runtime/VtableTests/VTableTest.java	Sat Nov 09 20:29:45 2019 +0800
    84.3 @@ -0,0 +1,50 @@
    84.4 +/*
    84.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
    84.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    84.7 + *
    84.8 + * This code is free software; you can redistribute it and/or modify it
    84.9 + * under the terms of the GNU General Public License version 2 only, as
   84.10 + * published by the Free Software Foundation.
   84.11 + *
   84.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   84.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   84.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   84.15 + * version 2 for more details (a copy is included in the LICENSE file that
   84.16 + * accompanied this code).
   84.17 + *
   84.18 + * You should have received a copy of the GNU General Public License version
   84.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   84.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   84.21 + *
   84.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   84.23 + * or visit www.oracle.com if you need additional information or have any
   84.24 + * questions.
   84.25 + */
   84.26 +
   84.27 +/*
   84.28 + * @test
   84.29 + * @bug 8226798
   84.30 + * @summary Check that the vTable for class C gets set up without causing
   84.31 + *          an assertion failure.
   84.32 + * @compile pkg/A.java
   84.33 + * @run main VTableTest
   84.34 + */
   84.35 +
   84.36 +public class VTableTest {
   84.37 +
   84.38 +    interface Intf {
   84.39 +        public default void m() { }
   84.40 +        public default void unusedButNeededToReproduceIssue() { }
   84.41 +    }
   84.42 +
   84.43 +    static class B extends pkg.A implements Intf {
   84.44 +    }
   84.45 +
   84.46 +    static class C extends B {
   84.47 +        public void m() { System.out.println("In C.m()"); }
   84.48 +    }
   84.49 +
   84.50 +    public static void main(String[] args) {
   84.51 +        new C().m();
   84.52 +    }
   84.53 +}
    85.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    85.2 +++ b/test/runtime/VtableTests/pkg/A.java	Sat Nov 09 20:29:45 2019 +0800
    85.3 @@ -0,0 +1,28 @@
    85.4 +/*
    85.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
    85.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    85.7 + *
    85.8 + * This code is free software; you can redistribute it and/or modify it
    85.9 + * under the terms of the GNU General Public License version 2 only, as
   85.10 + * published by the Free Software Foundation.
   85.11 + *
   85.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   85.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   85.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   85.15 + * version 2 for more details (a copy is included in the LICENSE file that
   85.16 + * accompanied this code).
   85.17 + *
   85.18 + * You should have received a copy of the GNU General Public License version
   85.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   85.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   85.21 + *
   85.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   85.23 + * or visit www.oracle.com if you need additional information or have any
   85.24 + * questions.
   85.25 + */
   85.26 +
   85.27 +package pkg;
   85.28 +
   85.29 +public class A {
   85.30 +    void m() { }
   85.31 +}

mercurial