Thu, 02 Nov 2017 11:31:01 -0700
Merge
.hgtags | file | annotate | diff | comparison | revisions |
1.1 --- a/.hgtags Fri Oct 27 20:39:22 2017 +0100 1.2 +++ b/.hgtags Thu Nov 02 11:31:01 2017 -0700 1.3 @@ -1101,4 +1101,14 @@ 1.4 98fb3d75fe567bda30a3667c58a9f83ad2acbdf3 jdk8u152-b06 1.5 02d5e7b64b288429f3836dcfb8ac304774775965 jdk8u161-b00 1.6 432f92e99174244479f0011169baf828368506c7 jdk8u161-b01 1.7 +a17bab9405474602b18cd62e060a09b17d6413ac jdk8u161-b02 1.8 +4cf0fd9f1fe53e3140413623d72f6a00e587830d jdk8u161-b03 1.9 +4022d56fc6b83eab7213c92e1277e7ce9753a21f jdk8u161-b04 1.10 +37e8b74faccc8e795be1a171033487e2270dc605 jdk8u161-b05 1.11 +7b96cfeed22242bb68a387d1680e602e37e48050 jdk8u162-b00 1.12 +92693f9dd704467ddd5fbae5a5908c1713a08ee0 jdk8u162-b01 1.13 +741b7a6743c0e02eae7e4a864f378fa1df0da112 jdk8u162-b02 1.14 +e0c000e8eb7588501637873c6dcd255306c9c5cc jdk8u162-b03 1.15 +18366fa39fe0839291b672ce4f8f12605d88b0d0 jdk8u162-b04 1.16 +e34428c12886692f9d562263a10bc72c8d222613 jdk8u162-b05 1.17 a17bab9405474602b18cd62e060a09b17d6413ac jdk8u171-b00
2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Fri Oct 27 20:39:22 2017 +0100 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Thu Nov 02 11:31:01 2017 -0700 2.3 @@ -1,5 +1,5 @@ 2.4 /* 2.5 - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 2.6 + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. 2.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.8 * 2.9 * This code is free software; you can redistribute it and/or modify it 2.10 @@ -103,11 +103,12 @@ 2.11 } 2.12 2.13 SystemDictionary dict = VM.getVM().getSystemDictionary(); 2.14 - dict.classesDo(new SystemDictionary.ClassAndLoaderVisitor() { 2.15 - public void visit(Klass k, Oop loader) { 2.16 + dict.classesDo(new SystemDictionary.ClassVisitor() { 2.17 + public void visit(Klass k) { 2.18 if (! (k instanceof InstanceKlass)) { 2.19 return; 2.20 } 2.21 + Oop loader = ((InstanceKlass) k).getClassLoader(); 2.22 LoaderData ld = (loader != null) ? (LoaderData)loaderMap.get(loader) 2.23 : bootstrapLoaderData; 2.24 if (ld != null) {
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Fri Oct 27 20:39:22 2017 +0100 3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Thu Nov 02 11:31:01 2017 -0700 3.3 @@ -1,5 +1,5 @@ 3.4 /* 3.5 - * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. 3.6 + * Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved. 3.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3.8 * 3.9 * This code is free software; you can redistribute it and/or modify it 3.10 @@ -804,6 +804,16 @@ 3.11 // VM type to SA class map 3.12 var vmType2Class = new Object(); 3.13 3.14 +// C2 only classes 3.15 +try{ 3.16 + vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob; 3.17 + vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob; 3.18 +} catch(e) { 3.19 + // Ignore exception. C2 specific objects might be not 3.20 + // available in client VM 3.21 +} 3.22 + 3.23 + 3.24 // This is *not* exhaustive. Add more if needed. 3.25 // code blobs 3.26 vmType2Class["BufferBlob"] = sapkg.code.BufferBlob; 3.27 @@ -812,10 +822,8 @@ 3.28 vmType2Class["SafepointBlob"] = sapkg.code.SafepointBlob; 3.29 vmType2Class["C2IAdapter"] = sapkg.code.C2IAdapter; 3.30 vmType2Class["DeoptimizationBlob"] = sapkg.code.DeoptimizationBlob; 3.31 -vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob; 3.32 vmType2Class["I2CAdapter"] = sapkg.code.I2CAdapter; 3.33 vmType2Class["OSRAdapter"] = sapkg.code.OSRAdapter; 3.34 -vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob; 3.35 vmType2Class["PCDesc"] = sapkg.code.PCDesc; 3.36 3.37 // interpreter
4.1 --- a/src/cpu/ppc/vm/assembler_ppc.hpp Fri Oct 27 20:39:22 2017 +0100 4.2 +++ b/src/cpu/ppc/vm/assembler_ppc.hpp Thu Nov 02 11:31:01 2017 -0700 4.3 @@ -1180,6 +1180,8 @@ 4.4 inline void mullw_( Register d, Register a, Register b); 4.5 inline void mulhw( Register d, Register a, Register b); 4.6 inline void mulhw_( Register d, Register a, Register b); 4.7 + inline void mulhwu( Register d, Register a, Register b); 4.8 + inline void mulhwu_(Register d, Register a, Register b); 4.9 inline void mulhd( Register d, Register a, Register b); 4.10 inline void mulhd_( Register d, Register a, Register b); 4.11 inline void mulhdu( Register d, Register a, Register b);
5.1 --- a/src/cpu/ppc/vm/assembler_ppc.inline.hpp Fri Oct 27 20:39:22 2017 +0100 5.2 +++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp Thu Nov 02 11:31:01 2017 -0700 5.3 @@ -109,6 +109,8 @@ 5.4 inline void Assembler::mullw_( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); } 5.5 inline void Assembler::mulhw( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); } 5.6 inline void Assembler::mulhw_( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); } 5.7 +inline void Assembler::mulhwu( Register d, Register a, Register b) { emit_int32(MULHWU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); } 5.8 +inline void Assembler::mulhwu_(Register d, Register a, Register b) { emit_int32(MULHWU_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); } 5.9 inline void Assembler::mulhd( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); } 5.10 inline void Assembler::mulhd_( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); } 5.11 inline void Assembler::mulhdu( Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
6.1 --- a/src/cpu/ppc/vm/c2_init_ppc.cpp Fri Oct 27 20:39:22 2017 +0100 6.2 +++ b/src/cpu/ppc/vm/c2_init_ppc.cpp Thu Nov 02 11:31:01 2017 -0700 6.3 @@ -45,4 +45,10 @@ 6.4 FLAG_SET_ERGO(bool, InsertEndGroupPPC64, true); 6.5 } 6.6 } 6.7 + 6.8 + if (OptimizeFill) { 6.9 + warning("OptimizeFill is not supported on this CPU."); 6.10 + FLAG_SET_DEFAULT(OptimizeFill, false); 6.11 + } 6.12 + 6.13 }
7.1 --- a/src/cpu/ppc/vm/ppc.ad Fri Oct 27 20:39:22 2017 +0100 7.2 +++ b/src/cpu/ppc/vm/ppc.ad Thu Nov 02 11:31:01 2017 -0700 7.3 @@ -1,6 +1,6 @@ 7.4 // 7.5 // Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. 7.6 -// Copyright 2012, 2014 SAP AG. All rights reserved. 7.7 +// Copyright (c) 2012, 2017 SAP SE. All rights reserved. 7.8 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 7.9 // 7.10 // This code is free software; you can redistribute it and/or modify it 7.11 @@ -8610,6 +8610,44 @@ 7.12 ins_pipe(pipe_class_default); 7.13 %} 7.14 7.15 +// Bitfield Extract: URShiftI + AndI 7.16 +instruct andI_urShiftI_regI_immI_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immI src2, immIpow2minus1 src3) %{ 7.17 + match(Set dst (AndI (URShiftI src1 src2) src3)); 7.18 + 7.19 + format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// int bitfield extract" %} 7.20 + size(4); 7.21 + ins_encode %{ 7.22 + // TODO: PPC port $archOpcode(ppc64Opcode_rldicl); 7.23 + int rshift = ($src2$$constant) & 0x1f; 7.24 + int length = log2_long(((jlong) $src3$$constant) + 1); 7.25 + if (rshift + length > 32) { 7.26 + // if necessary, adjust mask to omit rotated bits. 7.27 + length = 32 - rshift; 7.28 + } 7.29 + __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length)); 7.30 + %} 7.31 + ins_pipe(pipe_class_default); 7.32 +%} 7.33 + 7.34 +// Bitfield Extract: URShiftL + AndL 7.35 +instruct andL_urShiftL_regL_immI_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immI src2, immLpow2minus1 src3) %{ 7.36 + match(Set dst (AndL (URShiftL src1 src2) src3)); 7.37 + 7.38 + format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// long bitfield extract" %} 7.39 + size(4); 7.40 + ins_encode %{ 7.41 + // TODO: PPC port $archOpcode(ppc64Opcode_rldicl); 7.42 + int rshift = ($src2$$constant) & 0x3f; 7.43 + int length = log2_long(((jlong) $src3$$constant) + 1); 7.44 + if (rshift + length > 64) { 7.45 + // if necessary, adjust mask to omit rotated bits. 
7.46 + length = 64 - rshift; 7.47 + } 7.48 + __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length)); 7.49 + %} 7.50 + ins_pipe(pipe_class_default); 7.51 +%} 7.52 + 7.53 instruct sxtI_reg(iRegIdst dst, iRegIsrc src) %{ 7.54 match(Set dst (ConvL2I (ConvI2L src))); 7.55 7.56 @@ -8889,6 +8927,19 @@ 7.57 ins_pipe(pipe_class_default); 7.58 %} 7.59 7.60 +// Left shifted Immediate And 7.61 +instruct andI_reg_immIhi16(iRegIdst dst, iRegIsrc src1, immIhi16 src2, flagsRegCR0 cr0) %{ 7.62 + match(Set dst (AndI src1 src2)); 7.63 + effect(KILL cr0); 7.64 + format %{ "ANDIS $dst, $src1, $src2.hi" %} 7.65 + size(4); 7.66 + ins_encode %{ 7.67 + // TODO: PPC port $archOpcode(ppc64Opcode_andis_); 7.68 + __ andis_($dst$$Register, $src1$$Register, (int)((unsigned short)(($src2$$constant & 0xFFFF0000) >> 16))); 7.69 + %} 7.70 + ins_pipe(pipe_class_default); 7.71 +%} 7.72 + 7.73 // Immediate And 7.74 instruct andI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2, flagsRegCR0 cr0) %{ 7.75 match(Set dst (AndI src1 src2)); 7.76 @@ -10571,6 +10622,17 @@ 7.77 ins_pipe(pipe_class_compare); 7.78 %} 7.79 7.80 +instruct cmpP_reg_null(flagsReg crx, iRegP_N2P src1, immP_0or1 src2) %{ 7.81 + match(Set crx (CmpP src1 src2)); 7.82 + format %{ "CMPLDI $crx, $src1, $src2 \t// ptr" %} 7.83 + size(4); 7.84 + ins_encode %{ 7.85 + // TODO: PPC port $archOpcode(ppc64Opcode_cmpl); 7.86 + __ cmpldi($crx$$CondRegister, $src1$$Register, (int)((short)($src2$$constant & 0xFFFF))); 7.87 + %} 7.88 + ins_pipe(pipe_class_compare); 7.89 +%} 7.90 + 7.91 // Used in postalloc expand. 7.92 instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{ 7.93 // This match rule prevents reordering of node before a safepoint.
8.1 --- a/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Fri Oct 27 20:39:22 2017 +0100 8.2 +++ b/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Thu Nov 02 11:31:01 2017 -0700 8.3 @@ -42,6 +42,8 @@ 8.4 #include "opto/runtime.hpp" 8.5 #endif 8.6 8.7 +#include <alloca.h> 8.8 + 8.9 #define __ masm-> 8.10 8.11 #ifdef PRODUCT 8.12 @@ -3268,3 +3270,245 @@ 8.13 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize, 8.14 oop_maps, true); 8.15 } 8.16 + 8.17 + 8.18 +//------------------------------Montgomery multiplication------------------------ 8.19 +// 8.20 + 8.21 +// Subtract 0:b from carry:a. Return carry. 8.22 +static unsigned long 8.23 +sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) { 8.24 + long i = 0; 8.25 + unsigned long tmp, tmp2; 8.26 + __asm__ __volatile__ ( 8.27 + "subfc %[tmp], %[tmp], %[tmp] \n" // pre-set CA 8.28 + "mtctr %[len] \n" 8.29 + "0: \n" 8.30 + "ldx %[tmp], %[i], %[a] \n" 8.31 + "ldx %[tmp2], %[i], %[b] \n" 8.32 + "subfe %[tmp], %[tmp2], %[tmp] \n" // subtract extended 8.33 + "stdx %[tmp], %[i], %[a] \n" 8.34 + "addi %[i], %[i], 8 \n" 8.35 + "bdnz 0b \n" 8.36 + "addme %[tmp], %[carry] \n" // carry + CA - 1 8.37 + : [i]"+b"(i), [tmp]"=&r"(tmp), [tmp2]"=&r"(tmp2) 8.38 + : [a]"r"(a), [b]"r"(b), [carry]"r"(carry), [len]"r"(len) 8.39 + : "ctr", "xer", "memory" 8.40 + ); 8.41 + return tmp; 8.42 +} 8.43 + 8.44 +// Multiply (unsigned) Long A by Long B, accumulating the double- 8.45 +// length result into the accumulator formed of T0, T1, and T2. 
8.46 +inline void MACC(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 8.47 + unsigned long hi, lo; 8.48 + __asm__ __volatile__ ( 8.49 + "mulld %[lo], %[A], %[B] \n" 8.50 + "mulhdu %[hi], %[A], %[B] \n" 8.51 + "addc %[T0], %[T0], %[lo] \n" 8.52 + "adde %[T1], %[T1], %[hi] \n" 8.53 + "addze %[T2], %[T2] \n" 8.54 + : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 8.55 + : [A]"r"(A), [B]"r"(B) 8.56 + : "xer" 8.57 + ); 8.58 +} 8.59 + 8.60 +// As above, but add twice the double-length result into the 8.61 +// accumulator. 8.62 +inline void MACC2(unsigned long A, unsigned long B, unsigned long &T0, unsigned long &T1, unsigned long &T2) { 8.63 + unsigned long hi, lo; 8.64 + __asm__ __volatile__ ( 8.65 + "mulld %[lo], %[A], %[B] \n" 8.66 + "mulhdu %[hi], %[A], %[B] \n" 8.67 + "addc %[T0], %[T0], %[lo] \n" 8.68 + "adde %[T1], %[T1], %[hi] \n" 8.69 + "addze %[T2], %[T2] \n" 8.70 + "addc %[T0], %[T0], %[lo] \n" 8.71 + "adde %[T1], %[T1], %[hi] \n" 8.72 + "addze %[T2], %[T2] \n" 8.73 + : [hi]"=&r"(hi), [lo]"=&r"(lo), [T0]"+r"(T0), [T1]"+r"(T1), [T2]"+r"(T2) 8.74 + : [A]"r"(A), [B]"r"(B) 8.75 + : "xer" 8.76 + ); 8.77 +} 8.78 + 8.79 +// Fast Montgomery multiplication. The derivation of the algorithm is 8.80 +// in "A Cryptographic Library for the Motorola DSP56000, 8.81 +// Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237". 
8.82 +static void 8.83 +montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[], 8.84 + unsigned long m[], unsigned long inv, int len) { 8.85 + unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 8.86 + int i; 8.87 + 8.88 + assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 8.89 + 8.90 + for (i = 0; i < len; i++) { 8.91 + int j; 8.92 + for (j = 0; j < i; j++) { 8.93 + MACC(a[j], b[i-j], t0, t1, t2); 8.94 + MACC(m[j], n[i-j], t0, t1, t2); 8.95 + } 8.96 + MACC(a[i], b[0], t0, t1, t2); 8.97 + m[i] = t0 * inv; 8.98 + MACC(m[i], n[0], t0, t1, t2); 8.99 + 8.100 + assert(t0 == 0, "broken Montgomery multiply"); 8.101 + 8.102 + t0 = t1; t1 = t2; t2 = 0; 8.103 + } 8.104 + 8.105 + for (i = len; i < 2*len; i++) { 8.106 + int j; 8.107 + for (j = i-len+1; j < len; j++) { 8.108 + MACC(a[j], b[i-j], t0, t1, t2); 8.109 + MACC(m[j], n[i-j], t0, t1, t2); 8.110 + } 8.111 + m[i-len] = t0; 8.112 + t0 = t1; t1 = t2; t2 = 0; 8.113 + } 8.114 + 8.115 + while (t0) { 8.116 + t0 = sub(m, n, t0, len); 8.117 + } 8.118 +} 8.119 + 8.120 +// Fast Montgomery squaring. This uses asymptotically 25% fewer 8.121 +// multiplies so it should be up to 25% faster than Montgomery 8.122 +// multiplication. However, its loop control is more complex and it 8.123 +// may actually run slower on some machines. 
8.124 +static void 8.125 +montgomery_square(unsigned long a[], unsigned long n[], 8.126 + unsigned long m[], unsigned long inv, int len) { 8.127 + unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator 8.128 + int i; 8.129 + 8.130 + assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); 8.131 + 8.132 + for (i = 0; i < len; i++) { 8.133 + int j; 8.134 + int end = (i+1)/2; 8.135 + for (j = 0; j < end; j++) { 8.136 + MACC2(a[j], a[i-j], t0, t1, t2); 8.137 + MACC(m[j], n[i-j], t0, t1, t2); 8.138 + } 8.139 + if ((i & 1) == 0) { 8.140 + MACC(a[j], a[j], t0, t1, t2); 8.141 + } 8.142 + for (; j < i; j++) { 8.143 + MACC(m[j], n[i-j], t0, t1, t2); 8.144 + } 8.145 + m[i] = t0 * inv; 8.146 + MACC(m[i], n[0], t0, t1, t2); 8.147 + 8.148 + assert(t0 == 0, "broken Montgomery square"); 8.149 + 8.150 + t0 = t1; t1 = t2; t2 = 0; 8.151 + } 8.152 + 8.153 + for (i = len; i < 2*len; i++) { 8.154 + int start = i-len+1; 8.155 + int end = start + (len - start)/2; 8.156 + int j; 8.157 + for (j = start; j < end; j++) { 8.158 + MACC2(a[j], a[i-j], t0, t1, t2); 8.159 + MACC(m[j], n[i-j], t0, t1, t2); 8.160 + } 8.161 + if ((i & 1) == 0) { 8.162 + MACC(a[j], a[j], t0, t1, t2); 8.163 + } 8.164 + for (; j < len; j++) { 8.165 + MACC(m[j], n[i-j], t0, t1, t2); 8.166 + } 8.167 + m[i-len] = t0; 8.168 + t0 = t1; t1 = t2; t2 = 0; 8.169 + } 8.170 + 8.171 + while (t0) { 8.172 + t0 = sub(m, n, t0, len); 8.173 + } 8.174 +} 8.175 + 8.176 +// The threshold at which squaring is advantageous was determined 8.177 +// experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz. 8.178 +// Doesn't seem to be relevant for Power8 so we use the same value. 8.179 +#define MONTGOMERY_SQUARING_THRESHOLD 64 8.180 + 8.181 +// Copy len longwords from s to d, word-swapping as we go. The 8.182 +// destination array is reversed. 
8.183 +static void reverse_words(unsigned long *s, unsigned long *d, int len) { 8.184 + d += len; 8.185 + while(len-- > 0) { 8.186 + d--; 8.187 + unsigned long s_val = *s; 8.188 + // Swap words in a longword on little endian machines. 8.189 +#ifdef VM_LITTLE_ENDIAN 8.190 + s_val = (s_val << 32) | (s_val >> 32); 8.191 +#endif 8.192 + *d = s_val; 8.193 + s++; 8.194 + } 8.195 +} 8.196 + 8.197 +void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, 8.198 + jint len, jlong inv, 8.199 + jint *m_ints) { 8.200 + assert(len % 2 == 0, "array length in montgomery_multiply must be even"); 8.201 + int longwords = len/2; 8.202 + assert(longwords > 0, "unsupported"); 8.203 + 8.204 + // Make very sure we don't use so much space that the stack might 8.205 + // overflow. 512 jints corresponds to an 16384-bit integer and 8.206 + // will use here a total of 8k bytes of stack space. 8.207 + int total_allocation = longwords * sizeof (unsigned long) * 4; 8.208 + guarantee(total_allocation <= 8192, "must be"); 8.209 + unsigned long *scratch = (unsigned long *)alloca(total_allocation); 8.210 + 8.211 + // Local scratch arrays 8.212 + unsigned long 8.213 + *a = scratch + 0 * longwords, 8.214 + *b = scratch + 1 * longwords, 8.215 + *n = scratch + 2 * longwords, 8.216 + *m = scratch + 3 * longwords; 8.217 + 8.218 + reverse_words((unsigned long *)a_ints, a, longwords); 8.219 + reverse_words((unsigned long *)b_ints, b, longwords); 8.220 + reverse_words((unsigned long *)n_ints, n, longwords); 8.221 + 8.222 + ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords); 8.223 + 8.224 + reverse_words(m, (unsigned long *)m_ints, longwords); 8.225 +} 8.226 + 8.227 +void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, 8.228 + jint len, jlong inv, 8.229 + jint *m_ints) { 8.230 + assert(len % 2 == 0, "array length in montgomery_square must be even"); 8.231 + int longwords = len/2; 8.232 + assert(longwords > 0, "unsupported"); 8.233 + 8.234 + // Make very sure 
we don't use so much space that the stack might 8.235 + // overflow. 512 jints corresponds to an 16384-bit integer and 8.236 + // will use here a total of 6k bytes of stack space. 8.237 + int total_allocation = longwords * sizeof (unsigned long) * 3; 8.238 + guarantee(total_allocation <= 8192, "must be"); 8.239 + unsigned long *scratch = (unsigned long *)alloca(total_allocation); 8.240 + 8.241 + // Local scratch arrays 8.242 + unsigned long 8.243 + *a = scratch + 0 * longwords, 8.244 + *n = scratch + 1 * longwords, 8.245 + *m = scratch + 2 * longwords; 8.246 + 8.247 + reverse_words((unsigned long *)a_ints, a, longwords); 8.248 + reverse_words((unsigned long *)n_ints, n, longwords); 8.249 + 8.250 + if (len >= MONTGOMERY_SQUARING_THRESHOLD) { 8.251 + ::montgomery_square(a, n, m, (unsigned long)inv, longwords); 8.252 + } else { 8.253 + ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords); 8.254 + } 8.255 + 8.256 + reverse_words(m, (unsigned long *)m_ints, longwords); 8.257 +}
9.1 --- a/src/cpu/ppc/vm/stubGenerator_ppc.cpp Fri Oct 27 20:39:22 2017 +0100 9.2 +++ b/src/cpu/ppc/vm/stubGenerator_ppc.cpp Thu Nov 02 11:31:01 2017 -0700 9.3 @@ -2524,6 +2524,14 @@ 9.4 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 9.5 } 9.6 9.7 + if (UseMontgomeryMultiplyIntrinsic) { 9.8 + StubRoutines::_montgomeryMultiply 9.9 + = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 9.10 + } 9.11 + if (UseMontgomerySquareIntrinsic) { 9.12 + StubRoutines::_montgomerySquare 9.13 + = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 9.14 + } 9.15 } 9.16 9.17 public:
10.1 --- a/src/cpu/ppc/vm/templateInterpreter_ppc.cpp Fri Oct 27 20:39:22 2017 +0100 10.2 +++ b/src/cpu/ppc/vm/templateInterpreter_ppc.cpp Thu Nov 02 11:31:01 2017 -0700 10.3 @@ -265,7 +265,7 @@ 10.4 __ cmpdi(CCR0, Rmdo, 0); 10.5 __ beq(CCR0, no_mdo); 10.6 10.7 - // Increment backedge counter in the MDO. 10.8 + // Increment invocation counter in the MDO. 10.9 const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); 10.10 __ lwz(Rscratch2, mdo_bc_offs, Rmdo); 10.11 __ addi(Rscratch2, Rscratch2, increment); 10.12 @@ -277,12 +277,12 @@ 10.13 } 10.14 10.15 // Increment counter in MethodCounters*. 10.16 - const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); 10.17 + const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); 10.18 __ bind(no_mdo); 10.19 __ get_method_counters(R19_method, R3_counters, done); 10.20 - __ lwz(Rscratch2, mo_bc_offs, R3_counters); 10.21 + __ lwz(Rscratch2, mo_ic_offs, R3_counters); 10.22 __ addi(Rscratch2, Rscratch2, increment); 10.23 - __ stw(Rscratch2, mo_bc_offs, R3_counters); 10.24 + __ stw(Rscratch2, mo_ic_offs, R3_counters); 10.25 __ load_const_optimized(Rscratch1, mask, R0); 10.26 __ and_(Rscratch1, Rscratch2, Rscratch1); 10.27 __ beq(CCR0, *overflow);
11.1 --- a/src/cpu/ppc/vm/vm_version_ppc.cpp Fri Oct 27 20:39:22 2017 +0100 11.2 +++ b/src/cpu/ppc/vm/vm_version_ppc.cpp Thu Nov 02 11:31:01 2017 -0700 11.3 @@ -201,6 +201,12 @@ 11.4 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); 11.5 } 11.6 11.7 + if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) { 11.8 + UseMontgomeryMultiplyIntrinsic = true; 11.9 + } 11.10 + if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) { 11.11 + UseMontgomerySquareIntrinsic = true; 11.12 + } 11.13 } 11.14 11.15 void VM_Version::print_features() {
12.1 --- a/src/os/linux/vm/os_linux.cpp Fri Oct 27 20:39:22 2017 +0100 12.2 +++ b/src/os/linux/vm/os_linux.cpp Thu Nov 02 11:31:01 2017 -0700 12.3 @@ -2819,11 +2819,8 @@ 12.4 extern "C" JNIEXPORT void numa_error(char *where) { } 12.5 extern "C" JNIEXPORT int fork1() { return fork(); } 12.6 12.7 - 12.8 -// If we are running with libnuma version > 2, then we should 12.9 -// be trying to use symbols with versions 1.1 12.10 -// If we are running with earlier version, which did not have symbol versions, 12.11 -// we should use the base version. 12.12 +// Handle request to load libnuma symbol version 1.1 (API v1). If it fails 12.13 +// load symbol from base version instead. 12.14 void* os::Linux::libnuma_dlsym(void* handle, const char *name) { 12.15 void *f = dlvsym(handle, name, "libnuma_1.1"); 12.16 if (f == NULL) { 12.17 @@ -2832,6 +2829,12 @@ 12.18 return f; 12.19 } 12.20 12.21 +// Handle request to load libnuma symbol version 1.2 (API v2) only. 12.22 +// Return NULL if the symbol is not defined in this particular version. 12.23 +void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) { 12.24 + return dlvsym(handle, name, "libnuma_1.2"); 12.25 +} 12.26 + 12.27 bool os::Linux::libnuma_init() { 12.28 // sched_getcpu() should be in libc. 
12.29 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, 12.30 @@ -2856,6 +2859,8 @@ 12.31 libnuma_dlsym(handle, "numa_tonode_memory"))); 12.32 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t, 12.33 libnuma_dlsym(handle, "numa_interleave_memory"))); 12.34 + set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t, 12.35 + libnuma_v2_dlsym(handle, "numa_interleave_memory"))); 12.36 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t, 12.37 libnuma_dlsym(handle, "numa_set_bind_policy"))); 12.38 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t, 12.39 @@ -2975,6 +2980,7 @@ 12.40 os::Linux::numa_available_func_t os::Linux::_numa_available; 12.41 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory; 12.42 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; 12.43 +os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2; 12.44 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy; 12.45 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset; 12.46 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
13.1 --- a/src/os/linux/vm/os_linux.hpp Fri Oct 27 20:39:22 2017 +0100 13.2 +++ b/src/os/linux/vm/os_linux.hpp Thu Nov 02 11:31:01 2017 -0700 13.3 @@ -190,6 +190,8 @@ 13.4 static void libpthread_init(); 13.5 static bool libnuma_init(); 13.6 static void* libnuma_dlsym(void* handle, const char* name); 13.7 + // libnuma v2 (libnuma_1.2) symbols 13.8 + static void* libnuma_v2_dlsym(void* handle, const char* name); 13.9 // Minimum stack size a thread can be created with (allowing 13.10 // the VM to completely create the thread and enter user code) 13.11 static size_t min_stack_allowed; 13.12 @@ -250,6 +252,8 @@ 13.13 typedef int (*numa_available_func_t)(void); 13.14 typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node); 13.15 typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask); 13.16 + typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask); 13.17 + 13.18 typedef void (*numa_set_bind_policy_func_t)(int policy); 13.19 typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n); 13.20 typedef int (*numa_distance_func_t)(int node1, int node2); 13.21 @@ -261,6 +265,7 @@ 13.22 static numa_available_func_t _numa_available; 13.23 static numa_tonode_memory_func_t _numa_tonode_memory; 13.24 static numa_interleave_memory_func_t _numa_interleave_memory; 13.25 + static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2; 13.26 static numa_set_bind_policy_func_t _numa_set_bind_policy; 13.27 static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset; 13.28 static numa_distance_func_t _numa_distance; 13.29 @@ -275,6 +280,7 @@ 13.30 static void set_numa_available(numa_available_func_t func) { _numa_available = func; } 13.31 static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; } 13.32 static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; } 13.33 + static 
void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; } 13.34 static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; } 13.35 static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; } 13.36 static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; } 13.37 @@ -296,7 +302,10 @@ 13.38 return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1; 13.39 } 13.40 static void numa_interleave_memory(void *start, size_t size) { 13.41 - if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) { 13.42 + // Use v2 api if available 13.43 + if (_numa_interleave_memory_v2 != NULL && _numa_all_nodes_ptr != NULL) { 13.44 + _numa_interleave_memory_v2(start, size, _numa_all_nodes_ptr); 13.45 + } else if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) { 13.46 _numa_interleave_memory(start, size, _numa_all_nodes); 13.47 } 13.48 }
14.1 --- a/src/os/windows/vm/version.rc Fri Oct 27 20:39:22 2017 +0100 14.2 +++ b/src/os/windows/vm/version.rc Thu Nov 02 11:31:01 2017 -0700 14.3 @@ -36,7 +36,7 @@ 14.4 // 14.5 14.6 VS_VERSION_INFO VERSIONINFO 14.7 - FILEVERSION HS_VER 14.8 + FILEVERSION JDK_VER 14.9 PRODUCTVERSION JDK_VER 14.10 FILEFLAGSMASK 0x3fL 14.11 #ifdef _DEBUG 14.12 @@ -56,7 +56,7 @@ 14.13 BEGIN 14.14 VALUE "CompanyName", XSTR(HS_COMPANY) "\0" 14.15 VALUE "FileDescription", XSTR(HS_FILEDESC) "\0" 14.16 - VALUE "FileVersion", XSTR(HS_DOTVER) "\0" 14.17 + VALUE "FileVersion", XSTR(JDK_DOTVER) "\0" 14.18 VALUE "Full Version", XSTR(HS_BUILD_ID) "\0" 14.19 VALUE "InternalName", XSTR(HS_INTERNAL_NAME) "\0" 14.20 VALUE "LegalCopyright", XSTR(HS_COPYRIGHT) "\0"
15.1 --- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Fri Oct 27 20:39:22 2017 +0100 15.2 +++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Thu Nov 02 11:31:01 2017 -0700 15.3 @@ -442,7 +442,7 @@ 15.4 // is available to us as well 15.5 Sysinfo cpu_info(SI_CPUBRAND); 15.6 bool use_solaris_12_api = cpu_info.valid(); 15.7 - const char* impl; 15.8 + const char* impl = "unknown"; 15.9 int impl_m = 0; 15.10 if (use_solaris_12_api) { 15.11 impl = cpu_info.value(); 15.12 @@ -477,7 +477,7 @@ 15.13 kstat_close(kc); 15.14 } 15.15 } 15.16 - assert(impl_m != 0, err_msg("Unknown CPU implementation %s", impl)); 15.17 + assert(impl_m != 0, err_msg("Unrecognized CPU implementation %s", impl)); 15.18 features |= impl_m; 15.19 15.20 bool is_sun4v = (features & sun4v_m) != 0;
16.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp Fri Oct 27 20:39:22 2017 +0100 16.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Thu Nov 02 11:31:01 2017 -0700 16.3 @@ -1530,7 +1530,7 @@ 16.4 ciMethod* caller = state()->scope()->method(); 16.5 ciMethodData* md = caller->method_data_or_null(); 16.6 ciProfileData* data = md->bci_to_data(invoke_bci); 16.7 - if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { 16.8 + if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 16.9 bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return(); 16.10 // May not be true in case of an inlined call through a method handle intrinsic. 16.11 if (has_return) { 16.12 @@ -1747,7 +1747,7 @@ 16.13 start = has_receiver ? 1 : 0; 16.14 if (profile_arguments()) { 16.15 ciProfileData* data = method()->method_data()->bci_to_data(bci()); 16.16 - if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { 16.17 + if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 16.18 n = data->is_CallTypeData() ? 
data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments(); 16.19 } 16.20 } 16.21 @@ -3313,7 +3313,9 @@ 16.22 // for osr compile, bailout if some requirements are not fulfilled 16.23 if (osr_bci != -1) { 16.24 BlockBegin* osr_block = blm.bci2block()->at(osr_bci); 16.25 - assert(osr_block->is_set(BlockBegin::was_visited_flag),"osr entry must have been visited for osr compile"); 16.26 + if (!osr_block->is_set(BlockBegin::was_visited_flag)) { 16.27 + BAILOUT("osr entry must have been visited for osr compile"); 16.28 + } 16.29 16.30 // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points 16.31 if (!osr_block->state()->stack_is_empty()) { 16.32 @@ -4465,7 +4467,7 @@ 16.33 } 16.34 ciMethodData* md = m->method_data_or_null(); 16.35 ciProfileData* data = md->bci_to_data(invoke_bci); 16.36 - if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) { 16.37 + if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { 16.38 append(new ProfileReturnType(m , invoke_bci, callee, ret)); 16.39 } 16.40 }
17.1 --- a/src/share/vm/c1/c1_Instruction.hpp Fri Oct 27 20:39:22 2017 +0100 17.2 +++ b/src/share/vm/c1/c1_Instruction.hpp Thu Nov 02 11:31:01 2017 -0700 17.3 @@ -1568,7 +1568,7 @@ 17.4 set_needs_null_check(has_receiver); 17.5 17.6 // some intrinsics can't trap, so don't force them to be pinned 17.7 - if (!can_trap()) { 17.8 + if (!can_trap() && !vmIntrinsics::should_be_pinned(_id)) { 17.9 unpin(PinStateSplitConstructor); 17.10 } 17.11 }
18.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp Fri Oct 27 20:39:22 2017 +0100 18.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Thu Nov 02 11:31:01 2017 -0700 18.3 @@ -3185,50 +3185,52 @@ 18.4 int bci = x->bci_of_invoke(); 18.5 ciMethodData* md = x->method()->method_data_or_null(); 18.6 ciProfileData* data = md->bci_to_data(bci); 18.7 - if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) || 18.8 - (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) { 18.9 - ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset(); 18.10 - int base_offset = md->byte_offset_of_slot(data, extra); 18.11 - LIR_Opr mdp = LIR_OprFact::illegalOpr; 18.12 - ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args(); 18.13 - 18.14 - Bytecodes::Code bc = x->method()->java_code_at_bci(bci); 18.15 - int start = 0; 18.16 - int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); 18.17 - if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { 18.18 - // first argument is not profiled at call (method handle invoke) 18.19 - assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); 18.20 - start = 1; 18.21 + if (data != NULL) { 18.22 + if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) || 18.23 + (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) { 18.24 + ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset(); 18.25 + int base_offset = md->byte_offset_of_slot(data, extra); 18.26 + LIR_Opr mdp = LIR_OprFact::illegalOpr; 18.27 + ciTypeStackSlotEntries* args = data->is_CallTypeData() ? 
((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args(); 18.28 + 18.29 + Bytecodes::Code bc = x->method()->java_code_at_bci(bci); 18.30 + int start = 0; 18.31 + int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); 18.32 + if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { 18.33 + // first argument is not profiled at call (method handle invoke) 18.34 + assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); 18.35 + start = 1; 18.36 + } 18.37 + ciSignature* callee_signature = x->callee()->signature(); 18.38 + // method handle call to virtual method 18.39 + bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); 18.40 + ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL); 18.41 + 18.42 + bool ignored_will_link; 18.43 + ciSignature* signature_at_call = NULL; 18.44 + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 18.45 + ciSignatureStream signature_at_call_stream(signature_at_call); 18.46 + 18.47 + // if called through method handle invoke, some arguments may have been popped 18.48 + for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) { 18.49 + int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset()); 18.50 + ciKlass* exact = profile_type(md, base_offset, off, 18.51 + args->type(i), x->profiled_arg_at(i+start), mdp, 18.52 + !x->arg_needs_null_check(i+start), 18.53 + signature_at_call_stream.next_klass(), callee_signature_stream.next_klass()); 18.54 + if (exact != NULL) { 18.55 + md->set_argument_type(bci, i, exact); 18.56 + } 18.57 + } 18.58 + } else { 18.59 +#ifdef ASSERT 18.60 + Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke()); 18.61 + int n = 
x->nb_profiled_args(); 18.62 + assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() || 18.63 + (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))), 18.64 + "only at JSR292 bytecodes"); 18.65 +#endif 18.66 } 18.67 - ciSignature* callee_signature = x->callee()->signature(); 18.68 - // method handle call to virtual method 18.69 - bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); 18.70 - ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL); 18.71 - 18.72 - bool ignored_will_link; 18.73 - ciSignature* signature_at_call = NULL; 18.74 - x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 18.75 - ciSignatureStream signature_at_call_stream(signature_at_call); 18.76 - 18.77 - // if called through method handle invoke, some arguments may have been popped 18.78 - for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) { 18.79 - int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset()); 18.80 - ciKlass* exact = profile_type(md, base_offset, off, 18.81 - args->type(i), x->profiled_arg_at(i+start), mdp, 18.82 - !x->arg_needs_null_check(i+start), 18.83 - signature_at_call_stream.next_klass(), callee_signature_stream.next_klass()); 18.84 - if (exact != NULL) { 18.85 - md->set_argument_type(bci, i, exact); 18.86 - } 18.87 - } 18.88 - } else { 18.89 -#ifdef ASSERT 18.90 - Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke()); 18.91 - int n = x->nb_profiled_args(); 18.92 - assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() || 18.93 - (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))), 18.94 - "only at JSR292 bytecodes"); 18.95 -#endif 18.96 } 18.97 } 18.98 } 18.99 @@ -3319,24 
+3321,26 @@ 18.100 int bci = x->bci_of_invoke(); 18.101 ciMethodData* md = x->method()->method_data_or_null(); 18.102 ciProfileData* data = md->bci_to_data(bci); 18.103 - assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type"); 18.104 - ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret(); 18.105 - LIR_Opr mdp = LIR_OprFact::illegalOpr; 18.106 - 18.107 - bool ignored_will_link; 18.108 - ciSignature* signature_at_call = NULL; 18.109 - x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 18.110 - 18.111 - // The offset within the MDO of the entry to update may be too large 18.112 - // to be used in load/store instructions on some platforms. So have 18.113 - // profile_type() compute the address of the profile in a register. 18.114 - ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0, 18.115 - ret->type(), x->ret(), mdp, 18.116 - !x->needs_null_check(), 18.117 - signature_at_call->return_type()->as_klass(), 18.118 - x->callee()->signature()->return_type()->as_klass()); 18.119 - if (exact != NULL) { 18.120 - md->set_return_type(bci, exact); 18.121 + if (data != NULL) { 18.122 + assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type"); 18.123 + ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret(); 18.124 + LIR_Opr mdp = LIR_OprFact::illegalOpr; 18.125 + 18.126 + bool ignored_will_link; 18.127 + ciSignature* signature_at_call = NULL; 18.128 + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); 18.129 + 18.130 + // The offset within the MDO of the entry to update may be too large 18.131 + // to be used in load/store instructions on some platforms. So have 18.132 + // profile_type() compute the address of the profile in a register. 
18.133 + ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0, 18.134 + ret->type(), x->ret(), mdp, 18.135 + !x->needs_null_check(), 18.136 + signature_at_call->return_type()->as_klass(), 18.137 + x->callee()->signature()->return_type()->as_klass()); 18.138 + if (exact != NULL) { 18.139 + md->set_return_type(bci, exact); 18.140 + } 18.141 } 18.142 } 18.143
19.1 --- a/src/share/vm/ci/ciMethodData.cpp Fri Oct 27 20:39:22 2017 +0100 19.2 +++ b/src/share/vm/ci/ciMethodData.cpp Thu Nov 02 11:31:01 2017 -0700 19.3 @@ -391,11 +391,13 @@ 19.4 MethodData* mdo = get_MethodData(); 19.5 if (mdo != NULL) { 19.6 ProfileData* data = mdo->bci_to_data(bci); 19.7 - if (data->is_CallTypeData()) { 19.8 - data->as_CallTypeData()->set_argument_type(i, k->get_Klass()); 19.9 - } else { 19.10 - assert(data->is_VirtualCallTypeData(), "no arguments!"); 19.11 - data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass()); 19.12 + if (data != NULL) { 19.13 + if (data->is_CallTypeData()) { 19.14 + data->as_CallTypeData()->set_argument_type(i, k->get_Klass()); 19.15 + } else { 19.16 + assert(data->is_VirtualCallTypeData(), "no arguments!"); 19.17 + data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass()); 19.18 + } 19.19 } 19.20 } 19.21 } 19.22 @@ -413,11 +415,13 @@ 19.23 MethodData* mdo = get_MethodData(); 19.24 if (mdo != NULL) { 19.25 ProfileData* data = mdo->bci_to_data(bci); 19.26 - if (data->is_CallTypeData()) { 19.27 - data->as_CallTypeData()->set_return_type(k->get_Klass()); 19.28 - } else { 19.29 - assert(data->is_VirtualCallTypeData(), "no arguments!"); 19.30 - data->as_VirtualCallTypeData()->set_return_type(k->get_Klass()); 19.31 + if (data != NULL) { 19.32 + if (data->is_CallTypeData()) { 19.33 + data->as_CallTypeData()->set_return_type(k->get_Klass()); 19.34 + } else { 19.35 + assert(data->is_VirtualCallTypeData(), "no arguments!"); 19.36 + data->as_VirtualCallTypeData()->set_return_type(k->get_Klass()); 19.37 + } 19.38 } 19.39 } 19.40 }
20.1 --- a/src/share/vm/classfile/vmSymbols.cpp Fri Oct 27 20:39:22 2017 +0100 20.2 +++ b/src/share/vm/classfile/vmSymbols.cpp Thu Nov 02 11:31:01 2017 -0700 20.3 @@ -324,6 +324,20 @@ 20.4 return vmIntrinsics::_none; 20.5 } 20.6 20.7 +// Some intrinsics produce different results if they are not pinned 20.8 +bool vmIntrinsics::should_be_pinned(vmIntrinsics::ID id) { 20.9 + assert(id != vmIntrinsics::_none, "must be a VM intrinsic"); 20.10 + switch(id) { 20.11 +#ifdef TRACE_HAVE_INTRINSICS 20.12 + case vmIntrinsics::_counterTime: 20.13 +#endif 20.14 + case vmIntrinsics::_currentTimeMillis: 20.15 + case vmIntrinsics::_nanoTime: 20.16 + return true; 20.17 + default: 20.18 + return false; 20.19 + } 20.20 +} 20.21 20.22 #define VM_INTRINSIC_INITIALIZE(id, klass, name, sig, flags) #id "\0" 20.23 static const char* vm_intrinsic_name_bodies =
21.1 --- a/src/share/vm/classfile/vmSymbols.hpp Fri Oct 27 20:39:22 2017 +0100 21.2 +++ b/src/share/vm/classfile/vmSymbols.hpp Thu Nov 02 11:31:01 2017 -0700 21.3 @@ -1301,6 +1301,8 @@ 21.4 21.5 // Raw conversion: 21.6 static ID for_raw_conversion(BasicType src, BasicType dest); 21.7 + 21.8 + static bool should_be_pinned(vmIntrinsics::ID id); 21.9 }; 21.10 21.11 #endif // SHARE_VM_CLASSFILE_VMSYMBOLS_HPP
22.1 --- a/src/share/vm/compiler/oopMap.cpp Fri Oct 27 20:39:22 2017 +0100 22.2 +++ b/src/share/vm/compiler/oopMap.cpp Thu Nov 02 11:31:01 2017 -0700 22.3 @@ -1,5 +1,5 @@ 22.4 /* 22.5 - * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. 22.6 + * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. 22.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 22.8 * 22.9 * This code is free software; you can redistribute it and/or modify it 22.10 @@ -389,17 +389,16 @@ 22.11 omv = oms.current(); 22.12 oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map); 22.13 if ( loc != NULL ) { 22.14 + oop *derived_loc = loc; 22.15 oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map); 22.16 - oop *derived_loc = loc; 22.17 - oop val = *base_loc; 22.18 - if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) { 22.19 - // Ignore NULL oops and decoded NULL narrow oops which 22.20 - // equal to Universe::narrow_oop_base when a narrow oop 22.21 - // implicit null check is used in compiled code. 22.22 - // The narrow_oop_base could be NULL or be the address 22.23 - // of the page below heap depending on compressed oops mode. 22.24 - } else 22.25 + // Ignore NULL oops and decoded NULL narrow oops which 22.26 + // equal to Universe::narrow_oop_base when a narrow oop 22.27 + // implicit null check is used in compiled code. 22.28 + // The narrow_oop_base could be NULL or be the address 22.29 + // of the page below heap depending on compressed oops mode. 22.30 + if (base_loc != NULL && *base_loc != (oop)NULL && !Universe::is_narrow_oop_base(*base_loc)) { 22.31 derived_oop_fn(base_loc, derived_loc); 22.32 + } 22.33 } 22.34 oms.next(); 22.35 } while (!oms.is_done());
23.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp Fri Oct 27 20:39:22 2017 +0100 23.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Thu Nov 02 11:31:01 2017 -0700 23.3 @@ -980,6 +980,7 @@ 23.4 // ProfileData is essentially a wrapper around a derived oop, so we 23.5 // need to take the lock before making any ProfileData structures. 23.6 ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(fr.interpreter_frame_mdp())); 23.7 + guarantee(data != NULL, "profile data must be valid"); 23.8 RetData* rdata = data->as_RetData(); 23.9 address new_mdp = rdata->fixup_ret(return_bci, h_mdo); 23.10 fr.interpreter_frame_set_mdp(new_mdp);
24.1 --- a/src/share/vm/opto/ifnode.cpp Fri Oct 27 20:39:22 2017 +0100 24.2 +++ b/src/share/vm/opto/ifnode.cpp Thu Nov 02 11:31:01 2017 -0700 24.3 @@ -234,6 +234,13 @@ 24.4 predicate_proj = proj; 24.5 } 24.6 } 24.7 + 24.8 + // If all the defs of the phi are the same constant, we already have the desired end state. 24.9 + // Skip the split that would create empty phi and region nodes. 24.10 + if((r->req() - req_c) == 1) { 24.11 + return NULL; 24.12 + } 24.13 + 24.14 Node* predicate_c = NULL; 24.15 Node* predicate_x = NULL; 24.16 bool counted_loop = r->is_CountedLoop();
25.1 --- a/src/share/vm/opto/library_call.cpp Fri Oct 27 20:39:22 2017 +0100 25.2 +++ b/src/share/vm/opto/library_call.cpp Thu Nov 02 11:31:01 2017 -0700 25.3 @@ -6068,11 +6068,21 @@ 25.4 Node* n_start = array_element_address(n, intcon(0), n_elem); 25.5 Node* m_start = array_element_address(m, intcon(0), m_elem); 25.6 25.7 - Node* call = make_runtime_call(RC_LEAF, 25.8 - OptoRuntime::montgomeryMultiply_Type(), 25.9 - stubAddr, stubName, TypePtr::BOTTOM, 25.10 - a_start, b_start, n_start, len, inv, top(), 25.11 - m_start); 25.12 + Node* call = NULL; 25.13 + if (CCallingConventionRequiresIntsAsLongs) { 25.14 + Node* len_I2L = ConvI2L(len); 25.15 + call = make_runtime_call(RC_LEAF, 25.16 + OptoRuntime::montgomeryMultiply_Type(), 25.17 + stubAddr, stubName, TypePtr::BOTTOM, 25.18 + a_start, b_start, n_start, len_I2L XTOP, inv, 25.19 + top(), m_start); 25.20 + } else { 25.21 + call = make_runtime_call(RC_LEAF, 25.22 + OptoRuntime::montgomeryMultiply_Type(), 25.23 + stubAddr, stubName, TypePtr::BOTTOM, 25.24 + a_start, b_start, n_start, len, inv, top(), 25.25 + m_start); 25.26 + } 25.27 set_result(m); 25.28 } 25.29 25.30 @@ -6122,11 +6132,22 @@ 25.31 Node* n_start = array_element_address(n, intcon(0), n_elem); 25.32 Node* m_start = array_element_address(m, intcon(0), m_elem); 25.33 25.34 - Node* call = make_runtime_call(RC_LEAF, 25.35 - OptoRuntime::montgomerySquare_Type(), 25.36 - stubAddr, stubName, TypePtr::BOTTOM, 25.37 - a_start, n_start, len, inv, top(), 25.38 - m_start); 25.39 + Node* call = NULL; 25.40 + if (CCallingConventionRequiresIntsAsLongs) { 25.41 + Node* len_I2L = ConvI2L(len); 25.42 + call = make_runtime_call(RC_LEAF, 25.43 + OptoRuntime::montgomerySquare_Type(), 25.44 + stubAddr, stubName, TypePtr::BOTTOM, 25.45 + a_start, n_start, len_I2L XTOP, inv, top(), 25.46 + m_start); 25.47 + } else { 25.48 + call = make_runtime_call(RC_LEAF, 25.49 + OptoRuntime::montgomerySquare_Type(), 25.50 + stubAddr, stubName, TypePtr::BOTTOM, 25.51 + a_start, n_start, len, 
inv, top(), 25.52 + m_start); 25.53 + } 25.54 + 25.55 set_result(m); 25.56 } 25.57
26.1 --- a/src/share/vm/opto/loopnode.cpp Fri Oct 27 20:39:22 2017 +0100 26.2 +++ b/src/share/vm/opto/loopnode.cpp Thu Nov 02 11:31:01 2017 -0700 26.3 @@ -1,5 +1,5 @@ 26.4 /* 26.5 - * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. 26.6 + * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. 26.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 26.8 * 26.9 * This code is free software; you can redistribute it and/or modify it 26.10 @@ -1773,6 +1773,12 @@ 26.11 Node *init2 = phi2->in( LoopNode::EntryControl ); 26.12 int stride_con2 = incr2->in(2)->get_int(); 26.13 26.14 + // The ratio of the two strides cannot be represented as an int 26.15 + // if stride_con2 is min_int and stride_con is -1. 26.16 + if (stride_con2 == min_jint && stride_con == -1) { 26.17 + continue; 26.18 + } 26.19 + 26.20 // The general case here gets a little tricky. We want to find the 26.21 // GCD of all possible parallel IV's and make a new IV using this 26.22 // GCD for the loop. Then all possible IVs are simple multiples of
27.1 --- a/src/share/vm/opto/loopnode.hpp Fri Oct 27 20:39:22 2017 +0100 27.2 +++ b/src/share/vm/opto/loopnode.hpp Thu Nov 02 11:31:01 2017 -0700 27.3 @@ -1,5 +1,5 @@ 27.4 /* 27.5 - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. 27.6 + * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. 27.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 27.8 * 27.9 * This code is free software; you can redistribute it and/or modify it 27.10 @@ -257,19 +257,29 @@ 27.11 Node *incr() const { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; } 27.12 Node *limit() const { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; } 27.13 Node *stride() const { Node *tmp = incr (); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; } 27.14 - Node *phi() const { Node *tmp = incr (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; } 27.15 Node *init_trip() const { Node *tmp = phi (); return (tmp && tmp->req()==3) ? 
tmp->in(1) : NULL; } 27.16 int stride_con() const; 27.17 bool stride_is_con() const { Node *tmp = stride (); return (tmp != NULL && tmp->is_Con()); } 27.18 BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; } 27.19 + PhiNode *phi() const { 27.20 + Node *tmp = incr(); 27.21 + if (tmp && tmp->req() == 3) { 27.22 + Node* phi = tmp->in(1); 27.23 + if (phi->is_Phi()) { 27.24 + return phi->as_Phi(); 27.25 + } 27.26 + } 27.27 + return NULL; 27.28 + } 27.29 CountedLoopNode *loopnode() const { 27.30 // The CountedLoopNode that goes with this CountedLoopEndNode may 27.31 // have been optimized out by the IGVN so be cautious with the 27.32 // pattern matching on the graph 27.33 - if (phi() == NULL) { 27.34 + PhiNode* iv_phi = phi(); 27.35 + if (iv_phi == NULL) { 27.36 return NULL; 27.37 } 27.38 - Node *ln = phi()->in(0); 27.39 + Node *ln = iv_phi->in(0); 27.40 if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) { 27.41 return (CountedLoopNode*)ln; 27.42 }
28.1 --- a/src/share/vm/opto/memnode.cpp Fri Oct 27 20:39:22 2017 +0100 28.2 +++ b/src/share/vm/opto/memnode.cpp Thu Nov 02 11:31:01 2017 -0700 28.3 @@ -1,5 +1,5 @@ 28.4 /* 28.5 - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 28.6 + * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 28.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 28.8 * 28.9 * This code is free software; you can redistribute it and/or modify it 28.10 @@ -55,6 +55,15 @@ 28.11 return calculate_adr_type(adr->bottom_type(), cross_check); 28.12 } 28.13 28.14 +bool MemNode::check_if_adr_maybe_raw(Node* adr) { 28.15 + if (adr != NULL) { 28.16 + if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) { 28.17 + return true; 28.18 + } 28.19 + } 28.20 + return false; 28.21 +} 28.22 + 28.23 #ifndef PRODUCT 28.24 void MemNode::dump_spec(outputStream *st) const { 28.25 if (in(Address) == NULL) return; // node is dead 28.26 @@ -503,6 +512,7 @@ 28.27 if (offset == Type::OffsetBot) 28.28 return NULL; // cannot unalias unless there are precise offsets 28.29 28.30 + const bool adr_maybe_raw = check_if_adr_maybe_raw(adr); 28.31 const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr(); 28.32 28.33 intptr_t size_in_bytes = memory_size(); 28.34 @@ -519,6 +529,13 @@ 28.35 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset); 28.36 if (st_base == NULL) 28.37 break; // inscrutable pointer 28.38 + 28.39 + // For raw accesses it's not enough to prove that constant offsets don't intersect. 28.40 + // We need the bases to be the equal in order for the offset check to make sense. 28.41 + if ((adr_maybe_raw || check_if_adr_maybe_raw(st_adr)) && st_base != base) { 28.42 + break; 28.43 + } 28.44 + 28.45 if (st_offset != offset && st_offset != Type::OffsetBot) { 28.46 const int MAX_STORE = BytesPerLong; 28.47 if (st_offset >= offset + size_in_bytes ||
29.1 --- a/src/share/vm/opto/memnode.hpp Fri Oct 27 20:39:22 2017 +0100 29.2 +++ b/src/share/vm/opto/memnode.hpp Thu Nov 02 11:31:01 2017 -0700 29.3 @@ -1,5 +1,5 @@ 29.4 /* 29.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 29.6 + * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 29.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.8 * 29.9 * This code is free software; you can redistribute it and/or modify it 29.10 @@ -75,6 +75,8 @@ 29.11 debug_only(_adr_type=at; adr_type();) 29.12 } 29.13 29.14 + static bool check_if_adr_maybe_raw(Node* adr); 29.15 + 29.16 public: 29.17 // Helpers for the optimizer. Documented in memnode.cpp. 29.18 static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
30.1 --- a/src/share/vm/opto/parse2.cpp Fri Oct 27 20:39:22 2017 +0100 30.2 +++ b/src/share/vm/opto/parse2.cpp Thu Nov 02 11:31:01 2017 -0700 30.3 @@ -812,6 +812,9 @@ 30.4 ciMethodData* methodData = method()->method_data(); 30.5 if (!methodData->is_mature()) return PROB_UNKNOWN; 30.6 ciProfileData* data = methodData->bci_to_data(bci()); 30.7 + if (data == NULL) { 30.8 + return PROB_UNKNOWN; 30.9 + } 30.10 if (!data->is_JumpData()) return PROB_UNKNOWN; 30.11 30.12 // get taken and not taken values 30.13 @@ -903,8 +906,8 @@ 30.14 // of the OSR-ed method, and we want to deopt to gather more stats. 30.15 // If you have ANY counts, then this loop is simply 'cold' relative 30.16 // to the OSR loop. 30.17 - if (data->as_BranchData()->taken() + 30.18 - data->as_BranchData()->not_taken() == 0 ) { 30.19 + if (data == NULL || 30.20 + (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 30.21 // This is the only way to return PROB_UNKNOWN: 30.22 return PROB_UNKNOWN; 30.23 }
31.1 --- a/src/share/vm/opto/phaseX.cpp Fri Oct 27 20:39:22 2017 +0100 31.2 +++ b/src/share/vm/opto/phaseX.cpp Thu Nov 02 11:31:01 2017 -0700 31.3 @@ -1,5 +1,5 @@ 31.4 /* 31.5 - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 31.6 + * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 31.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 31.8 * 31.9 * This code is free software; you can redistribute it and/or modify it 31.10 @@ -1416,6 +1416,27 @@ 31.11 } 31.12 } 31.13 31.14 +// Return counted loop Phi if as a counted loop exit condition, cmp 31.15 +// compares the the induction variable with n 31.16 +static PhiNode* countedloop_phi_from_cmp(CmpINode* cmp, Node* n) { 31.17 + for (DUIterator_Fast imax, i = cmp->fast_outs(imax); i < imax; i++) { 31.18 + Node* bol = cmp->fast_out(i); 31.19 + for (DUIterator_Fast i2max, i2 = bol->fast_outs(i2max); i2 < i2max; i2++) { 31.20 + Node* iff = bol->fast_out(i2); 31.21 + if (iff->is_CountedLoopEnd()) { 31.22 + CountedLoopEndNode* cle = iff->as_CountedLoopEnd(); 31.23 + if (cle->limit() == n) { 31.24 + PhiNode* phi = cle->phi(); 31.25 + if (phi != NULL) { 31.26 + return phi; 31.27 + } 31.28 + } 31.29 + } 31.30 + } 31.31 + } 31.32 + return NULL; 31.33 +} 31.34 + 31.35 void PhaseIterGVN::add_users_to_worklist( Node *n ) { 31.36 add_users_to_worklist0(n); 31.37 31.38 @@ -1445,18 +1466,7 @@ 31.39 Node* bol = use->raw_out(0); 31.40 if (bol->outcnt() > 0) { 31.41 Node* iff = bol->raw_out(0); 31.42 - if (use_op == Op_CmpI && 31.43 - iff->is_CountedLoopEnd()) { 31.44 - CountedLoopEndNode* cle = iff->as_CountedLoopEnd(); 31.45 - if (cle->limit() == n && cle->phi() != NULL) { 31.46 - // If an opaque node feeds into the limit condition of a 31.47 - // CountedLoop, we need to process the Phi node for the 31.48 - // induction variable when the opaque node is removed: 31.49 - // the range of values taken by the Phi is now known and 31.50 - // so its type is also known. 
31.51 - _worklist.push(cle->phi()); 31.52 - } 31.53 - } else if (iff->outcnt() == 2) { 31.54 + if (iff->outcnt() == 2) { 31.55 // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the 31.56 // phi merging either 0 or 1 onto the worklist 31.57 Node* ifproj0 = iff->raw_out(0); 31.58 @@ -1471,6 +1481,15 @@ 31.59 } 31.60 } 31.61 if (use_op == Op_CmpI) { 31.62 + Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n); 31.63 + if (phi != NULL) { 31.64 + // If an opaque node feeds into the limit condition of a 31.65 + // CountedLoop, we need to process the Phi node for the 31.66 + // induction variable when the opaque node is removed: 31.67 + // the range of values taken by the Phi is now known and 31.68 + // so its type is also known. 31.69 + _worklist.push(phi); 31.70 + } 31.71 Node* in1 = use->in(1); 31.72 for (uint i = 0; i < in1->outcnt(); i++) { 31.73 if (in1->raw_out(i)->Opcode() == Op_CastII) { 31.74 @@ -1659,6 +1678,15 @@ 31.75 } 31.76 } 31.77 } 31.78 + // If n is used in a counted loop exit condition then the type 31.79 + // of the counted loop's Phi depends on the type of n. See 31.80 + // PhiNode::Value(). 31.81 + if (m_op == Op_CmpI) { 31.82 + PhiNode* phi = countedloop_phi_from_cmp((CmpINode*)m, n); 31.83 + if (phi != NULL) { 31.84 + worklist.push(phi); 31.85 + } 31.86 + } 31.87 } 31.88 } 31.89 }
32.1 --- a/src/share/vm/opto/phaseX.hpp Fri Oct 27 20:39:22 2017 +0100 32.2 +++ b/src/share/vm/opto/phaseX.hpp Thu Nov 02 11:31:01 2017 -0700 32.3 @@ -1,5 +1,5 @@ 32.4 /* 32.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 32.6 + * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 32.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 32.8 * 32.9 * This code is free software; you can redistribute it and/or modify it 32.10 @@ -417,7 +417,7 @@ 32.11 // Phase for iteratively performing local, pessimistic GVN-style optimizations. 32.12 // and ideal transformations on the graph. 32.13 class PhaseIterGVN : public PhaseGVN { 32.14 - private: 32.15 +private: 32.16 bool _delay_transform; // When true simply register the node when calling transform 32.17 // instead of actually optimizing it 32.18
33.1 --- a/src/share/vm/opto/runtime.cpp Fri Oct 27 20:39:22 2017 +0100 33.2 +++ b/src/share/vm/opto/runtime.cpp Thu Nov 02 11:31:01 2017 -0700 33.3 @@ -1003,12 +1003,20 @@ 33.4 // create input type (domain) 33.5 int num_args = 7; 33.6 int argcnt = num_args; 33.7 + if (CCallingConventionRequiresIntsAsLongs) { 33.8 + argcnt++; // additional placeholder 33.9 + } 33.10 const Type** fields = TypeTuple::fields(argcnt); 33.11 int argp = TypeFunc::Parms; 33.12 fields[argp++] = TypePtr::NOTNULL; // a 33.13 fields[argp++] = TypePtr::NOTNULL; // b 33.14 fields[argp++] = TypePtr::NOTNULL; // n 33.15 - fields[argp++] = TypeInt::INT; // len 33.16 + if (CCallingConventionRequiresIntsAsLongs) { 33.17 + fields[argp++] = TypeLong::LONG; // len 33.18 + fields[argp++] = TypeLong::HALF; // placeholder 33.19 + } else { 33.20 + fields[argp++] = TypeInt::INT; // len 33.21 + } 33.22 fields[argp++] = TypeLong::LONG; // inv 33.23 fields[argp++] = Type::HALF; 33.24 fields[argp++] = TypePtr::NOTNULL; // result 33.25 @@ -1027,11 +1035,19 @@ 33.26 // create input type (domain) 33.27 int num_args = 6; 33.28 int argcnt = num_args; 33.29 + if (CCallingConventionRequiresIntsAsLongs) { 33.30 + argcnt++; // additional placeholder 33.31 + } 33.32 const Type** fields = TypeTuple::fields(argcnt); 33.33 int argp = TypeFunc::Parms; 33.34 fields[argp++] = TypePtr::NOTNULL; // a 33.35 fields[argp++] = TypePtr::NOTNULL; // n 33.36 - fields[argp++] = TypeInt::INT; // len 33.37 + if (CCallingConventionRequiresIntsAsLongs) { 33.38 + fields[argp++] = TypeLong::LONG; // len 33.39 + fields[argp++] = TypeLong::HALF; // placeholder 33.40 + } else { 33.41 + fields[argp++] = TypeInt::INT; // len 33.42 + } 33.43 fields[argp++] = TypeLong::LONG; // inv 33.44 fields[argp++] = Type::HALF; 33.45 fields[argp++] = TypePtr::NOTNULL; // result
34.1 --- a/src/share/vm/opto/type.cpp Fri Oct 27 20:39:22 2017 +0100 34.2 +++ b/src/share/vm/opto/type.cpp Thu Nov 02 11:31:01 2017 -0700 34.3 @@ -1001,21 +1001,10 @@ 34.4 34.5 //------------------------------eq--------------------------------------------- 34.6 // Structural equality check for Type representations 34.7 -bool TypeF::eq( const Type *t ) const { 34.8 - if( g_isnan(_f) || 34.9 - g_isnan(t->getf()) ) { 34.10 - // One or both are NANs. If both are NANs return true, else false. 34.11 - return (g_isnan(_f) && g_isnan(t->getf())); 34.12 - } 34.13 - if (_f == t->getf()) { 34.14 - // (NaN is impossible at this point, since it is not equal even to itself) 34.15 - if (_f == 0.0) { 34.16 - // difference between positive and negative zero 34.17 - if (jint_cast(_f) != jint_cast(t->getf())) return false; 34.18 - } 34.19 - return true; 34.20 - } 34.21 - return false; 34.22 +bool TypeF::eq(const Type *t) const { 34.23 + // Bitwise comparison to distinguish between +/-0. These values must be treated 34.24 + // as different to be consistent with C1 and the interpreter. 34.25 + return (jint_cast(_f) == jint_cast(t->getf())); 34.26 } 34.27 34.28 //------------------------------hash------------------------------------------- 34.29 @@ -1116,21 +1105,10 @@ 34.30 34.31 //------------------------------eq--------------------------------------------- 34.32 // Structural equality check for Type representations 34.33 -bool TypeD::eq( const Type *t ) const { 34.34 - if( g_isnan(_d) || 34.35 - g_isnan(t->getd()) ) { 34.36 - // One or both are NANs. If both are NANs return true, else false. 
34.37 - return (g_isnan(_d) && g_isnan(t->getd())); 34.38 - } 34.39 - if (_d == t->getd()) { 34.40 - // (NaN is impossible at this point, since it is not equal even to itself) 34.41 - if (_d == 0.0) { 34.42 - // difference between positive and negative zero 34.43 - if (jlong_cast(_d) != jlong_cast(t->getd())) return false; 34.44 - } 34.45 - return true; 34.46 - } 34.47 - return false; 34.48 +bool TypeD::eq(const Type *t) const { 34.49 + // Bitwise comparison to distinguish between +/-0. These values must be treated 34.50 + // as different to be consistent with C1 and the interpreter. 34.51 + return (jlong_cast(_d) == jlong_cast(t->getd())); 34.52 } 34.53 34.54 //------------------------------hash-------------------------------------------
35.1 --- a/src/share/vm/runtime/arguments.cpp Fri Oct 27 20:39:22 2017 +0100 35.2 +++ b/src/share/vm/runtime/arguments.cpp Thu Nov 02 11:31:01 2017 -0700 35.3 @@ -4156,6 +4156,11 @@ 35.4 warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used"); 35.5 } 35.6 35.7 + if (UseOnStackReplacement && !UseLoopCounter) { 35.8 + warning("On-stack-replacement requires loop counters; enabling loop counters"); 35.9 + FLAG_SET_DEFAULT(UseLoopCounter, true); 35.10 + } 35.11 + 35.12 #ifndef PRODUCT 35.13 if (CompileTheWorld) { 35.14 // Force NmethodSweeper to sweep whole CodeCache each time.
36.1 --- a/src/share/vm/runtime/jniHandles.cpp Fri Oct 27 20:39:22 2017 +0100 36.2 +++ b/src/share/vm/runtime/jniHandles.cpp Thu Nov 02 11:31:01 2017 -0700 36.3 @@ -1,5 +1,5 @@ 36.4 /* 36.5 - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. 36.6 + * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. 36.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 36.8 * 36.9 * This code is free software; you can redistribute it and/or modify it 36.10 @@ -196,8 +196,10 @@ 36.11 int _count; 36.12 public: 36.13 CountHandleClosure(): _count(0) {} 36.14 - virtual void do_oop(oop* unused) { 36.15 - _count++; 36.16 + virtual void do_oop(oop* ooph) { 36.17 + if (*ooph != JNIHandles::deleted_handle()) { 36.18 + _count++; 36.19 + } 36.20 } 36.21 virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); } 36.22 int count() { return _count; }
37.1 --- a/src/share/vm/runtime/objectMonitor.cpp Fri Oct 27 20:39:22 2017 +0100 37.2 +++ b/src/share/vm/runtime/objectMonitor.cpp Thu Nov 02 11:31:01 2017 -0700 37.3 @@ -381,6 +381,8 @@ 37.4 { // Change java thread status to indicate blocked on monitor enter. 37.5 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); 37.6 37.7 + Self->set_current_pending_monitor(this); 37.8 + 37.9 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt); 37.10 if (JvmtiExport::should_post_monitor_contended_enter()) { 37.11 JvmtiExport::post_monitor_contended_enter(jt, this); 37.12 @@ -395,8 +397,6 @@ 37.13 OSThreadContendState osts(Self->osthread()); 37.14 ThreadBlockInVM tbivm(jt); 37.15 37.16 - Self->set_current_pending_monitor(this); 37.17 - 37.18 // TODO-FIXME: change the following for(;;) loop to straight-line code. 37.19 for (;;) { 37.20 jt->set_suspend_equivalent();
38.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 38.2 +++ b/test/compiler/c1/Test8172751.java Thu Nov 02 11:31:01 2017 -0700 38.3 @@ -0,0 +1,77 @@ 38.4 +/* 38.5 + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 38.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 38.7 + * 38.8 + * This code is free software; you can redistribute it and/or modify it 38.9 + * under the terms of the GNU General Public License version 2 only, as 38.10 + * published by the Free Software Foundation. 38.11 + * 38.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 38.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 38.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 38.15 + * version 2 for more details (a copy is included in the LICENSE file that 38.16 + * accompanied this code). 38.17 + * 38.18 + * You should have received a copy of the GNU General Public License version 38.19 + * 2 along with this work; if not, write to the Free Software Foundation, 38.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 38.21 + * 38.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 38.23 + * or visit www.oracle.com if you need additional information or have any 38.24 + * questions. 
38.25 + */ 38.26 + 38.27 +/** 38.28 + * @test 38.29 + * @bug 8172751 38.30 + * @summary OSR compilation at unreachable bci causes C1 crash 38.31 + * 38.32 + * @run main/othervm -XX:-BackgroundCompilation compiler.c1.Test8172751 38.33 + */ 38.34 + 38.35 +package compiler.c1; 38.36 + 38.37 +import java.lang.invoke.MethodHandle; 38.38 +import java.lang.invoke.MethodHandles; 38.39 +import java.lang.invoke.MutableCallSite; 38.40 + 38.41 +public class Test8172751 { 38.42 + private static final MethodHandle CONSTANT_TRUE = MethodHandles.constant(boolean.class, true); 38.43 + private static final MethodHandle CONSTANT_FALSE = MethodHandles.constant(boolean.class, false); 38.44 + private static final MutableCallSite CALL_SITE = new MutableCallSite(CONSTANT_FALSE); 38.45 + private static final int LIMIT = 1_000_000; 38.46 + private static volatile int counter; 38.47 + 38.48 + private static boolean doSomething() { 38.49 + return counter++ < LIMIT; 38.50 + } 38.51 + 38.52 + private static void executeLoop() { 38.53 + /* 38.54 + * Start off with executing the first loop, then change the call site 38.55 + * target so as to switch over to the second loop but continue running 38.56 + * in the first loop. Eventually, an OSR compilation of the first loop 38.57 + * is triggered. Yet C1 will not find the OSR entry, since it will 38.58 + * have optimized out the first loop already during parsing. 38.59 + */ 38.60 + if (CALL_SITE.getTarget() == CONSTANT_FALSE) { 38.61 + int count = 0; 38.62 + while (doSomething()) { 38.63 + if (count++ == 1) { 38.64 + flipSwitch(); 38.65 + } 38.66 + } 38.67 + } else { 38.68 + while (doSomething()) { 38.69 + } 38.70 + } 38.71 + } 38.72 + 38.73 + private static void flipSwitch() { 38.74 + CALL_SITE.setTarget(CONSTANT_TRUE); 38.75 + } 38.76 + 38.77 + public static void main(String[] args) { 38.78 + executeLoop(); 38.79 + } 38.80 +}
39.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 39.2 +++ b/test/compiler/c1/TestPinnedIntrinsics.java Thu Nov 02 11:31:01 2017 -0700 39.3 @@ -0,0 +1,68 @@ 39.4 +/* 39.5 + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 39.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 39.7 + * 39.8 + * This code is free software; you can redistribute it and/or modify it 39.9 + * under the terms of the GNU General Public License version 2 only, as 39.10 + * published by the Free Software Foundation. 39.11 + * 39.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 39.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 39.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 39.15 + * version 2 for more details (a copy is included in the LICENSE file that 39.16 + * accompanied this code). 39.17 + * 39.18 + * You should have received a copy of the GNU General Public License version 39.19 + * 2 along with this work; if not, write to the Free Software Foundation, 39.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 39.21 + * 39.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 39.23 + * or visit www.oracle.com if you need additional information or have any 39.24 + * questions. 39.25 + */ 39.26 + 39.27 +/* 39.28 + * @test 39.29 + * @bug 8184271 39.30 + * @summary Test correct scheduling of System.nanoTime and System.currentTimeMillis C1 intrinsics. 
39.31 + * @run main/othervm -XX:TieredStopAtLevel=1 -Xbatch 39.32 + * -XX:CompileCommand=dontinline,compiler.c1.TestPinnedIntrinsics::checkNanoTime 39.33 + * -XX:CompileCommand=dontinline,compiler.c1.TestPinnedIntrinsics::checkCurrentTimeMillis 39.34 + * compiler.c1.TestPinnedIntrinsics 39.35 + */ 39.36 + 39.37 +package compiler.c1; 39.38 + 39.39 +public class TestPinnedIntrinsics { 39.40 + 39.41 + private static void testNanoTime() { 39.42 + long start = System.nanoTime(); 39.43 + long end = System.nanoTime(); 39.44 + checkNanoTime(end - start); 39.45 + } 39.46 + 39.47 + private static void checkNanoTime(long diff) { 39.48 + if (diff < 0) { 39.49 + throw new RuntimeException("testNanoTime failed with " + diff); 39.50 + } 39.51 + } 39.52 + 39.53 + private static void testCurrentTimeMillis() { 39.54 + long start = System.currentTimeMillis(); 39.55 + long end = System.currentTimeMillis(); 39.56 + checkCurrentTimeMillis(end - start); 39.57 + } 39.58 + 39.59 + private static void checkCurrentTimeMillis(long diff) { 39.60 + if (diff < 0) { 39.61 + throw new RuntimeException("testCurrentTimeMillis failed with " + diff); 39.62 + } 39.63 + } 39.64 + 39.65 + public static void main(String[] args) { 39.66 + for (int i = 0; i < 100_000; ++i) { 39.67 + testNanoTime(); 39.68 + testCurrentTimeMillis(); 39.69 + } 39.70 + } 39.71 +}
40.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 40.2 +++ b/test/compiler/c2/FloatingPointFoldingTest.java Thu Nov 02 11:31:01 2017 -0700 40.3 @@ -0,0 +1,163 @@ 40.4 +/* 40.5 + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. 40.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 40.7 + * 40.8 + * This code is free software; you can redistribute it and/or modify it 40.9 + * under the terms of the GNU General Public License version 2 only, as 40.10 + * published by the Free Software Foundation. 40.11 + * 40.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 40.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 40.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 40.15 + * version 2 for more details (a copy is included in the LICENSE file that 40.16 + * accompanied this code). 40.17 + * 40.18 + * You should have received a copy of the GNU General Public License version 40.19 + * 2 along with this work; if not, write to the Free Software Foundation, 40.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 40.21 + * 40.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 40.23 + * or visit www.oracle.com if you need additional information or have any 40.24 + * questions. 40.25 + * 40.26 + */ 40.27 + 40.28 +/** 40.29 + * @test 40.30 + * @bug 8073670 40.31 + * @summary Test that causes C2 to fold two NaNs with different values into a single NaN. 
40.32 + * @run main/othervm -XX:-TieredCompilation -Xcomp -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_inf -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_zero -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_double_nan -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_inf -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_zero -XX:CompileCommand=compileonly,FloatingPointFoldingTest.test_float_nan FloatingPointFoldingTest 40.33 + */ 40.34 + 40.35 +public class FloatingPointFoldingTest { 40.36 + // Double values. 40.37 + public static final long MINUS_INF_LONGBITS = 0xfff0000000000000L; 40.38 + public static final double DOUBLE_MINUS_INF = Double.longBitsToDouble(MINUS_INF_LONGBITS); 40.39 + 40.40 + public static final long PLUS_INF_LONGBITS = 0x7ff0000000000000L; 40.41 + public static final double DOUBLE_PLUS_INF = Double.longBitsToDouble(PLUS_INF_LONGBITS); 40.42 + 40.43 + public static final long MINUS_ZERO_LONGBITS = 0x8000000000000000L; 40.44 + public static final double DOUBLE_MINUS_ZERO = Double.longBitsToDouble(MINUS_ZERO_LONGBITS); 40.45 + 40.46 + // We need two different NaN values. A floating point number is 40.47 + // considered to be NaN if the sign bit is 0, all exponent bits 40.48 + // are set to 1, and at least one bit of the mantissa is not zero. 40.49 + // 40.50 + // As java.lang.Double.NaN is 0x7ff8000000000000L, we use 40.51 + // 0x7ffc000000000000L as a second NaN double value. 40.52 + public static final long NAN_LONGBITS = 0x7ffc000000000000L; 40.53 + public static final double DOUBLE_NAN = Double.longBitsToDouble(NAN_LONGBITS); 40.54 + 40.55 + // Float values. 
40.56 + public static final int MINUS_INF_INTBITS = 0xff800000; 40.57 + public static final float FLOAT_MINUS_INF = Float.intBitsToFloat(MINUS_INF_INTBITS); 40.58 + 40.59 + public static final int PLUS_INF_INTBITS = 0x7f800000; 40.60 + public static final float FLOAT_PLUS_INF = Float.intBitsToFloat(PLUS_INF_INTBITS); 40.61 + 40.62 + public static final int MINUS_ZERO_INTBITS = 0x80000000; 40.63 + public static final float FLOAT_MINUS_ZERO = Float.intBitsToFloat(MINUS_ZERO_INTBITS); 40.64 + 40.65 + // As java.lang.Float.NaN is 0x7fc00000, we use 0x7fe00000 40.66 + // as a second NaN float value. 40.67 + public static final int NAN_INTBITS = 0x7fe00000; 40.68 + public static final float FLOAT_NAN = Float.intBitsToFloat(NAN_INTBITS); 40.69 + 40.70 + 40.71 + // Double tests. 40.72 + static void test_double_inf(long[] result) { 40.73 + double d1 = DOUBLE_MINUS_INF; 40.74 + double d2 = DOUBLE_PLUS_INF; 40.75 + result[0] = Double.doubleToRawLongBits(d1); 40.76 + result[1] = Double.doubleToRawLongBits(d2); 40.77 + } 40.78 + 40.79 + static void test_double_zero(long[] result) { 40.80 + double d1 = DOUBLE_MINUS_ZERO; 40.81 + double d2 = 0; 40.82 + result[0] = Double.doubleToRawLongBits(d1); 40.83 + result[1] = Double.doubleToRawLongBits(d2); 40.84 + } 40.85 + 40.86 + static void test_double_nan(long[] result) { 40.87 + double d1 = DOUBLE_NAN; 40.88 + double d2 = Double.NaN; 40.89 + result[0] = Double.doubleToRawLongBits(d1); 40.90 + result[1] = Double.doubleToRawLongBits(d2); 40.91 + } 40.92 + 40.93 + // Float tests. 
40.94 + static void test_float_inf(int[] result) { 40.95 + float f1 = FLOAT_MINUS_INF; 40.96 + float f2 = FLOAT_PLUS_INF; 40.97 + result[0] = Float.floatToRawIntBits(f1); 40.98 + result[1] = Float.floatToRawIntBits(f2); 40.99 + } 40.100 + 40.101 + static void test_float_zero(int[] result) { 40.102 + float f1 = FLOAT_MINUS_ZERO; 40.103 + float f2 = 0; 40.104 + result[0] = Float.floatToRawIntBits(f1); 40.105 + result[1] = Float.floatToRawIntBits(f2); 40.106 + } 40.107 + 40.108 + static void test_float_nan(int[] result) { 40.109 + float f1 = FLOAT_NAN; 40.110 + float f2 = Float.NaN; 40.111 + result[0] = Float.floatToRawIntBits(f1); 40.112 + result[1] = Float.floatToRawIntBits(f2); 40.113 + } 40.114 + 40.115 + // Check doubles. 40.116 + static void check_double(long[] result, double d1, double d2) { 40.117 + if (result[0] == result[1]) { 40.118 + throw new RuntimeException("ERROR: Two different double values are considered equal. \n" 40.119 + + String.format("\toriginal values: 0x%x 0x%x\n", Double.doubleToRawLongBits(d1), Double.doubleToRawLongBits(d2)) 40.120 + + String.format("\tvalues after execution of method test(): 0x%x 0x%x", result[0], result[1])); 40.121 + } 40.122 + } 40.123 + 40.124 + // Check floats. 40.125 + static void check_float(int[] result, float f1, float f2) { 40.126 + if (result[0] == result[1]) { 40.127 + throw new RuntimeException("ERROR: Two different float values are considered equal. \n" 40.128 + + String.format("\toriginal values: 0x%x 0x%x\n", Float.floatToRawIntBits(f1), Float.floatToRawIntBits(f2)) 40.129 + + String.format("\tvalues after execution of method test(): 0x%x 0x%x", result[0], result[1])); 40.130 + } 40.131 + } 40.132 + 40.133 + public static void main(String[] args) { 40.134 + // Float tests. 40.135 + 40.136 + int[] iresult = new int[2]; 40.137 + 40.138 + // -Inf and +Inf. 40.139 + test_float_inf(iresult); 40.140 + check_float(iresult, FLOAT_MINUS_INF, FLOAT_PLUS_INF); 40.141 + 40.142 + // 0 and -0. 
40.143 + test_float_zero(iresult); 40.144 + check_float(iresult, FLOAT_MINUS_ZERO, 0); 40.145 + 40.146 + // Different NaNs. 40.147 + test_float_nan(iresult); 40.148 + check_float(iresult, FLOAT_NAN, Float.NaN); 40.149 + 40.150 + // Double tests. 40.151 + 40.152 + long[] lresult = new long[2]; 40.153 + 40.154 + // -Inf and +Inf. 40.155 + test_double_inf(lresult); 40.156 + check_double(lresult, DOUBLE_MINUS_INF, DOUBLE_PLUS_INF); 40.157 + 40.158 + // 0 and -0. 40.159 + test_double_zero(lresult); 40.160 + check_double(lresult, DOUBLE_MINUS_ZERO, 0); 40.161 + 40.162 + // Different NaNs. 40.163 + test_double_nan(lresult); 40.164 + check_double(lresult, DOUBLE_NAN, Double.NaN); 40.165 + } 40.166 +}
41.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 41.2 +++ b/test/compiler/loopopts/TestImpossibleIV.java Thu Nov 02 11:31:01 2017 -0700 41.3 @@ -0,0 +1,51 @@ 41.4 +/* 41.5 + * Copyright 2016 Google, Inc. All Rights Reserved. 41.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 41.7 + * 41.8 + * This code is free software; you can redistribute it and/or modify it 41.9 + * under the terms of the GNU General Public License version 2 only, as 41.10 + * published by the Free Software Foundation. 41.11 + * 41.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 41.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 41.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 41.15 + * version 2 for more details (a copy is included in the LICENSE file that 41.16 + * accompanied this code). 41.17 + * 41.18 + * You should have received a copy of the GNU General Public License version 41.19 + * 2 along with this work; if not, write to the Free Software Foundation, 41.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 41.21 + * 41.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 41.23 + * or visit www.oracle.com if you need additional information or have any 41.24 + * questions. 41.25 + */ 41.26 + 41.27 +/* 41.28 + * @test 41.29 + * @bug 8166742 41.30 + * @summary C2 IV elimination throws FPE 41.31 + * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation TestImpossibleIV 41.32 + * @author Chuck Rasbold rasbold@google.com 41.33 + */ 41.34 + 41.35 +/* 41.36 + * Use -XX:-TieredCompilation to get C2 only. 41.37 + * Use -XX:-BackgroundCompilation to wait for compilation before test exit. 41.38 + */ 41.39 + 41.40 +public class TestImpossibleIV { 41.41 + 41.42 + static private void testMethod() { 41.43 + int sum = 0; 41.44 + // A unit count-down loop which has an induction variable with 41.45 + // MIN_VALUE stride. 
41.46 + for (int i = 100000; i >= 0; i--) { 41.47 + sum += Integer.MIN_VALUE; 41.48 + } 41.49 + } 41.50 + 41.51 + public static void main(String[] args) { 41.52 + testMethod(); 41.53 + } 41.54 +}
42.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 42.2 +++ b/test/compiler/unsafe/TestRawAliasing.java Thu Nov 02 11:31:01 2017 -0700 42.3 @@ -0,0 +1,70 @@ 42.4 +/* 42.5 + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 42.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 42.7 + * 42.8 + * This code is free software; you can redistribute it and/or modify it 42.9 + * under the terms of the GNU General Public License version 2 only, as 42.10 + * published by the Free Software Foundation. 42.11 + * 42.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 42.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 42.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 42.15 + * version 2 for more details (a copy is included in the LICENSE file that 42.16 + * accompanied this code). 42.17 + * 42.18 + * You should have received a copy of the GNU General Public License version 42.19 + * 2 along with this work; if not, write to the Free Software Foundation, 42.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 42.21 + * 42.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 42.23 + * or visit www.oracle.com if you need additional information or have any 42.24 + * questions. 
42.25 + */ 42.26 + 42.27 +/* 42.28 + * @test 42.29 + * @bug 8178047 42.30 + * @run main/othervm -XX:CompileCommand=exclude,*.main -XX:-TieredCompilation -XX:-BackgroundCompilation compiler.unsafe.TestRawAliasing 42.31 + */ 42.32 + 42.33 +package compiler.unsafe; 42.34 + 42.35 +import java.lang.reflect.Field; 42.36 + 42.37 +public class TestRawAliasing { 42.38 + static private final sun.misc.Unsafe UNSAFE; 42.39 + static { 42.40 + try { 42.41 + Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe"); 42.42 + f.setAccessible(true); 42.43 + UNSAFE = (sun.misc.Unsafe) f.get(null); 42.44 + } catch (Exception e) { 42.45 + throw new RuntimeException("Unable to get Unsafe instance.", e); 42.46 + } 42.47 + } 42.48 + 42.49 + static private final int OFFSET_X = 50; 42.50 + static private final int OFFSET_Y = 100; 42.51 + 42.52 + private static int test(long base_plus_offset_x, long base_plus_offset_y, int magic_value) { 42.53 + // write 0 to a location 42.54 + UNSAFE.putByte(base_plus_offset_x - OFFSET_X, (byte)0); 42.55 + // write unfoldable value to really the same location with another base 42.56 + UNSAFE.putByte(base_plus_offset_y - OFFSET_Y, (byte)magic_value); 42.57 + // read the value back, should be equal to "unfoldable_value" 42.58 + return UNSAFE.getByte(base_plus_offset_x - OFFSET_X); 42.59 + } 42.60 + 42.61 + private static final int OFF_HEAP_AREA_SIZE = 128; 42.62 + private static final byte MAGIC = 123; 42.63 + 42.64 + // main is excluded from compilation since we don't want the test method to inline and make base values fold 42.65 + public static void main(String... args) { 42.66 + long base = UNSAFE.allocateMemory(OFF_HEAP_AREA_SIZE); 42.67 + for (int i = 0; i < 100_000; i++) { 42.68 + if (test(base + OFFSET_X, base + OFFSET_Y, MAGIC) != MAGIC) { 42.69 + throw new RuntimeException("Unexpected magic value"); 42.70 + } 42.71 + } 42.72 + } 42.73 +}