Tue, 04 Feb 2020 18:13:14 +0800
Merge
--- a/.hgtags	Tue Feb 04 17:38:01 2020 +0800
+++ b/.hgtags	Tue Feb 04 18:13:14 2020 +0800
@@ -1300,8 +1300,18 @@
 fa7fe6dae563edaae8a8bbe8ac4bd4fa942bde0c jdk8u232-b03
 921c5ee7965fdfde75f578ddda24d5cd16f124dc jdk8u232-b04
 b13d7942036329f64c77a93cffc25e1b52523a3c jdk8u232-b05
+760b28d871785cd508239a5f635cfb45451f9202 jdk8u242-b00
 fea2c7f50ce8e6aee1e946eaec7b834193747d82 jdk8u232-b06
 c751303497d539aa85c6373aa0fa85580d3f3044 jdk8u232-b07
 4170228e11e6313e948e6ddcae9af3eed06b1fbe jdk8u232-b08
+12177d88b89c12c14daa5ad681030d7551e8a5a0 jdk8u232-b09
+12177d88b89c12c14daa5ad681030d7551e8a5a0 jdk8u232-ga
 2be326848943a7f2359d78d203193045a2d773b3 mips64el-jdk8u232-b10
 17884ee5d05398bbd16013b0e73338aa69479cbf mips64el-jdk8u232-b11
+ce42ae95d4d671f74246091f89bd101d5bcbfd91 jdk8u242-b01
+775e2bf92114e41365cc6baf1480c818454256a4 jdk8u242-b02
+ee19c358e3b8deeda2f64d660a0870df7b1abd49 jdk8u242-b03
+20258ba5a788da55485c0648bcc073f8ad2c26ef jdk8u242-b04
+2c1e9fab6964647f4eeffe55fe5592da6399a3ce jdk8u242-b05
+81ddc1072b923330f84c0ace3124226f63877582 jdk8u242-b06
+8b80409d5840142a27e274d33948f483a6406a50 jdk8u242-b07
--- a/THIRD_PARTY_README	Tue Feb 04 17:38:01 2020 +0800
+++ b/THIRD_PARTY_README	Tue Feb 04 18:13:14 2020 +0800
@@ -1334,11 +1334,13 @@
 
 --------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Joni v1.1.9, which may be
+%% This notice is provided with respect to Joni v2.1.16, which may be
 included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
+Copyright (c) 2017 JRuby Team
+
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -194,6 +194,11 @@
     FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   }
 
+  if (UseGHASHIntrinsics) {
+    warning("GHASH intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
+  }
+
   if (has_vshasig()) {
     if (FLAG_IS_DEFAULT(UseSHA)) {
       UseSHA = true;
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -129,6 +129,7 @@
     flog3_op3      = 0x36,
     edge_op3       = 0x36,
     fsrc_op3       = 0x36,
+    xmulx_op3      = 0x36,
     impdep2_op3    = 0x37,
     stpartialf_op3 = 0x37,
     jmpl_op3       = 0x38,
@@ -220,6 +221,8 @@
     mdtox_opf   = 0x110,
     mstouw_opf  = 0x111,
     mstosw_opf  = 0x113,
+    xmulx_opf   = 0x115,
+    xmulxhi_opf = 0x116,
     mxtod_opf   = 0x118,
     mwtos_opf   = 0x119,
 
@@ -1212,6 +1215,9 @@
   void movwtos( Register s, FloatRegister d ) { vis3_only();  emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
   void movxtod( Register s, FloatRegister d ) { vis3_only();  emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
 
+  void xmulx(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulx_opf) | rs2(s2)); }
+  void xmulxhi(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulxhi_opf) | rs2(s2)); }
+
   // Crypto SHA instructions
 
   void sha1() { sha1_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); }
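For orientation: the two VIS3 instructions encoded above compute a 64x64->128-bit carry-less (XOR) product; xmulx yields the low 64 bits of the product and xmulxhi the high 64 bits. A minimal portable C++ model of that operation, for reference only (this sketch is not part of the changeset):

    #include <cstdint>

    // Carry-less multiply: schoolbook binary multiplication, but partial
    // products are combined with XOR instead of addition (no carries).
    static void xmulx_ref(uint64_t a, uint64_t b, uint64_t& lo, uint64_t& hi) {
      lo = 0; hi = 0;
      for (int i = 0; i < 64; ++i) {
        if ((b >> i) & 1) {
          lo ^= a << i;                    // low half of (a << i)
          if (i > 0) hi ^= a >> (64 - i);  // bits shifted past bit 63
        }
      }
    }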
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -4788,6 +4788,130 @@
     return start;
   }
 
+  /* Single and multi-block ghash operations */
+  address generate_ghash_processBlocks() {
+    __ align(CodeEntryAlignment);
+    Label L_ghash_loop, L_aligned, L_main;
+    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
+    address start = __ pc();
+
+    Register state = I0;
+    Register subkeyH = I1;
+    Register data = I2;
+    Register len = I3;
+
+    __ save_frame(0);
+
+    __ ldx(state, 0, O0);
+    __ ldx(state, 8, O1);
+
+    // Loop label for multiblock operations
+    __ BIND(L_ghash_loop);
+
+    // Check if 'data' is unaligned
+    __ andcc(data, 7, G1);
+    __ br(Assembler::zero, false, Assembler::pt, L_aligned);
+    __ delayed()->nop();
+
+    Register left_shift = L1;
+    Register right_shift = L2;
+    Register data_ptr = L3;
+
+    // Get left and right shift values in bits
+    __ sll(G1, LogBitsPerByte, left_shift);
+    __ mov(64, right_shift);
+    __ sub(right_shift, left_shift, right_shift);
+
+    // Align to read 'data'
+    __ sub(data, G1, data_ptr);
+
+    // Load first 8 bytes of 'data'
+    __ ldx(data_ptr, 0, O4);
+    __ sllx(O4, left_shift, O4);
+    __ ldx(data_ptr, 8, O5);
+    __ srlx(O5, right_shift, G4);
+    __ bset(G4, O4);
+
+    // Load second 8 bytes of 'data'
+    __ sllx(O5, left_shift, O5);
+    __ ldx(data_ptr, 16, G4);
+    __ srlx(G4, right_shift, G4);
+    __ ba(L_main);
+    __ delayed()->bset(G4, O5);
+
+    // If 'data' is aligned, load normally
+    __ BIND(L_aligned);
+    __ ldx(data, 0, O4);
+    __ ldx(data, 8, O5);
+
+    __ BIND(L_main);
+    __ ldx(subkeyH, 0, O2);
+    __ ldx(subkeyH, 8, O3);
+
+    __ xor3(O0, O4, O0);
+    __ xor3(O1, O5, O1);
+
+    __ xmulxhi(O0, O3, G3);
+    __ xmulx(O0, O2, O5);
+    __ xmulxhi(O1, O2, G4);
+    __ xmulxhi(O1, O3, G5);
+    __ xmulx(O0, O3, G1);
+    __ xmulx(O1, O3, G2);
+    __ xmulx(O1, O2, O3);
+    __ xmulxhi(O0, O2, O4);
+
+    __ mov(0xE1, O0);
+    __ sllx(O0, 56, O0);
+
+    __ xor3(O5, G3, O5);
+    __ xor3(O5, G4, O5);
+    __ xor3(G5, G1, G1);
+    __ xor3(G1, O3, G1);
+    __ srlx(G2, 63, O1);
+    __ srlx(G1, 63, G3);
+    __ sllx(G2, 63, O3);
+    __ sllx(G2, 58, O2);
+    __ xor3(O3, O2, O2);
+
+    __ sllx(G1, 1, G1);
+    __ or3(G1, O1, G1);
+
+    __ xor3(G1, O2, G1);
+
+    __ sllx(G2, 1, G2);
+
+    __ xmulxhi(G1, O0, O1);
+    __ xmulx(G1, O0, O2);
+    __ xmulxhi(G2, O0, O3);
+    __ xmulx(G2, O0, G1);
+
+    __ xor3(O4, O1, O4);
+    __ xor3(O5, O2, O5);
+    __ xor3(O5, O3, O5);
+
+    __ sllx(O4, 1, O2);
+    __ srlx(O5, 63, O3);
+
+    __ or3(O2, O3, O0);
+
+    __ sllx(O5, 1, O1);
+    __ srlx(G1, 63, O2);
+    __ or3(O1, O2, O1);
+    __ xor3(O1, G3, O1);
+
+    __ deccc(len);
+    __ br(Assembler::notZero, true, Assembler::pt, L_ghash_loop);
+    __ delayed()->add(data, 16, data);
+
+    __ stx(O0, I0, 0);
+    __ stx(O1, I0, 8);
+
+    __ ret();
+    __ delayed()->restore();
+
+    return start;
+  }
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -4860,6 +4984,10 @@
       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
     }
+    // generate GHASH intrinsics code
+    if (UseGHASHIntrinsics) {
+      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
+    }
 
     // generate SHA1/SHA256/SHA512 intrinsics code
     if (UseSHA1Intrinsics) {
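For reference, the stub above computes GHASH's per-block update, state = (state XOR block) * subkeyH, in GF(2^128) with the reduction polynomial x^128 + x^7 + x^2 + x + 1 in reflected bit order; the 0xE1 constant loaded into O0 is that polynomial's reflected representation. A bit-serial C++ model of the same multiply, following NIST SP 800-38D Algorithm 1 (illustrative only, and far slower than the carry-less-multiply stub):

    #include <cstdint>

    struct u128 { uint64_t hi, lo; };  // hi holds the first 8 big-endian bytes

    // GF(2^128) multiply Z = X * H, bit 0 = most significant bit of X.
    static u128 gf128_mul(u128 X, u128 H) {
      u128 Z = {0, 0};
      u128 V = H;
      for (int i = 0; i < 128; ++i) {
        uint64_t xbit = (i < 64) ? (X.hi >> (63 - i)) & 1
                                 : (X.lo >> (127 - i)) & 1;
        if (xbit) { Z.hi ^= V.hi; Z.lo ^= V.lo; }
        uint64_t lsb = V.lo & 1;               // V = V >> 1, then reduce
        V.lo = (V.lo >> 1) | (V.hi << 63);
        V.hi >>= 1;
        if (lsb) V.hi ^= 0xE100000000000000ULL; // R = 0xE1 || 0^120
      }
      return Z;
    }

    // One GHASH block step: state = (state ^ block) * H.
    static void ghash_block(u128& state, u128 H, u128 block) {
      state.hi ^= block.hi;
      state.lo ^= block.lo;
      state = gf128_mul(state, H);
    }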
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -286,39 +286,50 @@
 
   // SPARC T4 and above should have support for AES instructions
   if (has_aes()) {
-    if (UseVIS > 2) { // AES intrinsics use MOVxTOd/MOVdTOx which are VIS3
-      if (FLAG_IS_DEFAULT(UseAES)) {
-        FLAG_SET_DEFAULT(UseAES, true);
+    if (FLAG_IS_DEFAULT(UseAES)) {
+      FLAG_SET_DEFAULT(UseAES, true);
+    }
+    if (!UseAES) {
+      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
       }
-      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
-        FLAG_SET_DEFAULT(UseAESIntrinsics, true);
-      }
-      // we disable both the AES flags if either of them is disabled on the command line
-      if (!UseAES || !UseAESIntrinsics) {
-        FLAG_SET_DEFAULT(UseAES, false);
+      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+    } else {
+      // The AES intrinsic stubs require AES instruction support (of course)
+      // but also require VIS3 mode or higher for instructions it use.
+      if (UseVIS > 2) {
+        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
+        }
+      } else {
+        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          warning("SPARC AES intrinsics require VIS3 instructions. Intrinsics will be disabled.");
+        }
         FLAG_SET_DEFAULT(UseAESIntrinsics, false);
       }
-    } else {
-      if (UseAES || UseAESIntrinsics) {
-        warning("SPARC AES intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
-        if (UseAES) {
-          FLAG_SET_DEFAULT(UseAES, false);
-        }
-        if (UseAESIntrinsics) {
-          FLAG_SET_DEFAULT(UseAESIntrinsics, false);
-        }
-      }
     }
   } else if (UseAES || UseAESIntrinsics) {
-    warning("AES instructions are not available on this CPU");
-    if (UseAES) {
+    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
+      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
-    if (UseAESIntrinsics) {
+    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
  }
 
+  // GHASH/GCM intrinsics
+  if (has_vis3() && (UseVIS > 2)) {
+    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
+      UseGHASHIntrinsics = true;
+    }
+  } else if (UseGHASHIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
+      warning("GHASH intrinsics require VIS3 instruction support. Intrinsics will be disabled");
+    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
+  }
+
   // SHA1, SHA256, and SHA512 instructions were added to SPARC T-series at different times
   if (has_sha1() || has_sha256() || has_sha512()) {
     if (UseVIS > 0) { // SHA intrinsics use VIS1 instructions
--- a/src/cpu/x86/vm/assembler_x86.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -2575,6 +2575,15 @@
   emit_int8(shift);
 }
 
+void Assembler::pslldq(XMMRegister dst, int shift) {
+  // Shift left 128 bit value in xmm register by number of bytes.
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66);
+  emit_int8(0x73);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(shift);
+}
+
 void Assembler::ptest(XMMRegister dst, Address src) {
   assert(VM_Version::supports_sse4_1(), "");
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
--- a/src/cpu/x86/vm/assembler_x86.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -1527,6 +1527,8 @@
 
   // Shift Right by bytes Logical DoubleQuadword Immediate
   void psrldq(XMMRegister dst, int shift);
+  // Shift Left by bytes Logical DoubleQuadword Immediate
+  void pslldq(XMMRegister dst, int shift);
 
   // Logical Compare 128bit
   void ptest(XMMRegister dst, XMMRegister src);
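Note that PSLLDQ/PSRLDQ shift the full 128-bit register by a byte count, not a bit count. A portable sketch of the semantics the new pslldq emitter encodes, on a little-endian 16-byte buffer (illustrative only, not HotSpot code):

    #include <cstdint>
    #include <cstring>

    // PSLLDQ reference: shift a 128-bit value left by 'shift' BYTES.
    // In little-endian memory order, "left" means toward higher addresses.
    static void pslldq_ref(uint8_t x[16], int shift) {
      if (shift > 15) { memset(x, 0, 16); return; }  // shifted out entirely
      memmove(x + shift, x, 16 - shift);
      memset(x, 0, shift);                           // zero-fill low bytes
    }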
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -2719,6 +2719,169 @@
     return start;
   }
 
+  // byte swap x86 long
+  address generate_ghash_long_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
+    address start = __ pc();
+    __ emit_data(0x0b0a0908, relocInfo::none, 0);
+    __ emit_data(0x0f0e0d0c, relocInfo::none, 0);
+    __ emit_data(0x03020100, relocInfo::none, 0);
+    __ emit_data(0x07060504, relocInfo::none, 0);
+
+    return start;
+  }
+
+  // byte swap x86 byte array
+  address generate_ghash_byte_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
+    address start = __ pc();
+    __ emit_data(0x0c0d0e0f, relocInfo::none, 0);
+    __ emit_data(0x08090a0b, relocInfo::none, 0);
+    __ emit_data(0x04050607, relocInfo::none, 0);
+    __ emit_data(0x00010203, relocInfo::none, 0);
+    return start;
+  }
+
+  /* Single and multi-block ghash operations */
+  address generate_ghash_processBlocks() {
+    assert(UseGHASHIntrinsics, "need GHASH intrinsics and CLMUL support");
+    __ align(CodeEntryAlignment);
+    Label L_ghash_loop, L_exit;
+    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
+    address start = __ pc();
+
+    const Register state   = rdi;
+    const Register subkeyH = rsi;
+    const Register data    = rdx;
+    const Register blocks  = rcx;
+
+    const Address state_param(rbp, 8+0);
+    const Address subkeyH_param(rbp, 8+4);
+    const Address data_param(rbp, 8+8);
+    const Address blocks_param(rbp, 8+12);
+
+    const XMMRegister xmm_temp0 = xmm0;
+    const XMMRegister xmm_temp1 = xmm1;
+    const XMMRegister xmm_temp2 = xmm2;
+    const XMMRegister xmm_temp3 = xmm3;
+    const XMMRegister xmm_temp4 = xmm4;
+    const XMMRegister xmm_temp5 = xmm5;
+    const XMMRegister xmm_temp6 = xmm6;
+    const XMMRegister xmm_temp7 = xmm7;
+
+    __ enter();
+    handleSOERegisters(true);  // Save registers
+
+    __ movptr(state, state_param);
+    __ movptr(subkeyH, subkeyH_param);
+    __ movptr(data, data_param);
+    __ movptr(blocks, blocks_param);
+
+    __ movdqu(xmm_temp0, Address(state, 0));
+    __ pshufb(xmm_temp0, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+
+    __ movdqu(xmm_temp1, Address(subkeyH, 0));
+    __ pshufb(xmm_temp1, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+
+    __ BIND(L_ghash_loop);
+    __ movdqu(xmm_temp2, Address(data, 0));
+    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));
+
+    __ pxor(xmm_temp0, xmm_temp2);
+
+    //
+    // Multiply with the hash key
+    //
+    __ movdqu(xmm_temp3, xmm_temp0);
+    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
+    __ movdqu(xmm_temp4, xmm_temp0);
+    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1
+
+    __ movdqu(xmm_temp5, xmm_temp0);
+    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
+    __ movdqu(xmm_temp6, xmm_temp0);
+    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1
+
+    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0
+
+    __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
+    __ psrldq(xmm_temp4, 8);    // shift by xmm4 64 bits to the right
+    __ pslldq(xmm_temp5, 8);    // shift by xmm5 64 bits to the left
+    __ pxor(xmm_temp3, xmm_temp5);
+    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
+                                        // of the carry-less multiplication of
+                                        // xmm0 by xmm1.
+
+    // We shift the result of the multiplication by one bit position
+    // to the left to cope for the fact that the bits are reversed.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp4, xmm_temp6);
+    __ pslld (xmm_temp3, 1);
+    __ pslld(xmm_temp6, 1);
+    __ psrld(xmm_temp7, 31);
+    __ psrld(xmm_temp4, 31);
+    __ movdqu(xmm_temp5, xmm_temp7);
+    __ pslldq(xmm_temp4, 4);
+    __ pslldq(xmm_temp7, 4);
+    __ psrldq(xmm_temp5, 12);
+    __ por(xmm_temp3, xmm_temp7);
+    __ por(xmm_temp6, xmm_temp4);
+    __ por(xmm_temp6, xmm_temp5);
+
+    //
+    // First phase of the reduction
+    //
+    // Move xmm3 into xmm4, xmm5, xmm7 in order to perform the shifts
+    // independently.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp4, xmm_temp3);
+    __ movdqu(xmm_temp5, xmm_temp3);
+    __ pslld(xmm_temp7, 31);    // packed right shift shifting << 31
+    __ pslld(xmm_temp4, 30);    // packed right shift shifting << 30
+    __ pslld(xmm_temp5, 25);    // packed right shift shifting << 25
+    __ pxor(xmm_temp7, xmm_temp4);      // xor the shifted versions
+    __ pxor(xmm_temp7, xmm_temp5);
+    __ movdqu(xmm_temp4, xmm_temp7);
+    __ pslldq(xmm_temp7, 12);
+    __ psrldq(xmm_temp4, 4);
+    __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete
+
+    //
+    // Second phase of the reduction
+    //
+    // Make 3 copies of xmm3 in xmm2, xmm5, xmm7 for doing these
+    // shift operations.
+    __ movdqu(xmm_temp2, xmm_temp3);
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp5, xmm_temp3);
+    __ psrld(xmm_temp2, 1);     // packed left shifting >> 1
+    __ psrld(xmm_temp7, 2);     // packed left shifting >> 2
+    __ psrld(xmm_temp5, 7);     // packed left shifting >> 7
+    __ pxor(xmm_temp2, xmm_temp7);      // xor the shifted versions
+    __ pxor(xmm_temp2, xmm_temp5);
+    __ pxor(xmm_temp2, xmm_temp4);
+    __ pxor(xmm_temp3, xmm_temp2);
+    __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6
+
+    __ decrement(blocks);
+    __ jcc(Assembler::zero, L_exit);
+    __ movdqu(xmm_temp0, xmm_temp6);
+    __ addptr(data, 16);
+    __ jmp(L_ghash_loop);
+
+    __ BIND(L_exit);
+    // Byte swap 16-byte result
+    __ pshufb(xmm_temp6, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+    __ movdqu(Address(state, 0), xmm_temp6);    // store the result
+
+    handleSOERegisters(false);  // restore registers
+    __ leave();
+    __ ret(0);
+    return start;
+  }
+
   /**
    * Arguments:
    *
@@ -3018,6 +3181,13 @@
       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
     }
 
+    // Generate GHASH intrinsics code
+    if (UseGHASHIntrinsics) {
+      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
+      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
+      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
+    }
+
     // Safefetch stubs.
     generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                    &StubRoutines::_safefetch32_fault_pc,
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -3639,6 +3639,175 @@
     return start;
   }
 
+
+  // byte swap x86 long
+  address generate_ghash_long_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
+    address start = __ pc();
+    __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none );
+    __ emit_data64(0x0706050403020100, relocInfo::none );
+    return start;
+  }
+
+  // byte swap x86 byte array
+  address generate_ghash_byte_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
+    address start = __ pc();
+    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none );
+    __ emit_data64(0x0001020304050607, relocInfo::none );
+    return start;
+  }
+
+  /* Single and multi-block ghash operations */
+  address generate_ghash_processBlocks() {
+    __ align(CodeEntryAlignment);
+    Label L_ghash_loop, L_exit;
+    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
+    address start = __ pc();
+
+    const Register state   = c_rarg0;
+    const Register subkeyH = c_rarg1;
+    const Register data    = c_rarg2;
+    const Register blocks  = c_rarg3;
+
+#ifdef _WIN64
+    const int XMM_REG_LAST = 10;
+#endif
+
+    const XMMRegister xmm_temp0  = xmm0;
+    const XMMRegister xmm_temp1  = xmm1;
+    const XMMRegister xmm_temp2  = xmm2;
+    const XMMRegister xmm_temp3  = xmm3;
+    const XMMRegister xmm_temp4  = xmm4;
+    const XMMRegister xmm_temp5  = xmm5;
+    const XMMRegister xmm_temp6  = xmm6;
+    const XMMRegister xmm_temp7  = xmm7;
+    const XMMRegister xmm_temp8  = xmm8;
+    const XMMRegister xmm_temp9  = xmm9;
+    const XMMRegister xmm_temp10 = xmm10;
+
+    __ enter();
+
+#ifdef _WIN64
+    // save the xmm registers which must be preserved 6-10
+    __ subptr(rsp, -rsp_after_call_off * wordSize);
+    for (int i = 6; i <= XMM_REG_LAST; i++) {
+      __ movdqu(xmm_save(i), as_XMMRegister(i));
+    }
+#endif
+
+    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+
+    __ movdqu(xmm_temp0, Address(state, 0));
+    __ pshufb(xmm_temp0, xmm_temp10);
+
+
+    __ BIND(L_ghash_loop);
+    __ movdqu(xmm_temp2, Address(data, 0));
+    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));
+
+    __ movdqu(xmm_temp1, Address(subkeyH, 0));
+    __ pshufb(xmm_temp1, xmm_temp10);
+
+    __ pxor(xmm_temp0, xmm_temp2);
+
+    //
+    // Multiply with the hash key
+    //
+    __ movdqu(xmm_temp3, xmm_temp0);
+    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
+    __ movdqu(xmm_temp4, xmm_temp0);
+    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1
+
+    __ movdqu(xmm_temp5, xmm_temp0);
+    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
+    __ movdqu(xmm_temp6, xmm_temp0);
+    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1
+
+    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0
+
+    __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
+    __ psrldq(xmm_temp4, 8);    // shift by xmm4 64 bits to the right
+    __ pslldq(xmm_temp5, 8);    // shift by xmm5 64 bits to the left
+    __ pxor(xmm_temp3, xmm_temp5);
+    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
+                                        // of the carry-less multiplication of
+                                        // xmm0 by xmm1.
+
+    // We shift the result of the multiplication by one bit position
+    // to the left to cope for the fact that the bits are reversed.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp8, xmm_temp6);
+    __ pslld(xmm_temp3, 1);
+    __ pslld(xmm_temp6, 1);
+    __ psrld(xmm_temp7, 31);
+    __ psrld(xmm_temp8, 31);
+    __ movdqu(xmm_temp9, xmm_temp7);
+    __ pslldq(xmm_temp8, 4);
+    __ pslldq(xmm_temp7, 4);
+    __ psrldq(xmm_temp9, 12);
+    __ por(xmm_temp3, xmm_temp7);
+    __ por(xmm_temp6, xmm_temp8);
+    __ por(xmm_temp6, xmm_temp9);
+
+    //
+    // First phase of the reduction
+    //
+    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
+    // independently.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp8, xmm_temp3);
+    __ movdqu(xmm_temp9, xmm_temp3);
+    __ pslld(xmm_temp7, 31);    // packed right shift shifting << 31
+    __ pslld(xmm_temp8, 30);    // packed right shift shifting << 30
+    __ pslld(xmm_temp9, 25);    // packed right shift shifting << 25
+    __ pxor(xmm_temp7, xmm_temp8);      // xor the shifted versions
+    __ pxor(xmm_temp7, xmm_temp9);
+    __ movdqu(xmm_temp8, xmm_temp7);
+    __ pslldq(xmm_temp7, 12);
+    __ psrldq(xmm_temp8, 4);
+    __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete
+
+    //
+    // Second phase of the reduction
+    //
+    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
+    // shift operations.
+    __ movdqu(xmm_temp2, xmm_temp3);
+    __ movdqu(xmm_temp4, xmm_temp3);
+    __ movdqu(xmm_temp5, xmm_temp3);
+    __ psrld(xmm_temp2, 1);     // packed left shifting >> 1
+    __ psrld(xmm_temp4, 2);     // packed left shifting >> 2
+    __ psrld(xmm_temp5, 7);     // packed left shifting >> 7
+    __ pxor(xmm_temp2, xmm_temp4);      // xor the shifted versions
+    __ pxor(xmm_temp2, xmm_temp5);
+    __ pxor(xmm_temp2, xmm_temp8);
+    __ pxor(xmm_temp3, xmm_temp2);
+    __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6
+
+    __ decrement(blocks);
+    __ jcc(Assembler::zero, L_exit);
+    __ movdqu(xmm_temp0, xmm_temp6);
+    __ addptr(data, 16);
+    __ jmp(L_ghash_loop);
+
+    __ BIND(L_exit);
+    __ pshufb(xmm_temp6, xmm_temp10);           // Byte swap 16-byte result
+    __ movdqu(Address(state, 0), xmm_temp6);    // store the result
+
+#ifdef _WIN64
+    // restore xmm regs belonging to calling function
+    for (int i = 6; i <= XMM_REG_LAST; i++) {
+      __ movdqu(as_XMMRegister(i), xmm_save(i));
+    }
+#endif
+    __ leave();
+    __ ret(0);
+    return start;
+  }
+
   /**
    * Arguments:
    *
@@ -4077,6 +4246,13 @@
       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
     }
 
+    // Generate GHASH intrinsics code
+    if (UseGHASHIntrinsics) {
+      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
+      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
+      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
+    }
+
     // Safefetch stubs.
     generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                    &StubRoutines::_safefetch32_fault_pc,
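The "shift left by one bit" step in both x86 stubs exists because GHASH's bit numbering is the reverse of CLMUL's, so the 256-bit carry-less product held in <xmm6:xmm3> must be doubled before reduction. A portable restatement of just that step (a sketch under that interpretation, not the stub itself):

    #include <cstdint>

    struct u256 { uint64_t w[4]; };  // w[0] is the least significant word

    // Shift a 256-bit value left by one bit, propagating across words.
    static void shl1_256(u256& v) {
      for (int i = 3; i > 0; --i)
        v.w[i] = (v.w[i] << 1) | (v.w[i - 1] >> 63);
      v.w[0] <<= 1;
    }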
--- a/src/cpu/x86/vm/stubRoutines_x86.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/x86/vm/stubRoutines_x86.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 
 address StubRoutines::x86::_verify_mxcsr_entry = NULL;
 address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
+address StubRoutines::x86::_ghash_long_swap_mask_addr = NULL;
+address StubRoutines::x86::_ghash_byte_swap_mask_addr = NULL;
 
 uint64_t StubRoutines::x86::_crc_by128_masks[] =
 {
--- a/src/cpu/x86/vm/stubRoutines_x86.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/x86/vm/stubRoutines_x86.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,10 +36,15 @@
   // masks and table for CRC32
   static uint64_t _crc_by128_masks[];
   static juint    _crc_table[];
+  // swap mask for ghash
+  static address _ghash_long_swap_mask_addr;
+  static address _ghash_byte_swap_mask_addr;
 
  public:
   static address verify_mxcsr_entry()    { return _verify_mxcsr_entry; }
   static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
   static address crc_by128_masks_addr()  { return (address)_crc_by128_masks; }
+  static address ghash_long_swap_mask_addr() { return _ghash_long_swap_mask_addr; }
+  static address ghash_byte_swap_mask_addr() { return _ghash_byte_swap_mask_addr; }
 
 #endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -553,12 +553,36 @@
   // Use AES instructions if available.
   if (supports_aes()) {
     if (FLAG_IS_DEFAULT(UseAES)) {
-      UseAES = true;
+      FLAG_SET_DEFAULT(UseAES, true);
     }
-  } else if (UseAES) {
-    if (!FLAG_IS_DEFAULT(UseAES))
+    if (!UseAES) {
+      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
+      }
+      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+    } else {
+      if (UseSSE > 2) {
+        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
+        }
+      } else {
+        // The AES intrinsic stubs require AES instruction support (of course)
+        // but also require sse3 mode or higher for instructions it use.
+        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
+        }
+        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+      }
+    }
+  } else if (UseAES || UseAESIntrinsics) {
+    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
       warning("AES instructions are not available on this CPU");
-    FLAG_SET_DEFAULT(UseAES, false);
+      FLAG_SET_DEFAULT(UseAES, false);
+    }
+    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+      warning("AES intrinsics are not available on this CPU");
+      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+    }
   }
 
   // Use CLMUL instructions if available.
@@ -582,16 +606,15 @@
     FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
   }
 
-  // The AES intrinsic stubs require AES instruction support (of course)
-  // but also require sse3 mode for instructions it use.
-  if (UseAES && (UseSSE > 2)) {
-    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
-      UseAESIntrinsics = true;
+  // GHASH/GCM intrinsics
+  if (UseCLMUL && (UseSSE > 2)) {
+    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
+      UseGHASHIntrinsics = true;
     }
-  } else if (UseAESIntrinsics) {
-    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
-      warning("AES intrinsics are not available on this CPU");
-    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+  } else if (UseGHASHIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
+      warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
+    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }
 
  if (UseSHA) {
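The SPARC and x86 hunks above converge on one gating rule: an intrinsic defaults on only when its hardware prerequisites hold, follows its base feature flag, and warns only when the user set the flag explicitly. A standalone C++ model of that decision tree (names are illustrative, not the HotSpot FLAG_* macros):

    #include <cstdio>

    struct Flag { bool value; bool is_default; };

    static void gate_intrinsic(bool cpu_has_insn, bool isa_level_ok,
                               Flag& use_base, Flag& use_intrinsics) {
      if (cpu_has_insn) {
        if (use_base.is_default) use_base.value = true;
        if (!use_base.value) {
          if (use_intrinsics.value && !use_intrinsics.is_default)
            puts("warning: intrinsics require the base flag; disabling");
          use_intrinsics.value = false;
        } else if (isa_level_ok) {
          if (use_intrinsics.is_default) use_intrinsics.value = true;
        } else {
          if (use_intrinsics.value && !use_intrinsics.is_default)
            puts("warning: intrinsics need a higher ISA level; disabling");
          use_intrinsics.value = false;
        }
      } else {
        if (use_base.value && !use_base.is_default)
          puts("warning: instructions not available on this CPU");
        use_base.value = false;
        if (use_intrinsics.value && !use_intrinsics.is_default)
          puts("warning: intrinsics not available on this CPU");
        use_intrinsics.value = false;
      }
    }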
--- a/src/share/vm/c1/c1_LIR.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/c1/c1_LIR.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -2370,7 +2370,7 @@
 // LIR_OpProfileType
 void LIR_OpProfileType::print_instr(outputStream* out) const {
   out->print("exact = ");
-  if (exact_klass() == NULL) {
+  if (exact_klass() == NULL) {
     out->print("unknown");
   } else {
     exact_klass()->print_name_on(out);
--- a/src/share/vm/classfile/vmSymbols.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -863,6 +863,12 @@
    do_name(     implCompressMB_name,    "implCompressMultiBlock0")                                       \
    do_signature(implCompressMB_signature, "([BII)I")                                                     \
                                                                                                          \
+  /* support for com.sun.crypto.provider.GHASH */                                                        \
+  do_class(com_sun_crypto_provider_ghash, "com/sun/crypto/provider/GHASH")                               \
+  do_intrinsic(_ghash_processBlocks, com_sun_crypto_provider_ghash, processBlocks_name, ghash_processBlocks_signature, F_S) \
+   do_name(processBlocks_name, "processBlocks")                                                          \
+   do_signature(ghash_processBlocks_signature, "([BII[J[J)V")                                            \
+                                                                                                         \
   /* support for java.util.zip */                                                                        \
   do_class(java_util_zip_CRC32,           "java/util/zip/CRC32")                                         \
   do_intrinsic(_updateCRC32,               java_util_zip_CRC32,   update_name, int2_int_signature,  F_SN) \
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -161,6 +161,8 @@
     }
     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   }
+
+  _used_stable = 0;
 }
 
 // Like CompactibleSpace forward() but always calls cross_threshold() to
@@ -377,6 +379,14 @@
   return capacity() - free();
 }
 
+size_t CompactibleFreeListSpace::used_stable() const {
+  return _used_stable;
+}
+
+void CompactibleFreeListSpace::recalculate_used_stable() {
+  _used_stable = used();
+}
+
 size_t CompactibleFreeListSpace::free() const {
   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   // if you do this while the structures are in flux you
@@ -1218,6 +1228,13 @@
     debug_only(fc->mangleAllocated(size));
   }
 
+  // During GC we do not need to recalculate the stable used value for
+  // every allocation in old gen. It is done once at the end of GC instead
+  // for performance reasons.
+  if (!Universe::heap()->is_gc_active()) {
+    recalculate_used_stable();
+  }
+
   return res;
 }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -148,6 +148,9 @@
   // Used to keep track of limit of sweep for the space
   HeapWord* _sweep_limit;
 
+  // Stable value of used().
+  size_t _used_stable;
+
   // Support for compacting cms
   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
   HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
@@ -343,6 +346,17 @@
   // which overestimates the region by returning the entire
   // committed region (this is safe, but inefficient).
 
+  // Returns monotonically increasing stable used space bytes for CMS.
+  // This is required for jstat and other memory monitoring tools
+  // that might otherwise see inconsistent used space values during a garbage
+  // collection, promotion or allocation into compactibleFreeListSpace.
+  // The value returned by this function might be smaller than the
+  // actual value.
+  size_t used_stable() const;
+  // Recalculate and cache the current stable used() value. Only to be called
+  // in places where we can be sure that the result is stable.
+  void recalculate_used_stable();
+
   // Returns a subregion of the space containing all the objects in
   // the space.
   MemRegion used_region() const {
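The pattern here is a cached snapshot: monitoring readers never call used() directly (it walks free lists that are in flux during a sweep); instead they read a value republished only at points the collector knows are consistent. A minimal standalone C++ model of the idea (names illustrative, not the HotSpot classes):

    #include <atomic>
    #include <cstddef>

    class SpaceModel {
      size_t _capacity = 0;
      size_t _free = 0;                      // mutated during allocation/sweeping
      std::atomic<size_t> _used_stable{0};
     public:
      size_t used() const { return _capacity - _free; }  // transiently inconsistent mid-sweep
      size_t used_stable() const {           // safe for jstat-style readers
        return _used_stable.load(std::memory_order_relaxed);
      }
      void recalculate_used_stable() {       // call only where used() is known consistent
        _used_stable.store(used(), std::memory_order_relaxed);
      }
    };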
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -869,6 +869,10 @@
   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 }
 
+size_t ConcurrentMarkSweepGeneration::used_stable() const {
+  return cmsSpace()->used_stable();
+}
+
 size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
@@ -1955,6 +1959,8 @@
   FreelistLocker z(this);
   MetaspaceGC::compute_new_size();
   _cmsGen->compute_new_size_free_list();
+  // recalculate CMS used space after CMS collection
+  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 // A work method used by foreground collection to determine
@@ -2768,6 +2774,7 @@
 
   _capacity_at_prologue = capacity();
   _used_at_prologue = used();
+  _cmsSpace->recalculate_used_stable();
 
   // Delegate to CMScollector which knows how to coordinate between
   // this and any other CMS generations that it is responsible for
@@ -2837,6 +2844,7 @@
   _eden_chunk_index = 0;
 
   size_t cms_used = _cmsGen->cmsSpace()->used();
+  _cmsGen->cmsSpace()->recalculate_used_stable();
 
   // update performance counters - this uses a special version of
   // update_counters() that allows the utilization to be passed as a
@@ -3672,6 +3680,7 @@
     _collectorState = Marking;
   }
   SpecializationStats::print();
+  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
@@ -5066,10 +5075,12 @@
                     Mutex::_no_safepoint_check_flag);
     assert(!init_mark_was_synchronous, "but that's impossible!");
     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
+    _cmsGen->cmsSpace()->recalculate_used_stable();
   } else {
     // already have all the locks
     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
                              init_mark_was_synchronous);
+    _cmsGen->cmsSpace()->recalculate_used_stable();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -6368,6 +6379,10 @@
       // Update heap occupancy information which is used as
       // input to soft ref clearing policy at the next gc.
       Universe::update_heap_info_at_gc();
+
+      // recalculate CMS used space after CMS collection
+      _cmsGen->cmsSpace()->recalculate_used_stable();
+
       _collectorState = Resizing;
     }
   } else {
@@ -6467,6 +6482,7 @@
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
   }
+  _cmsSpace->recalculate_used_stable();
 }
 
 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -1190,6 +1190,7 @@
   double occupancy() const { return ((double)used())/((double)capacity()); }
   size_t contiguous_available() const;
   size_t unsafe_max_alloc_nogc() const;
+  size_t used_stable() const;
 
   // over-rides
   MemRegion used_region() const;
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -2520,6 +2520,12 @@
         }
       }
     }
+  } else if (GC_locker::should_discard(cause, gc_count_before)) {
+    // Return to be consistent with VMOp failure due to another
+    // collection slipping in after our gc_count but before our
+    // request is processed.  _gc_locker collections upgraded by
+    // GCLockerInvokesConcurrent are handled above and never discarded.
+    return;
   } else {
     if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
         DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -376,7 +376,7 @@
                                      MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                           1U);
     _sizer_kind = SizerMaxAndNewSize;
-    _adaptive_size = _min_desired_young_length == _max_desired_young_length;
+    _adaptive_size = _min_desired_young_length != _max_desired_young_length;
   } else {
     _sizer_kind = SizerNewSizeOnly;
   }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -133,7 +133,11 @@
   SizerKind _sizer_kind;
   uint _min_desired_young_length;
   uint _max_desired_young_length;
+
+  // False when using a fixed young generation size due to command-line options,
+  // true otherwise.
   bool _adaptive_size;
+
   uint calculate_default_min_length(uint new_number_of_heap_regions);
   uint calculate_default_max_length(uint new_number_of_heap_regions);
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -530,6 +530,10 @@
     full_gc_count = Universe::heap()->total_full_collections();
   }
 
+  if (GC_locker::should_discard(cause, gc_count)) {
+    return;
+  }
+
   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
   VMThread::execute(&op);
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -52,11 +52,16 @@
   }
 }
 
+static bool is_cause_full(GCCause::Cause cause) {
+  return (cause != GCCause::_gc_locker) && (cause != GCCause::_wb_young_gc)
+         DEBUG_ONLY(&& (cause != GCCause::_scavenge_alot));
+}
+
 // Only used for System.gc() calls
 VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
                                              uint full_gc_count,
                                              GCCause::Cause gc_cause) :
-  VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
+  VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause))
 {
 }
 
@@ -68,8 +73,7 @@
          "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
-      DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
+  if (!_full) {
     // If (and only if) the scavenge fails, this will invoke a full gc.
     heap->invoke_scavenge();
   } else {
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -63,7 +63,7 @@
   }
 
   inline void update_used() {
-    _used->set_value(_gen->used());
+    _used->set_value(_gen->used_stable());
   }
 
   // special version of update_used() to allow the used value to be
@@ -107,7 +107,7 @@
     GenerationUsedHelper(Generation* g) : _gen(g) { }
 
     inline jlong take_sample() {
-      return _gen->used();
+      return _gen->used_stable();
    }
  };
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -201,6 +201,19 @@
   }
 }
 
+static bool is_full_gc(int max_level) {
+  // Return true if max_level is all generations
+  return (max_level == (GenCollectedHeap::heap()->n_gens() - 1));
+}
+
+VM_GenCollectFull::VM_GenCollectFull(uint gc_count_before,
+                                     uint full_gc_count_before,
+                                     GCCause::Cause gc_cause,
+                                     int max_level) :
+  VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
+                  is_full_gc(max_level) /* full */),
+  _max_level(max_level) { }
+
 void VM_GenCollectFull::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -201,9 +201,7 @@
   VM_GenCollectFull(uint gc_count_before,
                     uint full_gc_count_before,
                     GCCause::Cause gc_cause,
-                    int max_level)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
-      _max_level(max_level) { }
+                    int max_level);
   ~VM_GenCollectFull() {}
   virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
   virtual void doit();
--- a/src/share/vm/memory/gcLocker.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/memory/gcLocker.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -31,6 +31,7 @@
 volatile jint GC_locker::_jni_lock_count = 0;
 volatile bool GC_locker::_needs_gc       = false;
 volatile bool GC_locker::_doing_gc       = false;
+unsigned int GC_locker::_total_collections = 0;
 
 #ifdef ASSERT
 volatile jint GC_locker::_debug_jni_lock_count = 0;
@@ -94,6 +95,11 @@
   }
 }
 
+bool GC_locker::should_discard(GCCause::Cause cause, uint total_collections) {
+  return (cause == GCCause::_gc_locker) &&
+         (_total_collections != total_collections);
+}
+
 void GC_locker::jni_lock(JavaThread* thread) {
   assert(!thread->in_critical(), "shouldn't currently be in a critical region");
   MutexLocker mu(JNICritical_lock);
@@ -117,7 +123,13 @@
     decrement_debug_jni_lock_count();
     thread->exit_critical();
     if (needs_gc() && !is_active_internal()) {
-      // We're the last thread out. Cause a GC to occur.
+      // We're the last thread out. Request a GC.
+      // Capture the current total collections, to allow detection of
+      // other collections that make this one unnecessary. The value of
+      // total_collections() is only changed at a safepoint, so there
+      // must not be a safepoint between the lock becoming inactive and
+      // getting the count, else there may be unnecessary GCLocker GCs.
+      _total_collections = Universe::heap()->total_collections();
       _doing_gc = true;
       {
         // Must give up the lock while at a safepoint
--- a/src/share/vm/memory/gcLocker.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/memory/gcLocker.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_MEMORY_GCLOCKER_HPP
 
 #include "gc_interface/collectedHeap.hpp"
+#include "gc_interface/gcCause.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.hpp"
@@ -57,6 +58,7 @@
   static volatile bool _needs_gc;        // heap is filling, we need a GC
                                          // note: bool is typedef'd as jint
   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
+  static uint _total_collections;        // value for _gc_locker collection
 
 #ifdef ASSERT
   // This lock count is updated for all operations and is used to
@@ -116,6 +118,12 @@
   // Sets _needs_gc if is_active() is true. Returns is_active().
   static bool check_active_before_gc();
 
+  // Return true if the designated collection is a GCLocker request
+  // that should be discarded. Returns true if cause == GCCause::_gc_locker
+  // and the given total collection value indicates a collection has been
+  // done since the GCLocker request was made.
+  static bool should_discard(GCCause::Cause cause, uint total_collections);
+
   // Stalls the caller (who should not be in a jni critical section)
   // until needs_gc() clears. Note however that needs_gc() may be
   // set at a subsequent safepoint and/or cleared under the
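The discard logic across the G1, ParallelScavenge, and GenCollectedHeap hunks all hangs off this one test: a GCLocker-induced request is stale if any collection completed after the last critical section drained. An illustrative standalone restatement (not the HotSpot class):

    struct GCLockerModel {
      unsigned int _total_collections = 0;  // snapshot taken when needs_gc fires

      // Called by the last thread leaving a JNI critical region, before the
      // GC request is issued; must happen without an intervening safepoint.
      void last_thread_exited(unsigned int heap_total_collections) {
        _total_collections = heap_total_collections;
      }

      // True if the queued _gc_locker request has been made redundant by
      // some other collection that already ran.
      bool should_discard(bool cause_is_gc_locker,
                          unsigned int current_total_collections) const {
        return cause_is_gc_locker &&
               _total_collections != current_total_collections;
      }
    };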
--- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -796,8 +796,11 @@
 #else // INCLUDE_ALL_GCS
     ShouldNotReachHere();
 #endif // INCLUDE_ALL_GCS
-  } else if (cause == GCCause::_wb_young_gc) {
-    // minor collection for WhiteBox API
+  } else if ((cause == GCCause::_wb_young_gc) ||
+             (cause == GCCause::_gc_locker)) {
+    // minor collection for WhiteBox or GCLocker.
+    // _gc_locker collections upgraded by GCLockerInvokesConcurrent
+    // are handled above and never discarded.
     collect(cause, 0);
   } else {
 #ifdef ASSERT
@@ -835,6 +838,11 @@
   // Read the GC count while holding the Heap_lock
   unsigned int gc_count_before      = total_collections();
   unsigned int full_gc_count_before = total_full_collections();
+
+  if (GC_locker::should_discard(cause, gc_count_before)) {
+    return;
+  }
+
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
@@ -887,24 +895,16 @@
 
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                           int max_level) {
-  int local_max_level;
-  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
-      gc_cause() == GCCause::_gc_locker) {
-    local_max_level = 0;
-  } else {
-    local_max_level = max_level;
-  }
 
   do_collection(true                /* full */,
                 clear_all_soft_refs /* clear_all_soft_refs */,
                 0                   /* size */,
                 false               /* is_tlab */,
-                local_max_level     /* max_level */);
+                max_level           /* max_level */);
   // Hack XXX FIX ME !!!
   // A scavenge may not have been attempted, or may have
   // been attempted and failed, because the old gen was too full
-  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
-      incremental_collection_will_fail(false /* don't consult_young */)) {
+  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
--- a/src/share/vm/memory/generation.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/memory/generation.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -68,6 +68,12 @@
   return gch->_gen_specs[level()];
 }
 
+// This is for CMS. It returns stable monotonic used space size.
+// Remove this when CMS is removed.
+size_t Generation::used_stable() const {
+  return used();
+}
+
 size_t Generation::max_capacity() const {
   return reserved().byte_size();
 }
--- a/src/share/vm/memory/generation.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/memory/generation.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -168,6 +168,7 @@
   virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                         // generation can currently hold.
   virtual size_t used() const = 0;      // The number of used bytes in the gen.
+  virtual size_t used_stable() const;   // The number of used bytes for memory monitoring tools.
   virtual size_t free() const = 0;      // The number of free bytes in the gen.
 
   // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
--- a/src/share/vm/oops/instanceKlass.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -294,6 +294,7 @@
   set_has_unloaded_dependent(false);
   set_init_state(InstanceKlass::allocated);
   set_init_thread(NULL);
+  set_init_state(allocated);
   set_reference_type(rt);
   set_oop_map_cache(NULL);
   set_jni_ids(NULL);
@@ -978,11 +979,13 @@
   oop init_lock = this_oop->init_lock();
   if (init_lock != NULL) {
     ObjectLocker ol(init_lock, THREAD);
+    this_oop->set_init_thread(NULL); // reset _init_thread before changing _init_state
     this_oop->set_init_state(state);
     this_oop->fence_and_clear_init_lock();
     ol.notify_all(CHECK);
   } else {
     assert(init_lock != NULL, "The initialization state should never be set twice");
+    this_oop->set_init_thread(NULL); // reset _init_thread before changing _init_state
     this_oop->set_init_state(state);
   }
 }
@@ -3602,6 +3605,7 @@
   bool good_state = is_shared() ? (_init_state <= state)
                                 : (_init_state < state);
   assert(good_state || state == allocated, "illegal state transition");
+  assert(_init_thread == NULL, "should be cleared before state change");
   _init_state = (u1)state;
 }
 #endif
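The invariant enforced here is an ordering rule: the thread that finishes (or fails) class initialization must release its claim on the klass before publishing the new state, so the debug-only assert can check the claim is clear on every transition. A minimal standalone C++ model of that contract (illustrative names, not the real InstanceKlass):

    #include <cassert>

    struct ThreadTok {};  // stand-in for Thread*

    class KlassModel {
      ThreadTok* _init_thread = nullptr;
      int _init_state = 0;
     public:
      void set_init_thread(ThreadTok* t) { _init_thread = t; }
      void set_init_state(int s) {
        // mirrors the new assert: claim must be cleared before state change
        assert(_init_thread == nullptr && "should be cleared before state change");
        _init_state = s;
      }
      void finish_init(int final_state) {
        set_init_thread(nullptr);   // reset _init_thread BEFORE changing _init_state
        set_init_state(final_state);
      }
    };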
--- a/src/share/vm/oops/instanceKlass.hpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/oops/instanceKlass.hpp	Tue Feb 04 18:13:14 2020 +0800
@@ -241,7 +241,7 @@
   u2              _misc_flags;
   u2              _minor_version;    // minor version number of class file
   u2              _major_version;    // major version number of class file
-  Thread*         _init_thread;      // Pointer to current thread doing initialization (to handle recusive initialization)
+  Thread*         _init_thread;      // Pointer to current thread doing initialization (to handle recursive initialization)
   int             _vtable_len;       // length of Java vtable (in words)
   int             _itable_len;       // length of Java itable (in words)
   OopMapCache*    volatile _oop_map_cache;   // OopMapCache for all methods in the klass (allocated lazily)
--- a/src/share/vm/oops/klassVtable.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/oops/klassVtable.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -289,22 +289,25 @@
                                           int vtable_index, Handle target_loader, Symbol* target_classname, Thread * THREAD) {
   InstanceKlass* superk = initialsuper;
   while (superk != NULL && superk->super() != NULL) {
-    InstanceKlass* supersuperklass = InstanceKlass::cast(superk->super());
-    klassVtable* ssVtable = supersuperklass->vtable();
+    klassVtable* ssVtable = (superk->super())->vtable();
     if (vtable_index < ssVtable->length()) {
       Method* super_method = ssVtable->method_at(vtable_index);
+      // get the class holding the matching method
+      // make sure you use that class for is_override
+      InstanceKlass* supermethodholder = super_method->method_holder();
 #ifndef PRODUCT
       Symbol* name= target_method()->name();
       Symbol* signature = target_method()->signature();
       assert(super_method->name() == name && super_method->signature() == signature, "vtable entry name/sig mismatch");
 #endif
-      if (supersuperklass->is_override(super_method, target_loader, target_classname, THREAD)) {
+
+      if (supermethodholder->is_override(super_method, target_loader, target_classname, THREAD)) {
 #ifndef PRODUCT
         if (PrintVtables && Verbose) {
           ResourceMark rm(THREAD);
           char* sig = target_method()->name_and_sig_as_C_string();
           tty->print("transitive overriding superclass %s with %s::%s index %d, original flags: ",
-                     supersuperklass->internal_name(),
+                     supermethodholder->internal_name(),
                      _klass->internal_name(), sig, vtable_index);
           super_method->access_flags().print_on(tty);
           if (super_method->is_default_method()) {
@@ -656,7 +659,7 @@
 
   // search through the super class hierarchy to see if we need
   // a new entry
-  ResourceMark rm;
+  ResourceMark rm(THREAD);
   Symbol* name = target_method()->name();
   Symbol* signature = target_method()->signature();
   Klass* k = super;
--- a/src/share/vm/opto/escape.cpp	Tue Feb 04 17:38:01 2020 +0800
+++ b/src/share/vm/opto/escape.cpp	Tue Feb 04 18:13:14 2020 +0800
@@ -952,6 +952,7 @@
                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
@@ -2114,6 +2115,9 @@
     return false;
   }
   PointsToNode* ptn = ptnode_adr(idx);
+  if (ptn == NULL) {
+    return false; // not in congraph (e.g. ConI)
+  }
   PointsToNode::EscapeState es = ptn->escape_state();
   // If we have already computed a value, return it.
   if (es >= PointsToNode::GlobalEscape)
37.1 --- a/src/share/vm/opto/library_call.cpp Tue Feb 04 17:38:01 2020 +0800 37.2 +++ b/src/share/vm/opto/library_call.cpp Tue Feb 04 18:13:14 2020 +0800 37.3 @@ -311,6 +311,7 @@ 37.4 Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting); 37.5 Node* get_key_start_from_aescrypt_object(Node* aescrypt_object); 37.6 Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object); 37.7 + bool inline_ghash_processBlocks(); 37.8 bool inline_sha_implCompress(vmIntrinsics::ID id); 37.9 bool inline_digestBase_implCompressMB(int predicate); 37.10 bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA, 37.11 @@ -570,6 +571,10 @@ 37.12 predicates = 3; 37.13 break; 37.14 37.15 + case vmIntrinsics::_ghash_processBlocks: 37.16 + if (!UseGHASHIntrinsics) return NULL; 37.17 + break; 37.18 + 37.19 case vmIntrinsics::_updateCRC32: 37.20 case vmIntrinsics::_updateBytesCRC32: 37.21 case vmIntrinsics::_updateByteBufferCRC32: 37.22 @@ -957,6 +962,9 @@ 37.23 case vmIntrinsics::_montgomerySquare: 37.24 return inline_montgomerySquare(); 37.25 37.26 + case vmIntrinsics::_ghash_processBlocks: 37.27 + return inline_ghash_processBlocks(); 37.28 + 37.29 case vmIntrinsics::_encodeISOArray: 37.30 return inline_encodeISOArray(); 37.31 37.32 @@ -6599,6 +6607,35 @@ 37.33 return _gvn.transform(region); 37.34 } 37.35 37.36 +//------------------------------inline_ghash_processBlocks 37.37 +bool LibraryCallKit::inline_ghash_processBlocks() { 37.38 + address stubAddr; 37.39 + const char *stubName; 37.40 + assert(UseGHASHIntrinsics, "need GHASH intrinsics support"); 37.41 + 37.42 + stubAddr = StubRoutines::ghash_processBlocks(); 37.43 + stubName = "ghash_processBlocks"; 37.44 + 37.45 + Node* data = argument(0); 37.46 + Node* offset = argument(1); 37.47 + Node* len = argument(2); 37.48 + Node* state = argument(3); 37.49 + Node* subkeyH = argument(4); 37.50 + 37.51 + Node* state_start = array_element_address(state, intcon(0), T_LONG); 37.52 + assert(state_start, "state is NULL"); 37.53 + Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG); 37.54 + assert(subkeyH_start, "subkeyH is NULL"); 37.55 + Node* data_start = array_element_address(data, offset, T_BYTE); 37.56 + assert(data_start, "data is NULL"); 37.57 + 37.58 + Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP, 37.59 + OptoRuntime::ghash_processBlocks_Type(), 37.60 + stubAddr, stubName, TypePtr::BOTTOM, 37.61 + state_start, subkeyH_start, data_start, len); 37.62 + return true; 37.63 +} 37.64 + 37.65 //------------------------------inline_sha_implCompress----------------------- 37.66 // 37.67 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
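The argument order read off in inline_ghash_processBlocks() above (data, offset, len, state, subkeyH) pins down the shape of the Java method the intrinsic replaces; the real counterpart lives in the JDK's com.sun.crypto.provider.GHASH, and the sketch below (hypothetical class name) is only meant to make that mapping concrete:

    class GhashShape {
        // Shape implied by the intrinsic: byte[] input with offset and block
        // count, plus the 128-bit hash state and hash subkey H as long[2] each.
        static void processBlocks(byte[] data, int inOfs, int blocks,
                                  long[] state, long[] subkeyH) {
            // pure-Java fallback body would go here; C2 routes matching calls
            // to the ghash_processBlocks stub when UseGHASHIntrinsics is set
        }
    }

Note that the runtime call is handed state_start, subkeyH_start and data_start, i.e. raw array element addresses, so the stub itself never sees the Java array headers.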
38.1 --- a/src/share/vm/opto/loopTransform.cpp Tue Feb 04 17:38:01 2020 +0800 38.2 +++ b/src/share/vm/opto/loopTransform.cpp Tue Feb 04 18:13:14 2020 +0800 38.3 @@ -1,5 +1,5 @@ 38.4 /* 38.5 - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 38.6 + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. 38.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 38.8 * 38.9 * This code is free software; you can redistribute it and/or modify it 38.10 @@ -2231,6 +2231,13 @@ 38.11 // We also need to replace the original limit to collapse loop exit. 38.12 Node* cmp = cl->loopexit()->cmp_node(); 38.13 assert(cl->limit() == cmp->in(2), "sanity"); 38.14 + // Duplicate cmp node if it has other users 38.15 + if (cmp->outcnt() > 1) { 38.16 + cmp = cmp->clone(); 38.17 + cmp = phase->_igvn.register_new_node_with_optimizer(cmp); 38.18 + BoolNode *bol = cl->loopexit()->in(CountedLoopEndNode::TestValue)->as_Bool(); 38.19 + phase->_igvn.replace_input_of(bol, 1, cmp); // put bol on worklist 38.20 + } 38.21 phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist 38.22 phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist 38.23 }
39.1 --- a/src/share/vm/opto/loopnode.hpp Tue Feb 04 17:38:01 2020 +0800 39.2 +++ b/src/share/vm/opto/loopnode.hpp Tue Feb 04 18:13:14 2020 +0800 39.3 @@ -279,6 +279,7 @@ 39.4 if (iv_phi == NULL) { 39.5 return NULL; 39.6 } 39.7 + assert(iv_phi->is_Phi(), "should be PhiNode"); 39.8 Node *ln = iv_phi->in(0); 39.9 if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) { 39.10 return (CountedLoopNode*)ln;
40.1 --- a/src/share/vm/opto/loopopts.cpp Tue Feb 04 17:38:01 2020 +0800 40.2 +++ b/src/share/vm/opto/loopopts.cpp Tue Feb 04 18:13:14 2020 +0800 40.3 @@ -309,7 +309,7 @@ 40.4 } 40.5 return NULL; 40.6 } 40.7 - assert(m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control"); 40.8 + assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control"); 40.9 } 40.10 40.11 return n_ctrl;
41.1 --- a/src/share/vm/opto/runtime.cpp Tue Feb 04 17:38:01 2020 +0800 41.2 +++ b/src/share/vm/opto/runtime.cpp Tue Feb 04 18:13:14 2020 +0800 41.3 @@ -100,7 +100,25 @@ 41.4 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000 41.5 41.6 41.7 +// GHASH block processing 41.8 +const TypeFunc* OptoRuntime::ghash_processBlocks_Type() { 41.9 + int argcnt = 4; 41.10 41.11 + const Type** fields = TypeTuple::fields(argcnt); 41.12 + int argp = TypeFunc::Parms; 41.13 + fields[argp++] = TypePtr::NOTNULL; // state 41.14 + fields[argp++] = TypePtr::NOTNULL; // subkeyH 41.15 + fields[argp++] = TypePtr::NOTNULL; // data 41.16 + fields[argp++] = TypeInt::INT; // blocks 41.17 + assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 41.18 + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 41.19 + 41.20 + // result type needed 41.21 + fields = TypeTuple::fields(1); 41.22 + fields[TypeFunc::Parms+0] = NULL; // void 41.23 + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); 41.24 + return TypeFunc::make(domain, range); 41.25 +} 41.26 41.27 // Compiled code entry points 41.28 address OptoRuntime::_new_instance_Java = NULL;
42.1 --- a/src/share/vm/opto/runtime.hpp Tue Feb 04 17:38:01 2020 +0800 42.2 +++ b/src/share/vm/opto/runtime.hpp Tue Feb 04 18:13:14 2020 +0800 42.3 @@ -311,6 +311,8 @@ 42.4 static const TypeFunc* montgomeryMultiply_Type(); 42.5 static const TypeFunc* montgomerySquare_Type(); 42.6 42.7 + static const TypeFunc* ghash_processBlocks_Type(); 42.8 + 42.9 static const TypeFunc* updateBytesCRC32_Type(); 42.10 42.11 // leaf on stack replacement interpreter accessor types
43.1 --- a/src/share/vm/opto/superword.cpp Tue Feb 04 17:38:01 2020 +0800 43.2 +++ b/src/share/vm/opto/superword.cpp Tue Feb 04 18:13:14 2020 +0800 43.3 @@ -448,6 +448,7 @@ 43.4 return true; // no induction variable 43.5 } 43.6 CountedLoopEndNode* pre_end = get_pre_loop_end(lp()->as_CountedLoop()); 43.7 + assert(pre_end != NULL, "we must have a correct pre-loop"); 43.8 assert(pre_end->stride_is_con(), "pre loop stride is constant"); 43.9 int preloop_stride = pre_end->stride_con(); 43.10 43.11 @@ -2052,7 +2053,7 @@ 43.12 CountedLoopNode *main_head = lp()->as_CountedLoop(); 43.13 assert(main_head->is_main_loop(), ""); 43.14 CountedLoopEndNode* pre_end = get_pre_loop_end(main_head); 43.15 - assert(pre_end != NULL, ""); 43.16 + assert(pre_end != NULL, "we must have a correct pre-loop"); 43.17 Node *pre_opaq1 = pre_end->limit(); 43.18 assert(pre_opaq1->Opcode() == Op_Opaque1, ""); 43.19 Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; 43.20 @@ -2207,16 +2208,27 @@ 43.21 43.22 //----------------------------get_pre_loop_end--------------------------- 43.23 // Find pre loop end from main loop. Returns null if none. 43.24 -CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode *cl) { 43.25 - Node *ctrl = cl->in(LoopNode::EntryControl); 43.26 +CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode* cl) { 43.27 + Node* ctrl = cl->in(LoopNode::EntryControl); 43.28 if (!ctrl->is_IfTrue() && !ctrl->is_IfFalse()) return NULL; 43.29 - Node *iffm = ctrl->in(0); 43.30 + Node* iffm = ctrl->in(0); 43.31 if (!iffm->is_If()) return NULL; 43.32 - Node *p_f = iffm->in(0); 43.33 + Node* bolzm = iffm->in(1); 43.34 + if (!bolzm->is_Bool()) return NULL; 43.35 + Node* cmpzm = bolzm->in(1); 43.36 + if (!cmpzm->is_Cmp()) return NULL; 43.37 + Node* opqzm = cmpzm->in(2); 43.38 + // Can not optimize a loop if zero-trip Opaque1 node is optimized 43.39 + // away and then another round of loop opts attempted. 43.40 + if (opqzm->Opcode() != Op_Opaque1) { 43.41 + return NULL; 43.42 + } 43.43 + Node* p_f = iffm->in(0); 43.44 if (!p_f->is_IfFalse()) return NULL; 43.45 if (!p_f->in(0)->is_CountedLoopEnd()) return NULL; 43.46 - CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); 43.47 - if (!pre_end->loopnode()->is_pre_loop()) return NULL; 43.48 + CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd(); 43.49 + CountedLoopNode* loop_node = pre_end->loopnode(); 43.50 + if (loop_node == NULL || !loop_node->is_pre_loop()) return NULL; 43.51 return pre_end; 43.52 } 43.53
44.1 --- a/src/share/vm/runtime/globals.hpp Tue Feb 04 17:38:01 2020 +0800 44.2 +++ b/src/share/vm/runtime/globals.hpp Tue Feb 04 18:13:14 2020 +0800 44.3 @@ -614,6 +614,9 @@ 44.4 product(bool, UseSHA, false, \ 44.5 "Control whether SHA instructions can be used on SPARC") \ 44.6 \ 44.7 + product(bool, UseGHASHIntrinsics, false, \ 44.8 + "Use intrinsics for GHASH versions of crypto") \ 44.9 + \ 44.10 product(uintx, LargePageSizeInBytes, 0, \ 44.11 "Large page size (0 to let VM choose the page size)") \ 44.12 \
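UseGHASHIntrinsics defaults to false here and is force-cleared by the CPU feature checks elsewhere in this patch, so setting it only takes effect on hardware with the required instructions. A small smoke test for the path it guards is a plain JCE AES/GCM encryption; the class name and command line below are illustrative only, not part of the patch:

    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;
    import javax.crypto.spec.GCMParameterSpec;

    // Run with e.g.: java -XX:+UseGHASHIntrinsics GcmSmokeTest
    public class GcmSmokeTest {
        public static void main(String[] args) throws Exception {
            KeyGenerator kg = KeyGenerator.getInstance("AES");
            kg.init(128);
            SecretKey key = kg.generateKey();
            Cipher c = Cipher.getInstance("AES/GCM/NoPadding");
            c.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(96, new byte[16]));
            byte[] ct = c.doFinal(new byte[4096]); // GHASH runs over the ciphertext
            System.out.println("ciphertext+tag bytes: " + ct.length);
        }
    }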
45.1 --- a/src/share/vm/runtime/safepoint.cpp Tue Feb 04 17:38:01 2020 +0800 45.2 +++ b/src/share/vm/runtime/safepoint.cpp Tue Feb 04 18:13:14 2020 +0800 45.3 @@ -547,6 +547,7 @@ 45.4 45.5 // rotate log files? 45.6 if (UseGCLogFileRotation) { 45.7 + TraceTime t8("rotating gc logs", TraceSafepointCleanupTime); 45.8 gclog_or_tty->rotate_log(false); 45.9 } 45.10
46.1 --- a/src/share/vm/runtime/stubRoutines.cpp Tue Feb 04 17:38:01 2020 +0800 46.2 +++ b/src/share/vm/runtime/stubRoutines.cpp Tue Feb 04 18:13:14 2020 +0800 46.3 @@ -124,6 +124,7 @@ 46.4 address StubRoutines::_aescrypt_decryptBlock = NULL; 46.5 address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL; 46.6 address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL; 46.7 +address StubRoutines::_ghash_processBlocks = NULL; 46.8 46.9 address StubRoutines::_sha1_implCompress = NULL; 46.10 address StubRoutines::_sha1_implCompressMB = NULL; 46.11 @@ -176,7 +177,7 @@ 46.12 StubGenerator_generate(&buffer, false); 46.13 // When new stubs added we need to make sure there is some space left 46.14 // to catch situation when we should increase size again. 46.15 - assert(buffer.insts_remaining() > 200, "increase code_size1"); 46.16 + assert(code_size1 == 0 || buffer.insts_remaining() > 200, "increase code_size1"); 46.17 } 46.18 } 46.19 46.20 @@ -231,7 +232,7 @@ 46.21 StubGenerator_generate(&buffer, true); 46.22 // When new stubs added we need to make sure there is some space left 46.23 // to catch situation when we should increase size again. 46.24 - assert(buffer.insts_remaining() > 200, "increase code_size2"); 46.25 + assert(code_size2 == 0 || buffer.insts_remaining() > 200, "increase code_size2"); 46.26 } 46.27 46.28 #ifdef ASSERT
47.1 --- a/src/share/vm/runtime/stubRoutines.hpp Tue Feb 04 17:38:01 2020 +0800 47.2 +++ b/src/share/vm/runtime/stubRoutines.hpp Tue Feb 04 18:13:14 2020 +0800 47.3 @@ -208,6 +208,7 @@ 47.4 static address _aescrypt_decryptBlock; 47.5 static address _cipherBlockChaining_encryptAESCrypt; 47.6 static address _cipherBlockChaining_decryptAESCrypt; 47.7 + static address _ghash_processBlocks; 47.8 47.9 static address _sha1_implCompress; 47.10 static address _sha1_implCompressMB; 47.11 @@ -370,6 +371,7 @@ 47.12 static address aescrypt_decryptBlock() { return _aescrypt_decryptBlock; } 47.13 static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; } 47.14 static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; } 47.15 + static address ghash_processBlocks() { return _ghash_processBlocks; } 47.16 47.17 static address sha1_implCompress() { return _sha1_implCompress; } 47.18 static address sha1_implCompressMB() { return _sha1_implCompressMB; }
48.1 --- a/src/share/vm/runtime/vmStructs.cpp Tue Feb 04 17:38:01 2020 +0800 48.2 +++ b/src/share/vm/runtime/vmStructs.cpp Tue Feb 04 18:13:14 2020 +0800 48.3 @@ -824,6 +824,7 @@ 48.4 static_field(StubRoutines, _aescrypt_decryptBlock, address) \ 48.5 static_field(StubRoutines, _cipherBlockChaining_encryptAESCrypt, address) \ 48.6 static_field(StubRoutines, _cipherBlockChaining_decryptAESCrypt, address) \ 48.7 + static_field(StubRoutines, _ghash_processBlocks, address) \ 48.8 static_field(StubRoutines, _updateBytesCRC32, address) \ 48.9 static_field(StubRoutines, _crc_table_adr, address) \ 48.10 static_field(StubRoutines, _multiplyToLen, address) \
49.1 --- a/src/share/vm/services/allocationSite.hpp Tue Feb 04 17:38:01 2020 +0800 49.2 +++ b/src/share/vm/services/allocationSite.hpp Tue Feb 04 18:13:14 2020 +0800 49.3 @@ -34,8 +34,9 @@ 49.4 private: 49.5 NativeCallStack _call_stack; 49.6 E e; 49.7 + MEMFLAGS _flag; 49.8 public: 49.9 - AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { } 49.10 + AllocationSite(const NativeCallStack& stack, MEMFLAGS flag) : _call_stack(stack), _flag(flag) { } 49.11 int hash() const { return _call_stack.hash(); } 49.12 bool equals(const NativeCallStack& stack) const { 49.13 return _call_stack.equals(stack); 49.14 @@ -52,6 +53,8 @@ 49.15 // Information regarding this allocation 49.16 E* data() { return &e; } 49.17 const E* peek() const { return &e; } 49.18 + 49.19 + MEMFLAGS flag() const { return _flag; } 49.20 }; 49.21 49.22 #endif // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
50.1 --- a/src/share/vm/services/mallocSiteTable.cpp Tue Feb 04 17:38:01 2020 +0800 50.2 +++ b/src/share/vm/services/mallocSiteTable.cpp Tue Feb 04 18:13:14 2020 +0800 50.3 @@ -1,5 +1,5 @@ 50.4 /* 50.5 - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. 50.6 + * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. 50.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 50.8 * 50.9 * This code is free software; you can redistribute it and/or modify it 50.10 @@ -84,12 +84,18 @@ 50.11 // Create pseudo call stack for hashtable entry allocation 50.12 address pc[3]; 50.13 if (NMT_TrackingStackDepth >= 3) { 50.14 - pc[2] = (address)MallocSiteTable::allocation_at; 50.15 + uintx *fp = (uintx*)MallocSiteTable::allocation_at; 50.16 + // On ppc64, 'fp' is a pointer to a function descriptor which is a struct of 50.17 + // three native pointers where the first pointer is the real function address. 50.18 + // See: http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html#FUNC-DES 50.19 + pc[2] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0]))); 50.20 } 50.21 if (NMT_TrackingStackDepth >= 2) { 50.22 - pc[1] = (address)MallocSiteTable::lookup_or_add; 50.23 + uintx *fp = (uintx*)MallocSiteTable::lookup_or_add; 50.24 + pc[1] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0]))); 50.25 } 50.26 - pc[0] = (address)MallocSiteTable::new_entry; 50.27 + uintx *fp = (uintx*)MallocSiteTable::new_entry; 50.28 + pc[0] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0]))); 50.29 50.30 // Instantiate NativeCallStack object, have to use placement new operator. (see comments above) 50.31 NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack) 50.32 @@ -158,7 +164,7 @@ 50.33 MallocSiteHashtableEntry* head = _table[index]; 50.34 while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) { 50.35 MallocSite* site = head->data(); 50.36 - if (site->flags() == flags && site->equals(key)) { 50.37 + if (site->flag() == flags && site->equals(key)) { 50.38 return head->data(); 50.39 } 50.40
51.1 --- a/src/share/vm/services/mallocSiteTable.hpp Tue Feb 04 17:38:01 2020 +0800 51.2 +++ b/src/share/vm/services/mallocSiteTable.hpp Tue Feb 04 18:13:14 2020 +0800 51.3 @@ -37,15 +37,12 @@ 51.4 // MallocSite represents a code path that eventually calls 51.5 // os::malloc() to allocate memory 51.6 class MallocSite : public AllocationSite<MemoryCounter> { 51.7 - private: 51.8 - MEMFLAGS _flags; 51.9 - 51.10 public: 51.11 MallocSite() : 51.12 - AllocationSite<MemoryCounter>(NativeCallStack::empty_stack()), _flags(mtNone) {} 51.13 + AllocationSite<MemoryCounter>(NativeCallStack::empty_stack(), mtNone) {} 51.14 51.15 MallocSite(const NativeCallStack& stack, MEMFLAGS flags) : 51.16 - AllocationSite<MemoryCounter>(stack), _flags(flags) {} 51.17 + AllocationSite<MemoryCounter>(stack, flags) {} 51.18 51.19 51.20 void allocate(size_t size) { data()->allocate(size); } 51.21 @@ -55,7 +52,6 @@ 51.22 size_t size() const { return peek()->size(); } 51.23 // The number of calls were made 51.24 size_t count() const { return peek()->count(); } 51.25 - MEMFLAGS flags() const { return (MEMFLAGS)_flags; } 51.26 }; 51.27 51.28 // Malloc site hashtable entry
52.1 --- a/src/share/vm/services/memBaseline.cpp Tue Feb 04 17:38:01 2020 +0800 52.2 +++ b/src/share/vm/services/memBaseline.cpp Tue Feb 04 18:13:14 2020 +0800 52.3 @@ -1,5 +1,5 @@ 52.4 /* 52.5 - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. 52.6 + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. 52.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 52.8 * 52.9 * This code is free software; you can redistribute it and/or modify it 52.10 @@ -63,7 +63,7 @@ 52.11 int compare_malloc_site_and_type(const MallocSite& s1, const MallocSite& s2) { 52.12 int res = compare_malloc_site(s1, s2); 52.13 if (res == 0) { 52.14 - res = (int)(s1.flags() - s2.flags()); 52.15 + res = (int)(s1.flag() - s2.flag()); 52.16 } 52.17 52.18 return res; 52.19 @@ -209,7 +209,7 @@ 52.20 const ReservedMemoryRegion* rgn; 52.21 VirtualMemoryAllocationSite* site; 52.22 while ((rgn = itr.next()) != NULL) { 52.23 - VirtualMemoryAllocationSite tmp(*rgn->call_stack()); 52.24 + VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->flag()); 52.25 site = allocation_sites.find(tmp); 52.26 if (site == NULL) { 52.27 LinkedListNode<VirtualMemoryAllocationSite>* node =
53.1 --- a/src/share/vm/services/memReporter.cpp Tue Feb 04 17:38:01 2020 +0800 53.2 +++ b/src/share/vm/services/memReporter.cpp Tue Feb 04 18:13:14 2020 +0800 53.3 @@ -1,5 +1,5 @@ 53.4 /* 53.5 - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. 53.6 + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. 53.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 53.8 * 53.9 * This code is free software; you can redistribute it and/or modify it 53.10 @@ -205,7 +205,7 @@ 53.11 const NativeCallStack* stack = malloc_site->call_stack(); 53.12 stack->print_on(out); 53.13 out->print("%29s", " "); 53.14 - MEMFLAGS flag = malloc_site->flags(); 53.15 + MEMFLAGS flag = malloc_site->flag(); 53.16 assert((flag >= 0 && flag < (int)mt_number_of_types) && flag != mtNone, 53.17 "Must have a valid memory type"); 53.18 print_malloc(malloc_site->size(), malloc_site->count(),flag); 53.19 @@ -231,6 +231,10 @@ 53.20 stack->print_on(out); 53.21 out->print("%28s (", " "); 53.22 print_total(virtual_memory_site->reserved(), virtual_memory_site->committed()); 53.23 + MEMFLAGS flag = virtual_memory_site->flag(); 53.24 + if (flag != mtNone) { 53.25 + out->print(" Type=%s", NMTUtil::flag_to_name(flag)); 53.26 + } 53.27 out->print_cr(")\n"); 53.28 } 53.29 } 53.30 @@ -562,24 +566,24 @@ 53.31 53.32 void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const { 53.33 diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(), 53.34 - 0, 0, malloc_site->flags()); 53.35 + 0, 0, malloc_site->flag()); 53.36 } 53.37 53.38 void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const { 53.39 diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(), 53.40 - malloc_site->count(), malloc_site->flags()); 53.41 + malloc_site->count(), malloc_site->flag()); 53.42 } 53.43 53.44 void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early, 53.45 const MallocSite* current) const { 53.46 - if (early->flags() != current->flags()) { 53.47 + if (early->flag() != current->flag()) { 53.48 // If malloc site type changed, treat it as deallocation of old type and 53.49 // allocation of new type. 53.50 old_malloc_site(early); 53.51 new_malloc_site(current); 53.52 } else { 53.53 diff_malloc_site(current->call_stack(), current->size(), current->count(), 53.54 - early->size(), early->count(), early->flags()); 53.55 + early->size(), early->count(), early->flag()); 53.56 } 53.57 } 53.58 53.59 @@ -603,21 +607,22 @@ 53.60 53.61 53.62 void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const { 53.63 - diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0); 53.64 + diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->flag()); 53.65 } 53.66 53.67 void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const { 53.68 - diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed()); 53.69 + diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->flag()); 53.70 } 53.71 53.72 void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early, 53.73 const VirtualMemoryAllocationSite* current) const { 53.74 + assert(early->flag() == current->flag(), "Should be the same"); 53.75 diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(), 53.76 - early->reserved(), early->committed()); 53.77 + early->reserved(), early->committed(), current->flag()); 53.78 } 53.79 53.80 void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved, 53.81 - size_t current_committed, size_t early_reserved, size_t early_committed) const { 53.82 + size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const { 53.83 outputStream* out = output(); 53.84 53.85 // no change 53.86 @@ -631,6 +636,10 @@ 53.87 print_virtual_memory_diff(current_reserved, current_committed, 53.88 early_reserved, early_committed); 53.89 53.90 + if (flag != mtNone) { 53.91 + out->print(" Type=%s", NMTUtil::flag_to_name(flag)); 53.92 + } 53.93 + 53.94 out->print_cr(")\n"); 53.95 } 53.96
54.1 --- a/src/share/vm/services/memReporter.hpp Tue Feb 04 17:38:01 2020 +0800 54.2 +++ b/src/share/vm/services/memReporter.hpp Tue Feb 04 18:13:14 2020 +0800 54.3 @@ -218,7 +218,7 @@ 54.4 void diff_malloc_site(const NativeCallStack* stack, size_t current_size, 54.5 size_t currrent_count, size_t early_size, size_t early_count, MEMFLAGS flags) const; 54.6 void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved, 54.7 - size_t current_committed, size_t early_reserved, size_t early_committed) const; 54.8 + size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const; 54.9 }; 54.10 54.11 #endif // INCLUDE_NMT
55.1 --- a/src/share/vm/services/memoryPool.hpp Tue Feb 04 17:38:01 2020 +0800 55.2 +++ b/src/share/vm/services/memoryPool.hpp Tue Feb 04 18:13:14 2020 +0800 55.3 @@ -198,7 +198,7 @@ 55.4 bool support_usage_threshold); 55.5 55.6 MemoryUsage get_memory_usage(); 55.7 - size_t used_in_bytes() { return _space->used(); } 55.8 + size_t used_in_bytes() { return _space->used_stable(); } 55.9 }; 55.10 #endif // INCLUDE_ALL_GCS 55.11
56.1 --- a/src/share/vm/services/virtualMemoryTracker.hpp Tue Feb 04 17:38:01 2020 +0800 56.2 +++ b/src/share/vm/services/virtualMemoryTracker.hpp Tue Feb 04 18:13:14 2020 +0800 56.3 @@ -69,8 +69,8 @@ 56.4 // Virtual memory allocation site, keeps track where the virtual memory is reserved. 56.5 class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> { 56.6 public: 56.7 - VirtualMemoryAllocationSite(const NativeCallStack& stack) : 56.8 - AllocationSite<VirtualMemory>(stack) { } 56.9 + VirtualMemoryAllocationSite(const NativeCallStack& stack, MEMFLAGS flag) : 56.10 + AllocationSite<VirtualMemory>(stack, flag) { } 56.11 56.12 inline void reserve_memory(size_t sz) { data()->reserve_memory(sz); } 56.13 inline void commit_memory (size_t sz) { data()->commit_memory(sz); }
57.1 --- a/src/share/vm/utilities/macros.hpp Tue Feb 04 17:38:01 2020 +0800 57.2 +++ b/src/share/vm/utilities/macros.hpp Tue Feb 04 18:13:14 2020 +0800 57.3 @@ -434,6 +434,14 @@ 57.4 #define NOT_EMBEDDED(code) code 57.5 #endif 57.6 57.7 +#ifdef VM_LITTLE_ENDIAN 57.8 +#define LITTLE_ENDIAN_ONLY(code) code 57.9 +#define BIG_ENDIAN_ONLY(code) 57.10 +#else 57.11 +#define LITTLE_ENDIAN_ONLY(code) 57.12 +#define BIG_ENDIAN_ONLY(code) code 57.13 +#endif 57.14 + 57.15 #define define_pd_global(type, name, value) const type pd_##name = value; 57.16 57.17 #endif // SHARE_VM_UTILITIES_MACROS_HPP
58.1 --- a/src/share/vm/utilities/taskqueue.hpp Tue Feb 04 17:38:01 2020 +0800 58.2 +++ b/src/share/vm/utilities/taskqueue.hpp Tue Feb 04 18:13:14 2020 +0800 58.3 @@ -791,6 +791,11 @@ 58.4 } else { 58.5 // Otherwise, the queue contained exactly one element; we take the slow 58.6 // path. 58.7 + 58.8 + // The barrier is required to prevent reordering the two reads of _age: 58.9 + // one is the _age.get() below, and the other is _age.top() above the if-stmt. 58.10 + // The algorithm may fail if _age.get() reads an older value than _age.top(). 58.11 + OrderAccess::loadload(); 58.12 return pop_local_slow(localBot, _age.get()); 58.13 } 58.14 }
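The loadload() comment above describes a pure ordering constraint between two reads. For readers more at home in Java, the same constraint can be written with sun.misc.Unsafe.loadFence() (a JDK 8 API); this sketch illustrates the barrier's meaning only and is not part of the patch:

    import sun.misc.Unsafe;
    import java.lang.reflect.Field;

    public class LoadLoadDemo {
        static int bottom = 1; // plain fields: nothing else orders these reads
        static long age = 2;

        public static void main(String[] args) throws Exception {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            Unsafe u = (Unsafe) f.get(null);

            int localBot = bottom; // first read
            u.loadFence();         // load-load barrier: the read below may not
                                   // be satisfied with a value observed before
                                   // the read above
            long localAge = age;   // second read, now ordered after the first
            System.out.println(localBot + " / " + localAge);
        }
    }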
59.1 --- a/src/share/vm/utilities/workgroup.cpp Tue Feb 04 17:38:01 2020 +0800 59.2 +++ b/src/share/vm/utilities/workgroup.cpp Tue Feb 04 18:13:14 2020 +0800 59.3 @@ -464,7 +464,6 @@ 59.4 if (old == 0) { 59.5 old = Atomic::cmpxchg(1, &_tasks[t], 0); 59.6 } 59.7 - assert(_tasks[t] == 1, "What else?"); 59.8 bool res = old != 0; 59.9 #ifdef ASSERT 59.10 if (!res) {
60.1 --- a/test/compiler/7184394/TestAESBase.java Tue Feb 04 17:38:01 2020 +0800 60.2 +++ b/test/compiler/7184394/TestAESBase.java Tue Feb 04 18:13:14 2020 +0800 60.3 @@ -29,6 +29,7 @@ 60.4 import javax.crypto.Cipher; 60.5 import javax.crypto.KeyGenerator; 60.6 import javax.crypto.SecretKey; 60.7 +import javax.crypto.spec.GCMParameterSpec; 60.8 import javax.crypto.spec.IvParameterSpec; 60.9 import javax.crypto.spec.SecretKeySpec; 60.10 import java.security.AlgorithmParameters; 60.11 @@ -62,8 +63,12 @@ 60.12 Random random = new Random(0); 60.13 Cipher cipher; 60.14 Cipher dCipher; 60.15 - AlgorithmParameters algParams; 60.16 + AlgorithmParameters algParams = null; 60.17 SecretKey key; 60.18 + GCMParameterSpec gcm_spec; 60.19 + byte[] aad = { 0x11, 0x22, 0x33, 0x44, 0x55 }; 60.20 + int tlen = 12; 60.21 + byte[] iv = new byte[16]; 60.22 60.23 static int numThreads = 0; 60.24 int threadId; 60.25 @@ -77,7 +82,10 @@ 60.26 60.27 public void prepare() { 60.28 try { 60.29 - System.out.println("\nalgorithm=" + algorithm + ", mode=" + mode + ", paddingStr=" + paddingStr + ", msgSize=" + msgSize + ", keySize=" + keySize + ", noReinit=" + noReinit + ", checkOutput=" + checkOutput + ", encInputOffset=" + encInputOffset + ", encOutputOffset=" + encOutputOffset + ", decOutputOffset=" + decOutputOffset + ", lastChunkSize=" +lastChunkSize ); 60.30 + System.out.println("\nalgorithm=" + algorithm + ", mode=" + mode + ", paddingStr=" + paddingStr + 60.31 + ", msgSize=" + msgSize + ", keySize=" + keySize + ", noReinit=" + noReinit + 60.32 + ", checkOutput=" + checkOutput + ", encInputOffset=" + encInputOffset + ", encOutputOffset=" + 60.33 + encOutputOffset + ", decOutputOffset=" + decOutputOffset + ", lastChunkSize=" +lastChunkSize ); 60.34 60.35 if (encInputOffset % ALIGN != 0 || encOutputOffset % ALIGN != 0 || decOutputOffset % ALIGN !=0 ) 60.36 testingMisalignment = true; 60.37 @@ -98,16 +106,24 @@ 60.38 cipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE"); 60.39 dCipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE"); 60.40 60.41 + // CBC init 60.42 if (mode.equals("CBC")) { 60.43 - int ivLen = (algorithm.equals("AES") ? 16 : algorithm.equals("DES") ? 8 : 0); 60.44 - IvParameterSpec initVector = new IvParameterSpec(new byte[ivLen]); 60.45 + IvParameterSpec initVector = new IvParameterSpec(iv); 60.46 cipher.init(Cipher.ENCRYPT_MODE, key, initVector); 60.47 + algParams = cipher.getParameters(); 60.48 + dCipher.init(Cipher.DECRYPT_MODE, key, initVector); 60.49 + 60.50 + // GCM init 60.51 + } else if (mode.equals("GCM")) { 60.52 + gcm_init(true); 60.53 + gcm_init(false); 60.54 + 60.55 + // ECB init 60.56 } else { 60.57 - algParams = cipher.getParameters(); 60.58 cipher.init(Cipher.ENCRYPT_MODE, key, algParams); 60.59 + dCipher.init(Cipher.DECRYPT_MODE, key, algParams); 60.60 } 60.61 - algParams = cipher.getParameters(); 60.62 - dCipher.init(Cipher.DECRYPT_MODE, key, algParams); 60.63 + 60.64 if (threadId == 0) { 60.65 childShowCipher(); 60.66 } 60.67 @@ -188,4 +204,19 @@ 60.68 } 60.69 60.70 abstract void childShowCipher(); 60.71 + 60.72 + void gcm_init(boolean encrypt) throws Exception { 60.73 + gcm_spec = new GCMParameterSpec(tlen * 8, iv); 60.74 + if (encrypt) { 60.75 + // Get a new instance everytime because of reuse IV restrictions 60.76 + cipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE"); 60.77 + cipher.init(Cipher.ENCRYPT_MODE, key, gcm_spec); 60.78 + cipher.updateAAD(aad); 60.79 + } else { 60.80 + dCipher.init(Cipher.DECRYPT_MODE, key, gcm_spec); 60.81 + dCipher.updateAAD(aad); 60.82 + 60.83 + 60.84 + } 60.85 + } 60.86 }
61.1 --- a/test/compiler/7184394/TestAESDecode.java Tue Feb 04 17:38:01 2020 +0800 61.2 +++ b/test/compiler/7184394/TestAESDecode.java Tue Feb 04 18:13:14 2020 +0800 61.3 @@ -32,7 +32,11 @@ 61.4 @Override 61.5 public void run() { 61.6 try { 61.7 - if (!noReinit) dCipher.init(Cipher.DECRYPT_MODE, key, algParams); 61.8 + if (mode.equals("GCM")) { 61.9 + gcm_init(false); 61.10 + } else if (!noReinit) { 61.11 + dCipher.init(Cipher.DECRYPT_MODE, key, algParams); 61.12 + } 61.13 decode = new byte[decodeLength]; 61.14 if (testingMisalignment) { 61.15 int tempSize = dCipher.update(encode, encOutputOffset, (decodeMsgSize - lastChunkSize), decode, decOutputOffset);
62.1 --- a/test/compiler/7184394/TestAESEncode.java Tue Feb 04 17:38:01 2020 +0800 62.2 +++ b/test/compiler/7184394/TestAESEncode.java Tue Feb 04 18:13:14 2020 +0800 62.3 @@ -1,5 +1,5 @@ 62.4 /* 62.5 - * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. 62.6 + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. 62.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 62.8 * 62.9 * This code is free software; you can redistribute it and/or modify it 62.10 @@ -32,7 +32,11 @@ 62.11 @Override 62.12 public void run() { 62.13 try { 62.14 - if (!noReinit) cipher.init(Cipher.ENCRYPT_MODE, key, algParams); 62.15 + if (mode.equals("GCM")) { 62.16 + gcm_init(true); 62.17 + } else if (!noReinit) { 62.18 + cipher.init(Cipher.ENCRYPT_MODE, key, algParams); 62.19 + } 62.20 encode = new byte[encodeLength]; 62.21 if (testingMisalignment) { 62.22 int tempSize = cipher.update(input, encInputOffset, (msgSize - lastChunkSize), encode, encOutputOffset);
63.1 --- a/test/compiler/7184394/TestAESMain.java Tue Feb 04 17:38:01 2020 +0800 63.2 +++ b/test/compiler/7184394/TestAESMain.java Tue Feb 04 18:13:14 2020 +0800 63.3 @@ -41,6 +41,13 @@ 63.4 * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 TestAESMain 63.5 * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 TestAESMain 63.6 * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 -DpaddingStr=NoPadding -DmsgSize=640 TestAESMain 63.7 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM TestAESMain 63.8 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 TestAESMain 63.9 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencOutputOffset=1 TestAESMain 63.10 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DdecOutputOffset=1 TestAESMain 63.11 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 -DencOutputOffset=1 TestAESMain 63.12 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 TestAESMain 63.13 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 -DpaddingStr=NoPadding -DmsgSize=640 TestAESMain 63.14 * 63.15 * @author Tom Deneau 63.16 */
64.1 --- a/test/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java Tue Feb 04 17:38:01 2020 +0800 64.2 +++ b/test/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java Tue Feb 04 18:13:14 2020 +0800 64.3 @@ -22,9 +22,10 @@ 64.4 */ 64.5 64.6 import sun.hotspot.WhiteBox; 64.7 +import sun.misc.IOUtils; 64.8 import sun.misc.Unsafe; 64.9 -import sun.misc.IOUtils; 64.10 64.11 +import java.io.IOException; 64.12 import java.lang.reflect.Method; 64.13 import java.net.URL; 64.14 import java.net.URLConnection; 64.15 @@ -108,7 +109,13 @@ 64.16 // (1) Load an anonymous version of this class using the corresponding Unsafe method 64.17 URL classUrl = TestAnonymousClassUnloading.class.getResource("TestAnonymousClassUnloading.class"); 64.18 URLConnection connection = classUrl.openConnection(); 64.19 - byte[] classBytes = IOUtils.readFully(connection.getInputStream(), connection.getContentLength(), true); 64.20 + 64.21 + int length = connection.getContentLength(); 64.22 + byte[] classBytes = IOUtils.readAllBytes(connection.getInputStream()); 64.23 + if (length != -1 && classBytes.length != length) { 64.24 + throw new IOException("Expected:" + length + ", actual: " + classBytes.length); 64.25 + } 64.26 + 64.27 Class<?> anonymousClass = UNSAFE.defineAnonymousClass(TestAnonymousClassUnloading.class, classBytes, null); 64.28 64.29 // (2) Make sure all paths of doWork are profiled and compiled
65.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 65.2 +++ b/test/compiler/loopopts/StrangeControl.jasm Tue Feb 04 18:13:14 2020 +0800 65.3 @@ -0,0 +1,48 @@ 65.4 +/* 65.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 65.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 65.7 + * 65.8 + * This code is free software; you can redistribute it and/or modify it 65.9 + * under the terms of the GNU General Public License version 2 only, as 65.10 + * published by the Free Software Foundation. 65.11 + * 65.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 65.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 65.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 65.15 + * version 2 for more details (a copy is included in the LICENSE file that 65.16 + * accompanied this code). 65.17 + * 65.18 + * You should have received a copy of the GNU General Public License version 65.19 + * 2 along with this work; if not, write to the Free Software Foundation, 65.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 65.21 + * 65.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 65.23 + * or visit www.oracle.com if you need additional information or have any 65.24 + * questions. 65.25 + * 65.26 + */ 65.27 + 65.28 +super public class compiler/loopopts/StrangeControl 65.29 + version 51:0 65.30 +{ 65.31 + 65.32 +static Field field:"I"; 65.33 + 65.34 +public static Method test:"(I)V" 65.35 + stack 2 locals 2 65.36 +{ 65.37 + iconst_0; 65.38 + istore 1; 65.39 + L1: stack_frame_type append; 65.40 + locals_map int; 65.41 + iinc 1, 1; 65.42 + iload 1; 65.43 + iconst_2; 65.44 + if_icmple L1; 65.45 + L2: stack_frame_type same; 65.46 + iload_0; 65.47 + putstatic Field field:"I"; 65.48 + goto L1; 65.49 +} 65.50 + 65.51 +} // end Class StrangeControl
66.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 66.2 +++ b/test/compiler/loopopts/TestRemoveEmptyLoop.java Tue Feb 04 18:13:14 2020 +0800 66.3 @@ -0,0 +1,53 @@ 66.4 +/* 66.5 + * Copyright (c) 2019, Huawei Technologies Co. Ltd. All rights reserved. 66.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 66.7 + * 66.8 + * This code is free software; you can redistribute it and/or modify it 66.9 + * under the terms of the GNU General Public License version 2 only, as 66.10 + * published by the Free Software Foundation. 66.11 + * 66.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 66.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 66.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 66.15 + * version 2 for more details (a copy is included in the LICENSE file that 66.16 + * accompanied this code). 66.17 + * 66.18 + * You should have received a copy of the GNU General Public License version 66.19 + * 2 along with this work; if not, write to the Free Software Foundation, 66.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 66.21 + * 66.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 66.23 + * or visit www.oracle.com if you need additional information or have any 66.24 + * questions. 66.25 + */ 66.26 + 66.27 +/** 66.28 + * @test 66.29 + * @bug 8231988 66.30 + * @summary Unexpected test result caused by C2 IdealLoopTree::do_remove_empty_loop 66.31 + * 66.32 + * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation 66.33 + * TestRemoveEmptyLoop 66.34 + */ 66.35 + 66.36 +public class TestRemoveEmptyLoop { 66.37 + 66.38 + public void test() { 66.39 + int i = 34; 66.40 + for (; i > 0; i -= 11); 66.41 + if (i < 0) { 66.42 + // do nothing 66.43 + } else { 66.44 + throw new RuntimeException("Test failed."); 66.45 + } 66.46 + } 66.47 + 66.48 + public static void main(String[] args) { 66.49 + TestRemoveEmptyLoop _instance = new TestRemoveEmptyLoop(); 66.50 + for (int i = 0; i < 50000; i++) { 66.51 + _instance.test(); 66.52 + } 66.53 + System.out.println("Test passed."); 66.54 + } 66.55 + 66.56 +}
67.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 67.2 +++ b/test/compiler/loopopts/TestStrangeControl.java Tue Feb 04 18:13:14 2020 +0800 67.3 @@ -0,0 +1,49 @@ 67.4 +/* 67.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 67.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 67.7 + * 67.8 + * This code is free software; you can redistribute it and/or modify it 67.9 + * under the terms of the GNU General Public License version 2 only, as 67.10 + * published by the Free Software Foundation. 67.11 + * 67.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 67.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 67.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 67.15 + * version 2 for more details (a copy is included in the LICENSE file that 67.16 + * accompanied this code). 67.17 + * 67.18 + * You should have received a copy of the GNU General Public License version 67.19 + * 2 along with this work; if not, write to the Free Software Foundation, 67.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 67.21 + * 67.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 67.23 + * or visit www.oracle.com if you need additional information or have any 67.24 + * questions. 67.25 + * 67.26 + */ 67.27 + 67.28 +/* 67.29 + * @test 67.30 + * @bug 8228888 67.31 + * @summary Test PhaseIdealLoop::has_local_phi_input() with phi input with non-dominating control. 67.32 + * @compile StrangeControl.jasm 67.33 + * @run main/othervm -Xbatch -XX:CompileCommand=inline,compiler.loopopts.StrangeControl::test 67.34 + * compiler.loopopts.TestStrangeControl 67.35 + */ 67.36 + 67.37 +package compiler.loopopts; 67.38 + 67.39 +public class TestStrangeControl { 67.40 + 67.41 + public static void main(String[] args) throws Exception { 67.42 + Thread thread = new Thread() { 67.43 + public void run() { 67.44 + // Run this in an own thread because it's basically an endless loop 67.45 + StrangeControl.test(42); 67.46 + } 67.47 + }; 67.48 + thread.start(); 67.49 + // Give thread executing strange control loop enough time to trigger OSR compilation 67.50 + Thread.sleep(4000); 67.51 + } 67.52 +}
68.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 68.2 +++ b/test/compiler/loopopts/superword/TestFuzzPreLoop.java Tue Feb 04 18:13:14 2020 +0800 68.3 @@ -0,0 +1,65 @@ 68.4 +/* 68.5 + * Copyright (c) 2019, Red Hat, Inc. All rights reserved. 68.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 68.7 + * 68.8 + * This code is free software; you can redistribute it and/or modify it 68.9 + * under the terms of the GNU General Public License version 2 only, as 68.10 + * published by the Free Software Foundation. 68.11 + * 68.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 68.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 68.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 68.15 + * version 2 for more details (a copy is included in the LICENSE file that 68.16 + * accompanied this code). 68.17 + * 68.18 + * You should have received a copy of the GNU General Public License version 68.19 + * 2 along with this work; if not, write to the Free Software Foundation, 68.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 68.21 + * 68.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 68.23 + * or visit www.oracle.com if you need additional information or have any 68.24 + * questions. 68.25 + */ 68.26 + 68.27 +/* 68.28 + * @test 68.29 + * @bug 8134739 8010500 68.30 + * @summary SEGV in SuperWord::get_pre_loop_end 68.31 + * @run main/othervm compiler.loopopts.superword.TestFuzzPreLoop 68.32 + */ 68.33 + 68.34 +package compiler.loopopts.superword; 68.35 + 68.36 +public class TestFuzzPreLoop { 68.37 + static Object sink; 68.38 + short sFld = -19206; 68.39 + 68.40 + void doTest() { 68.41 + int[] arr = new int[400]; 68.42 + 68.43 + for (int i1 = 0; i1 < 200; i1++) { 68.44 + for (int i2 = 0; i2 < 100; i2++) { 68.45 + sink = new int[400]; 68.46 + } 68.47 + arr[i1] = 0; 68.48 + } 68.49 + 68.50 + float f1 = 0; 68.51 + for (int i3 = 0; i3 < 200; i3++) { 68.52 + f1 += i3 * i3; 68.53 + } 68.54 + for (int i4 = 0; i4 < 200; i4++) { 68.55 + f1 += i4 - sFld; 68.56 + } 68.57 + 68.58 + System.out.println(arr); 68.59 + System.out.println(f1); 68.60 + } 68.61 + 68.62 + public static void main(String... args) throws Exception { 68.63 + TestFuzzPreLoop test = new TestFuzzPreLoop(); 68.64 + for (int i = 0; i < 100; i++) { 68.65 + test.doTest(); 68.66 + } 68.67 + } 68.68 +}
69.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 69.2 +++ b/test/compiler/print/TestProfileReturnTypePrinting.java Tue Feb 04 18:13:14 2020 +0800 69.3 @@ -0,0 +1,68 @@ 69.4 +/* 69.5 + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. 69.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 69.7 + * 69.8 + * This code is free software; you can redistribute it and/or modify it 69.9 + * under the terms of the GNU General Public License version 2 only, as 69.10 + * published by the Free Software Foundation. 69.11 + * 69.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 69.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 69.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 69.15 + * version 2 for more details (a copy is included in the LICENSE file that 69.16 + * accompanied this code). 69.17 + * 69.18 + * You should have received a copy of the GNU General Public License version 69.19 + * 2 along with this work; if not, write to the Free Software Foundation, 69.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 69.21 + * 69.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 69.23 + * or visit www.oracle.com if you need additional information or have any 69.24 + * questions. 69.25 + */ 69.26 + 69.27 +/** 69.28 + * @test 69.29 + * @bug 8073154 69.30 + * @build TestProfileReturnTypePrinting 69.31 + * @run main/othervm -XX:TypeProfileLevel=020 69.32 + * -XX:CompileOnly=TestProfileReturnTypePrinting.testMethod 69.33 + * -XX:+IgnoreUnrecognizedVMOptions -XX:+PrintLIR 69.34 + * TestProfileReturnTypePrinting 69.35 + * @summary Verify that c1's LIR that contains ProfileType node could be dumped 69.36 + * without a crash disregard to an exact class knowledge. 69.37 + */ 69.38 +public class TestProfileReturnTypePrinting { 69.39 + private static final int ITERATIONS = 1_000_000; 69.40 + 69.41 + public static void main(String args[]) { 69.42 + for (int i = 0; i < ITERATIONS; i++) { 69.43 + TestProfileReturnTypePrinting.testMethod(i); 69.44 + } 69.45 + } 69.46 + 69.47 + private static int testMethod(int i) { 69.48 + return TestProfileReturnTypePrinting.foo().hashCode() 69.49 + + TestProfileReturnTypePrinting.bar(i).hashCode(); 69.50 + } 69.51 + 69.52 + /* Exact class of returned value is known statically. */ 69.53 + private static B foo() { 69.54 + return new B(); 69.55 + } 69.56 + 69.57 + /* Exact class of returned value is not known statically. */ 69.58 + private static Object bar(int i) { 69.59 + if (i % 2 == 0) { 69.60 + return new A(); 69.61 + } else { 69.62 + return new B(); 69.63 + } 69.64 + } 69.65 + 69.66 + private static class A { 69.67 + } 69.68 + 69.69 + private static class B extends A { 69.70 + } 69.71 +}
70.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 70.2 +++ b/test/gc/stress/gclocker/TestExcessGCLockerCollections.java Tue Feb 04 18:13:14 2020 +0800 70.3 @@ -0,0 +1,285 @@ 70.4 +/* 70.5 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 70.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 70.7 + * 70.8 + * This code is free software; you can redistribute it and/or modify it 70.9 + * under the terms of the GNU General Public License version 2 only, as 70.10 + * published by the Free Software Foundation. 70.11 + * 70.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 70.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 70.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 70.15 + * version 2 for more details (a copy is included in the LICENSE file that 70.16 + * accompanied this code). 70.17 + * 70.18 + * You should have received a copy of the GNU General Public License version 70.19 + * 2 along with this work; if not, write to the Free Software Foundation, 70.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 70.21 + * 70.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 70.23 + * or visit www.oracle.com if you need additional information or have any 70.24 + * questions. 70.25 + */ 70.26 + 70.27 +package gc.stress.gclocker; 70.28 + 70.29 +// Based on Kim Barrett's test for JDK-8048556 70.30 + 70.31 +/* 70.32 + * @test TestExcessGCLockerCollections 70.33 + * @key gc 70.34 + * @bug 8048556 70.35 + * @summary Check for GC Locker initiated GCs that immediately follow another 70.36 + * GC and so have very little needing to be collected. 70.37 + * @library /testlibrary 70.38 + * @run driver/timeout=1000 gc.stress.gclocker.TestExcessGCLockerCollections 300 4 2 70.39 + */ 70.40 + 70.41 +import java.util.HashMap; 70.42 +import java.util.Map; 70.43 + 70.44 +import java.util.zip.Deflater; 70.45 + 70.46 +import java.util.ArrayList; 70.47 +import java.util.Arrays; 70.48 + 70.49 +import javax.management.MBeanServer; 70.50 +import javax.management.Notification; 70.51 +import javax.management.NotificationListener; 70.52 +import javax.management.openmbean.CompositeData; 70.53 +import java.lang.management.ManagementFactory; 70.54 +import java.lang.management.GarbageCollectorMXBean; 70.55 +import java.lang.management.MemoryUsage; 70.56 +import java.util.List; 70.57 +import com.sun.management.GarbageCollectionNotificationInfo; 70.58 +import com.sun.management.GcInfo; 70.59 + 70.60 +import com.oracle.java.testlibrary.Asserts; 70.61 +import com.oracle.java.testlibrary.ProcessTools; 70.62 +import com.oracle.java.testlibrary.OutputAnalyzer; 70.63 + 70.64 +class TestExcessGCLockerCollectionsStringConstants { 70.65 + // Some constant strings used in both GC logging and error detection 70.66 + static public final String GCLOCKER_CAUSE = "GCLocker Initiated GC"; 70.67 + static public final String USED_TOO_LOW = "TOO LOW"; 70.68 + static public final String USED_OK = "OK"; 70.69 +} 70.70 + 70.71 +class TestExcessGCLockerCollectionsAux { 70.72 + static private final int LARGE_MAP_SIZE = 64 * 1024; 70.73 + 70.74 + static private final int MAP_ARRAY_LENGTH = 4; 70.75 + static private final int MAP_SIZE = 1024; 70.76 + 70.77 + static private final int BYTE_ARRAY_LENGTH = 128 * 1024; 70.78 + 70.79 + static private void println(String str) { System.out.println(str); } 70.80 + static private void println() { System.out.println(); } 70.81 + 70.82 + static private volatile boolean keepRunning = true; 70.83 + 70.84 + static Map<Integer,String> populateMap(int size) { 70.85 + Map<Integer,String> map = new HashMap<Integer,String>(); 70.86 + for (int i = 0; i < size; i += 1) { 70.87 + Integer keyInt = Integer.valueOf(i); 70.88 + String valStr = "value is [" + i + "]"; 70.89 + map.put(keyInt,valStr); 70.90 + } 70.91 + return map; 70.92 + } 70.93 + 70.94 + static private class AllocatingWorker implements Runnable { 70.95 + private final Object[] array = new Object[MAP_ARRAY_LENGTH]; 70.96 + private int arrayIndex = 0; 70.97 + 70.98 + private void doStep() { 70.99 + Map<Integer,String> map = populateMap(MAP_SIZE); 70.100 + array[arrayIndex] = map; 70.101 + arrayIndex = (arrayIndex + 1) % MAP_ARRAY_LENGTH; 70.102 + } 70.103 + 70.104 + public void run() { 70.105 + while (keepRunning) { 70.106 + doStep(); 70.107 + } 70.108 + } 70.109 + } 70.110 + 70.111 + static private class JNICriticalWorker implements Runnable { 70.112 + private int count; 70.113 + 70.114 + private void doStep() { 70.115 + byte[] inputArray = new byte[BYTE_ARRAY_LENGTH]; 70.116 + for (int i = 0; i < inputArray.length; i += 1) { 70.117 + inputArray[i] = (byte) (count + i); 70.118 + } 70.119 + 70.120 + Deflater deflater = new Deflater(); 70.121 + deflater.setInput(inputArray); 70.122 + deflater.finish(); 70.123 + 70.124 + byte[] outputArray = new byte[2 * inputArray.length]; 70.125 + deflater.deflate(outputArray); 70.126 + 70.127 + count += 1; 70.128 + } 70.129 + 70.130 + public void run() { 70.131 + while (keepRunning) { 70.132 + doStep(); 70.133 + } 70.134 + } 70.135 + } 70.136 + 70.137 + static class GCNotificationListener implements NotificationListener { 70.138 + static private final double MIN_USED_PERCENT = 40.0; 70.139 + 70.140 + static private final List<String> newGenPoolNames = Arrays.asList( 70.141 + "G1 Eden Space", // OpenJDK G1GC: -XX:+UseG1GC 70.142 + "PS Eden Space", // OpenJDK ParallelGC: -XX:+ParallelGC 70.143 + "Par Eden Space", // OpenJDK ConcMarkSweepGC: -XX:+ConcMarkSweepGC 70.144 + "Eden Space" // OpenJDK SerialGC: -XX:+UseSerialGC 70.145 + // OpenJDK ConcMarkSweepGC: -XX:+ConcMarkSweepGC -XX:-UseParNewGC 70.146 + ); 70.147 + 70.148 + @Override 70.149 + public void handleNotification(Notification notification, Object handback) { 70.150 + try { 70.151 + if (notification.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { 70.152 + GarbageCollectionNotificationInfo info = 70.153 + GarbageCollectionNotificationInfo.from((CompositeData) notification.getUserData()); 70.154 + 70.155 + String gc_cause = info.getGcCause(); 70.156 + 70.157 + if (gc_cause.equals(TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE)) { 70.158 + Map<String, MemoryUsage> memory_before_gc = info.getGcInfo().getMemoryUsageBeforeGc(); 70.159 + 70.160 + for (String newGenPoolName : newGenPoolNames) { 70.161 + MemoryUsage usage = memory_before_gc.get(newGenPoolName); 70.162 + if (usage == null) continue; 70.163 + 70.164 + double startTime = ((double) info.getGcInfo().getStartTime()) / 1000.0; 70.165 + long used = usage.getUsed(); 70.166 + long committed = usage.getCommitted(); 70.167 + long max = usage.getMax(); 70.168 + double used_percent = (((double) used) / Math.max(committed, max)) * 100.0; 70.169 + 70.170 + System.out.printf("%6.3f: (%s) %d/%d/%d, %8.4f%% (%s)\n", 70.171 + startTime, gc_cause, used, committed, max, used_percent, 70.172 + ((used_percent < MIN_USED_PERCENT) ? TestExcessGCLockerCollectionsStringConstants.USED_TOO_LOW 70.173 + : TestExcessGCLockerCollectionsStringConstants.USED_OK)); 70.174 + } 70.175 + } 70.176 + } 70.177 + } catch (RuntimeException ex) { 70.178 + System.err.println("Exception during notification processing:" + ex); 70.179 + ex.printStackTrace(); 70.180 + } 70.181 + } 70.182 + 70.183 + public static boolean register() { 70.184 + try { 70.185 + MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); 70.186 + 70.187 + // Get the list of MX 70.188 + List<GarbageCollectorMXBean> gc_mxbeans = ManagementFactory.getGarbageCollectorMXBeans(); 70.189 + 70.190 + // Create the notification listener 70.191 + GCNotificationListener gcNotificationListener = new GCNotificationListener(); 70.192 + 70.193 + for (GarbageCollectorMXBean gcbean : gc_mxbeans) { 70.194 + // Add notification listener for the MXBean 70.195 + mbeanServer.addNotificationListener(gcbean.getObjectName(), gcNotificationListener, null, null); 70.196 + } 70.197 + } catch (Exception ex) { 70.198 + System.err.println("Exception during mbean registration:" + ex); 70.199 + ex.printStackTrace(); 70.200 + // We've failed to set up, terminate 70.201 + return false; 70.202 + } 70.203 + 70.204 + return true; 70.205 + } 70.206 + } 70.207 + 70.208 + static public Map<Integer,String> largeMap; 70.209 + 70.210 + static public void main(String args[]) { 70.211 + long durationSec = Long.parseLong(args[0]); 70.212 + int allocThreadNum = Integer.parseInt(args[1]); 70.213 + int jniCriticalThreadNum = Integer.parseInt(args[2]); 70.214 + 70.215 + println("Running for " + durationSec + " secs"); 70.216 + 70.217 + if (!GCNotificationListener.register()) { 70.218 + println("failed to register GC notification listener"); 70.219 + System.exit(-1); 70.220 + } 70.221 + 70.222 + largeMap = populateMap(LARGE_MAP_SIZE); 70.223 + 70.224 + println("Starting " + allocThreadNum + " allocating threads"); 70.225 + for (int i = 0; i < allocThreadNum; i += 1) { 70.226 + new Thread(new AllocatingWorker()).start(); 70.227 + } 70.228 + 70.229 + println("Starting " + jniCriticalThreadNum + " jni critical threads"); 70.230 + for (int i = 0; i < jniCriticalThreadNum; i += 1) { 70.231 + new Thread(new JNICriticalWorker()).start(); 70.232 + } 70.233 + 70.234 + long durationMS = (long) (1000 * durationSec); 70.235 + long start = System.currentTimeMillis(); 70.236 + long now = start; 70.237 + long soFar = now - start; 70.238 + while (soFar < durationMS) { 70.239 + try { 70.240 + Thread.sleep(durationMS - soFar); 70.241 + } catch (Exception e) { 70.242 + } 70.243 + now = System.currentTimeMillis(); 70.244 + soFar = now - start; 70.245 + } 70.246 + println("Done."); 70.247 + keepRunning = false; 70.248 + } 70.249 +} 70.250 + 70.251 +public class TestExcessGCLockerCollections { 70.252 + private static final String USED_OK_LINE = 70.253 + "\\(" + TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE + "\\)" 70.254 + + " .* " + 70.255 + "\\(" + TestExcessGCLockerCollectionsStringConstants.USED_OK + "\\)"; 70.256 + private static final String USED_TOO_LOW_LINE = 70.257 + "\\(" + TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE + "\\)" 70.258 + + " .* " + 70.259 + "\\(" + TestExcessGCLockerCollectionsStringConstants.USED_TOO_LOW + "\\)"; 70.260 + 70.261 + private static final String[] COMMON_OPTIONS = new String[] { 70.262 + "-Xmx1G", "-Xms1G", "-Xmn256M" }; 70.263 + 70.264 + public static void main(String args[]) throws Exception { 70.265 + if (args.length < 3) { 70.266 + System.out.println("usage: TestExcessGCLockerCollections" + 70.267 + " <duration sec> <alloc threads>" + 70.268 + " <jni critical threads>"); 70.269 + throw new RuntimeException("Invalid arguments"); 70.270 + } 70.271 + 70.272 + ArrayList<String> finalArgs = new ArrayList<String>(); 70.273 + finalArgs.addAll(Arrays.asList(COMMON_OPTIONS)); 70.274 + finalArgs.add(TestExcessGCLockerCollectionsAux.class.getName()); 70.275 + finalArgs.addAll(Arrays.asList(args)); 70.276 + 70.277 + // GC and other options obtained from test framework. 70.278 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( 70.279 + true, finalArgs.toArray(new String[0])); 70.280 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 70.281 + output.shouldHaveExitValue(0); 70.282 + //System.out.println("------------- begin stdout ----------------"); 70.283 + //System.out.println(output.getStdout()); 70.284 + //System.out.println("------------- end stdout ----------------"); 70.285 + output.stdoutShouldMatch(USED_OK_LINE); 70.286 + output.stdoutShouldNotMatch(USED_TOO_LOW_LINE); 70.287 + } 70.288 +}
71.1 --- a/test/runtime/8003720/VictimClassLoader.java Tue Feb 04 17:38:01 2020 +0800 71.2 +++ b/test/runtime/8003720/VictimClassLoader.java Tue Feb 04 18:13:14 2020 +0800 71.3 @@ -22,6 +22,8 @@ 71.4 * 71.5 */ 71.6 71.7 +import sun.misc.IOUtils; 71.8 + 71.9 public class VictimClassLoader extends ClassLoader { 71.10 public static long counter = 0; 71.11 71.12 @@ -72,8 +74,10 @@ 71.13 } 71.14 71.15 static byte[] readFully(java.io.InputStream in, int len) throws java.io.IOException { 71.16 - // Warning here: 71.17 - return sun.misc.IOUtils.readFully(in, len, true); 71.18 + byte[] b = IOUtils.readAllBytes(in); 71.19 + if (len != -1 && b.length != len) 71.20 + throw new java.io.IOException("Expected:" + len + ", actual:" + b.length); 71.21 + return b; 71.22 } 71.23 71.24 public void finalize() {