Fri, 16 Aug 2013 10:06:58 -0700
Merge
src/share/vm/utilities/globalDefinitions.hpp | file | annotate | diff | comparison | revisions |
1.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Fri Aug 16 04:24:07 2013 -0700 1.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Fri Aug 16 10:06:58 2013 -0700 1.3 @@ -1,5 +1,5 @@ 1.4 /* 1.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 1.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 1.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.8 * 1.9 * This code is free software; you can redistribute it and/or modify it 1.10 @@ -29,6 +29,7 @@ 1.11 #include "interpreter/interpreter.hpp" 1.12 #include "memory/cardTableModRefBS.hpp" 1.13 #include "memory/resourceArea.hpp" 1.14 +#include "memory/universe.hpp" 1.15 #include "prims/methodHandles.hpp" 1.16 #include "runtime/biasedLocking.hpp" 1.17 #include "runtime/interfaceSupport.hpp" 1.18 @@ -1145,7 +1146,7 @@ 1.19 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1.20 int klass_index = oop_recorder()->find_index(k); 1.21 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 1.22 - narrowOop encoded_k = oopDesc::encode_klass(k); 1.23 + narrowOop encoded_k = Klass::encode_klass(k); 1.24 1.25 assert_not_delayed(); 1.26 // Relocation with special format (see relocInfo_sparc.hpp). 
1.27 @@ -1419,7 +1420,6 @@ 1.28 load_klass(O0_obj, O0_obj); 1.29 // assert((klass != NULL) 1.30 br_null_short(O0_obj, pn, fail); 1.31 - // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers 1.32 1.33 wrccr( O5_save_flags ); // Restore CCR's 1.34 1.35 @@ -4089,52 +4089,91 @@ 1.36 } 1.37 1.38 void MacroAssembler::encode_klass_not_null(Register r) { 1.39 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 1.40 assert (UseCompressedKlassPointers, "must be compressed"); 1.41 - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 1.42 - if (Universe::narrow_klass_base() != NULL) 1.43 - sub(r, G6_heapbase, r); 1.44 - srlx(r, LogKlassAlignmentInBytes, r); 1.45 + assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); 1.46 + assert(r != G6_heapbase, "bad register choice"); 1.47 + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 1.48 + sub(r, G6_heapbase, r); 1.49 + if (Universe::narrow_klass_shift() != 0) { 1.50 + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 1.51 + srlx(r, LogKlassAlignmentInBytes, r); 1.52 + } 1.53 + reinit_heapbase(); 1.54 } 1.55 1.56 void MacroAssembler::encode_klass_not_null(Register src, Register dst) { 1.57 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 1.58 - assert (UseCompressedKlassPointers, "must be compressed"); 1.59 - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 1.60 - if (Universe::narrow_klass_base() == NULL) { 1.61 - srlx(src, LogKlassAlignmentInBytes, dst); 1.62 + if (src == dst) { 1.63 + encode_klass_not_null(src); 1.64 } else { 1.65 - sub(src, G6_heapbase, dst); 1.66 - srlx(dst, LogKlassAlignmentInBytes, dst); 1.67 + assert (UseCompressedKlassPointers, "must be compressed"); 1.68 + assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); 1.69 + 
set((intptr_t)Universe::narrow_klass_base(), dst); 1.70 + sub(src, dst, dst); 1.71 + if (Universe::narrow_klass_shift() != 0) { 1.72 + srlx(dst, LogKlassAlignmentInBytes, dst); 1.73 + } 1.74 } 1.75 } 1.76 1.77 +// Function instr_size_for_decode_klass_not_null() counts the instructions 1.78 +// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if 1.79 +// the instructions they generate change, then this method needs to be updated. 1.80 +int MacroAssembler::instr_size_for_decode_klass_not_null() { 1.81 + assert (UseCompressedKlassPointers, "only for compressed klass ptrs"); 1.82 + // set + add + set 1.83 + int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 + 1.84 + insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); 1.85 + if (Universe::narrow_klass_shift() == 0) { 1.86 + return num_instrs * BytesPerInstWord; 1.87 + } else { // sllx 1.88 + return (num_instrs + 1) * BytesPerInstWord; 1.89 + } 1.90 +} 1.91 + 1.92 +// !!! If the instructions that get generated here change then function 1.93 +// instr_size_for_decode_klass_not_null() needs to get updated. 1.94 void MacroAssembler::decode_klass_not_null(Register r) { 1.95 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 1.96 // Do not add assert code to this unless you change vtableStubs_sparc.cpp 1.97 // pd_code_size_limit. 
1.98 assert (UseCompressedKlassPointers, "must be compressed"); 1.99 - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 1.100 - sllx(r, LogKlassAlignmentInBytes, r); 1.101 - if (Universe::narrow_klass_base() != NULL) 1.102 - add(r, G6_heapbase, r); 1.103 + assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); 1.104 + assert(r != G6_heapbase, "bad register choice"); 1.105 + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 1.106 + if (Universe::narrow_klass_shift() != 0) 1.107 + sllx(r, LogKlassAlignmentInBytes, r); 1.108 + add(r, G6_heapbase, r); 1.109 + reinit_heapbase(); 1.110 } 1.111 1.112 void MacroAssembler::decode_klass_not_null(Register src, Register dst) { 1.113 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 1.114 - // Do not add assert code to this unless you change vtableStubs_sparc.cpp 1.115 - // pd_code_size_limit. 1.116 - assert (UseCompressedKlassPointers, "must be compressed"); 1.117 - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 1.118 - sllx(src, LogKlassAlignmentInBytes, dst); 1.119 - if (Universe::narrow_klass_base() != NULL) 1.120 - add(dst, G6_heapbase, dst); 1.121 + if (src == dst) { 1.122 + decode_klass_not_null(src); 1.123 + } else { 1.124 + // Do not add assert code to this unless you change vtableStubs_sparc.cpp 1.125 + // pd_code_size_limit. 
1.126 + assert (UseCompressedKlassPointers, "must be compressed"); 1.127 + assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); 1.128 + if (Universe::narrow_klass_shift() != 0) { 1.129 + assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); 1.130 + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); 1.131 + sllx(src, LogKlassAlignmentInBytes, dst); 1.132 + add(dst, G6_heapbase, dst); 1.133 + reinit_heapbase(); 1.134 + } else { 1.135 + set((intptr_t)Universe::narrow_klass_base(), dst); 1.136 + add(src, dst, dst); 1.137 + } 1.138 + } 1.139 } 1.140 1.141 void MacroAssembler::reinit_heapbase() { 1.142 if (UseCompressedOops || UseCompressedKlassPointers) { 1.143 - AddressLiteral base(Universe::narrow_ptrs_base_addr()); 1.144 - load_ptr_contents(base, G6_heapbase); 1.145 + if (Universe::heap() != NULL) { 1.146 + set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase); 1.147 + } else { 1.148 + AddressLiteral base(Universe::narrow_ptrs_base_addr()); 1.149 + load_ptr_contents(base, G6_heapbase); 1.150 + } 1.151 } 1.152 } 1.153
2.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp Fri Aug 16 04:24:07 2013 -0700 2.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp Fri Aug 16 10:06:58 2013 -0700 2.3 @@ -1177,6 +1177,9 @@ 2.4 void push_CPU_state(); 2.5 void pop_CPU_state(); 2.6 2.7 + // Returns the byte size of the instructions generated by decode_klass_not_null(). 2.8 + static int instr_size_for_decode_klass_not_null(); 2.9 + 2.10 // if heap base register is used - reinit it with the correct value 2.11 void reinit_heapbase(); 2.12
3.1 --- a/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Aug 16 04:24:07 2013 -0700 3.2 +++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Aug 16 10:06:58 2013 -0700 3.3 @@ -1,5 +1,5 @@ 3.4 /* 3.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. 3.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 3.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3.8 * 3.9 * This code is free software; you can redistribute it and/or modify it 3.10 @@ -97,7 +97,7 @@ 3.11 guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi"); 3.12 if (format() != 0) { 3.13 assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case"); 3.14 - jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : oopDesc::encode_klass((Klass*)x); 3.15 + jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x); 3.16 inst &= ~Assembler::hi22(-1); 3.17 inst |= Assembler::hi22((intptr_t)np); 3.18 if (verify_only) {
4.1 --- a/src/cpu/sparc/vm/sparc.ad Fri Aug 16 04:24:07 2013 -0700 4.2 +++ b/src/cpu/sparc/vm/sparc.ad Fri Aug 16 10:06:58 2013 -0700 4.3 @@ -559,10 +559,7 @@ 4.4 int klass_load_size; 4.5 if (UseCompressedKlassPointers) { 4.6 assert(Universe::heap() != NULL, "java heap should be initialized"); 4.7 - if (Universe::narrow_klass_base() == NULL) 4.8 - klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass() 4.9 - else 4.10 - klass_load_size = 3*BytesPerInstWord; 4.11 + klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; 4.12 } else { 4.13 klass_load_size = 1*BytesPerInstWord; 4.14 } 4.15 @@ -1663,9 +1660,12 @@ 4.16 if (UseCompressedKlassPointers) { 4.17 assert(Universe::heap() != NULL, "java heap should be initialized"); 4.18 st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); 4.19 - st->print_cr("\tSLL R_G5,3,R_G5"); 4.20 - if (Universe::narrow_klass_base() != NULL) 4.21 - st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); 4.22 + st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); 4.23 + if (Universe::narrow_klass_shift() != 0) { 4.24 + st->print_cr("\tSLL R_G5,3,R_G5"); 4.25 + } 4.26 + st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); 4.27 + st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base"); 4.28 } else { 4.29 st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check"); 4.30 } 4.31 @@ -2563,10 +2563,7 @@ 4.32 int klass_load_size; 4.33 if (UseCompressedKlassPointers) { 4.34 assert(Universe::heap() != NULL, "java heap should be initialized"); 4.35 - if (Universe::narrow_klass_base() == NULL) 4.36 - klass_load_size = 2*BytesPerInstWord; 4.37 - else 4.38 - klass_load_size = 3*BytesPerInstWord; 4.39 + klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; 4.40 } else { 4.41 klass_load_size = 1*BytesPerInstWord; 4.42 }
5.1 --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp Fri Aug 16 04:24:07 2013 -0700 5.2 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp Fri Aug 16 10:06:58 2013 -0700 5.3 @@ -1,5 +1,5 @@ 5.4 /* 5.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 5.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 5.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5.8 * 5.9 * This code is free software; you can redistribute it and/or modify it 5.10 @@ -219,13 +219,13 @@ 5.11 const int basic = 5*BytesPerInstWord + 5.12 // shift;add for load_klass (only shift with zero heap based) 5.13 (UseCompressedKlassPointers ? 5.14 - ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0); 5.15 + MacroAssembler::instr_size_for_decode_klass_not_null() : 0); 5.16 return basic + slop; 5.17 } else { 5.18 const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord + 5.19 // shift;add for load_klass (only shift with zero heap based) 5.20 (UseCompressedKlassPointers ? 5.21 - ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0); 5.22 + MacroAssembler::instr_size_for_decode_klass_not_null() : 0); 5.23 return (basic + slop); 5.24 } 5.25 }
6.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Aug 16 04:24:07 2013 -0700 6.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Aug 16 10:06:58 2013 -0700 6.3 @@ -30,6 +30,7 @@ 6.4 #include "interpreter/interpreter.hpp" 6.5 #include "memory/cardTableModRefBS.hpp" 6.6 #include "memory/resourceArea.hpp" 6.7 +#include "memory/universe.hpp" 6.8 #include "prims/methodHandles.hpp" 6.9 #include "runtime/biasedLocking.hpp" 6.10 #include "runtime/interfaceSupport.hpp" 6.11 @@ -4810,23 +4811,8 @@ 6.12 } 6.13 6.14 void MacroAssembler::load_prototype_header(Register dst, Register src) { 6.15 -#ifdef _LP64 6.16 - if (UseCompressedKlassPointers) { 6.17 - assert (Universe::heap() != NULL, "java heap should be initialized"); 6.18 - movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6.19 - if (Universe::narrow_klass_shift() != 0) { 6.20 - assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6.21 - assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 6.22 - movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset())); 6.23 - } else { 6.24 - movq(dst, Address(dst, Klass::prototype_header_offset())); 6.25 - } 6.26 - } else 6.27 -#endif 6.28 - { 6.29 - movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6.30 - movptr(dst, Address(dst, Klass::prototype_header_offset())); 6.31 - } 6.32 + load_klass(dst, src); 6.33 + movptr(dst, Address(dst, Klass::prototype_header_offset())); 6.34 } 6.35 6.36 void MacroAssembler::store_klass(Register dst, Register src) { 6.37 @@ -4914,7 +4900,7 @@ 6.38 6.39 #ifdef ASSERT 6.40 void MacroAssembler::verify_heapbase(const char* msg) { 6.41 - assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed"); 6.42 + assert (UseCompressedOops, "should be compressed"); 6.43 assert (Universe::heap() != NULL, "java heap should be initialized"); 6.44 if (CheckCompressedOops) { 6.45 Label ok; 6.46 @@ -5058,69 +5044,80 @@ 6.47 } 6.48 
6.49 void MacroAssembler::encode_klass_not_null(Register r) { 6.50 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 6.51 -#ifdef ASSERT 6.52 - verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?"); 6.53 -#endif 6.54 - if (Universe::narrow_klass_base() != NULL) { 6.55 - subq(r, r12_heapbase); 6.56 - } 6.57 + assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); 6.58 + // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. 6.59 + assert(r != r12_heapbase, "Encoding a klass in r12"); 6.60 + mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); 6.61 + subq(r, r12_heapbase); 6.62 if (Universe::narrow_klass_shift() != 0) { 6.63 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6.64 shrq(r, LogKlassAlignmentInBytes); 6.65 } 6.66 + reinit_heapbase(); 6.67 } 6.68 6.69 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 6.70 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 6.71 -#ifdef ASSERT 6.72 - verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?"); 6.73 -#endif 6.74 - if (dst != src) { 6.75 - movq(dst, src); 6.76 - } 6.77 - if (Universe::narrow_klass_base() != NULL) { 6.78 - subq(dst, r12_heapbase); 6.79 - } 6.80 - if (Universe::narrow_klass_shift() != 0) { 6.81 - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6.82 - shrq(dst, LogKlassAlignmentInBytes); 6.83 - } 6.84 -} 6.85 - 6.86 + if (dst == src) { 6.87 + encode_klass_not_null(src); 6.88 + } else { 6.89 + mov64(dst, (int64_t)Universe::narrow_klass_base()); 6.90 + negq(dst); 6.91 + addq(dst, src); 6.92 + if (Universe::narrow_klass_shift() != 0) { 6.93 + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6.94 + shrq(dst, LogKlassAlignmentInBytes); 6.95 + } 6.96 + } 6.97 +} 6.98 + 6.99 +// Function 
instr_size_for_decode_klass_not_null() counts the instructions 6.100 +// generated by decode_klass_not_null(register r) and reinit_heapbase(), 6.101 +// when (Universe::heap() != NULL). Hence, if the instructions they 6.102 +// generate change, then this method needs to be updated. 6.103 +int MacroAssembler::instr_size_for_decode_klass_not_null() { 6.104 + assert (UseCompressedKlassPointers, "only for compressed klass ptrs"); 6.105 + // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). 6.106 + return (Universe::narrow_klass_shift() == 0 ? 20 : 24); 6.107 +} 6.108 + 6.109 +// !!! If the instructions that get generated here change then function 6.110 +// instr_size_for_decode_klass_not_null() needs to get updated. 6.111 void MacroAssembler::decode_klass_not_null(Register r) { 6.112 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 6.113 // Note: it will change flags 6.114 + assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); 6.115 assert (UseCompressedKlassPointers, "should only be used for compressed headers"); 6.116 + assert(r != r12_heapbase, "Decoding a klass in r12"); 6.117 // Cannot assert, unverified entry point counts instructions (see .ad file) 6.118 // vtableStubs also counts instructions in pd_code_size_limit. 6.119 // Also do not verify_oop as this is called by verify_oop. 6.120 if (Universe::narrow_klass_shift() != 0) { 6.121 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6.122 shlq(r, LogKlassAlignmentInBytes); 6.123 - if (Universe::narrow_klass_base() != NULL) { 6.124 - addq(r, r12_heapbase); 6.125 - } 6.126 + } 6.127 + // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. 
6.128 + mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); 6.129 + addq(r, r12_heapbase); 6.130 + reinit_heapbase(); 6.131 +} 6.132 + 6.133 +void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 6.134 + // Note: it will change flags 6.135 + assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); 6.136 + assert (UseCompressedKlassPointers, "should only be used for compressed headers"); 6.137 + if (dst == src) { 6.138 + decode_klass_not_null(dst); 6.139 } else { 6.140 - assert (Universe::narrow_klass_base() == NULL, "sanity"); 6.141 - } 6.142 -} 6.143 - 6.144 -void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 6.145 - assert(Metaspace::is_initialized(), "metaspace should be initialized"); 6.146 - // Note: it will change flags 6.147 - assert (UseCompressedKlassPointers, "should only be used for compressed headers"); 6.148 - // Cannot assert, unverified entry point counts instructions (see .ad file) 6.149 - // vtableStubs also counts instructions in pd_code_size_limit. 6.150 - // Also do not verify_oop as this is called by verify_oop. 6.151 - if (Universe::narrow_klass_shift() != 0) { 6.152 - assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6.153 - assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 6.154 - leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 6.155 - } else { 6.156 - assert (Universe::narrow_klass_base() == NULL, "sanity"); 6.157 - if (dst != src) { 6.158 - movq(dst, src); 6.159 + // Cannot assert, unverified entry point counts instructions (see .ad file) 6.160 + // vtableStubs also counts instructions in pd_code_size_limit. 6.161 + // Also do not verify_oop as this is called by verify_oop. 
6.162 + 6.163 + mov64(dst, (int64_t)Universe::narrow_klass_base()); 6.164 + if (Universe::narrow_klass_shift() != 0) { 6.165 + assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6.166 + assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 6.167 + leaq(dst, Address(dst, src, Address::times_8, 0)); 6.168 + } else { 6.169 + addq(dst, src); 6.170 } 6.171 } 6.172 } 6.173 @@ -5148,7 +5145,7 @@ 6.174 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 6.175 int klass_index = oop_recorder()->find_index(k); 6.176 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6.177 - mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec); 6.178 + mov_narrow_oop(dst, Klass::encode_klass(k), rspec); 6.179 } 6.180 6.181 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 6.182 @@ -5156,7 +5153,7 @@ 6.183 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 6.184 int klass_index = oop_recorder()->find_index(k); 6.185 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6.186 - mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec); 6.187 + mov_narrow_oop(dst, Klass::encode_klass(k), rspec); 6.188 } 6.189 6.190 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 6.191 @@ -5182,7 +5179,7 @@ 6.192 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 6.193 int klass_index = oop_recorder()->find_index(k); 6.194 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6.195 - Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec); 6.196 + Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec); 6.197 } 6.198 6.199 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 6.200 @@ -5190,14 +5187,23 @@ 6.201 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 6.202 int klass_index = oop_recorder()->find_index(k); 6.203 RelocationHolder rspec = 
metadata_Relocation::spec(klass_index); 6.204 - Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec); 6.205 + Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec); 6.206 } 6.207 6.208 void MacroAssembler::reinit_heapbase() { 6.209 if (UseCompressedOops || UseCompressedKlassPointers) { 6.210 - movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr())); 6.211 - } 6.212 -} 6.213 + if (Universe::heap() != NULL) { 6.214 + if (Universe::narrow_oop_base() == NULL) { 6.215 + MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 6.216 + } else { 6.217 + mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base()); 6.218 + } 6.219 + } else { 6.220 + movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr())); 6.221 + } 6.222 + } 6.223 +} 6.224 + 6.225 #endif // _LP64 6.226 6.227
7.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp Fri Aug 16 04:24:07 2013 -0700 7.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Fri Aug 16 10:06:58 2013 -0700 7.3 @@ -371,6 +371,10 @@ 7.4 void cmp_narrow_klass(Register dst, Klass* k); 7.5 void cmp_narrow_klass(Address dst, Klass* k); 7.6 7.7 + // Returns the byte size of the instructions generated by decode_klass_not_null() 7.8 + // when compressed klass pointers are being used. 7.9 + static int instr_size_for_decode_klass_not_null(); 7.10 + 7.11 // if heap base register is used - reinit it with the correct value 7.12 void reinit_heapbase(); 7.13
8.1 --- a/src/cpu/x86/vm/relocInfo_x86.cpp Fri Aug 16 04:24:07 2013 -0700 8.2 +++ b/src/cpu/x86/vm/relocInfo_x86.cpp Fri Aug 16 10:06:58 2013 -0700 8.3 @@ -1,5 +1,5 @@ 8.4 /* 8.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. 8.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 8.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 8.8 * 8.9 * This code is free software; you can redistribute it and/or modify it 8.10 @@ -55,9 +55,9 @@ 8.11 } 8.12 } else { 8.13 if (verify_only) { 8.14 - assert(*(uint32_t*) disp == oopDesc::encode_klass((Klass*)x), "instructions must match"); 8.15 + assert(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match"); 8.16 } else { 8.17 - *(int32_t*) disp = oopDesc::encode_klass((Klass*)x); 8.18 + *(int32_t*) disp = Klass::encode_klass((Klass*)x); 8.19 } 8.20 } 8.21 } else {
9.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Aug 16 04:24:07 2013 -0700 9.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Aug 16 10:06:58 2013 -0700 9.3 @@ -675,7 +675,6 @@ 9.4 __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass 9.5 __ testptr(rax, rax); 9.6 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 9.7 - // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers 9.8 9.9 // return if everything seems ok 9.10 __ bind(exit);
10.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Fri Aug 16 04:24:07 2013 -0700 10.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Fri Aug 16 10:06:58 2013 -0700 10.3 @@ -1021,7 +1021,6 @@ 10.4 __ load_klass(rax, rax); // get klass 10.5 __ testptr(rax, rax); 10.6 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 10.7 - // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers 10.8 10.9 // return if everything seems ok 10.10 __ bind(exit);
11.1 --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp Fri Aug 16 04:24:07 2013 -0700 11.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp Fri Aug 16 10:06:58 2013 -0700 11.3 @@ -211,11 +211,11 @@ 11.4 if (is_vtable_stub) { 11.5 // Vtable stub size 11.6 return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) + 11.7 - (UseCompressedKlassPointers ? 16 : 0); // 1 leaq can be 3 bytes + 1 long 11.8 + (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); 11.9 } else { 11.10 // Itable stub size 11.11 return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) + 11.12 - (UseCompressedKlassPointers ? 32 : 0); // 2 leaqs 11.13 + (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); 11.14 } 11.15 // In order to tune these parameters, run the JVM with VM options 11.16 // +PrintMiscellaneous and +WizardMode to see information about
12.1 --- a/src/cpu/x86/vm/x86_64.ad Fri Aug 16 04:24:07 2013 -0700 12.2 +++ b/src/cpu/x86/vm/x86_64.ad Fri Aug 16 10:06:58 2013 -0700 12.3 @@ -1393,9 +1393,7 @@ 12.4 { 12.5 if (UseCompressedKlassPointers) { 12.6 st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); 12.7 - if (Universe::narrow_klass_shift() != 0) { 12.8 - st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1"); 12.9 - } 12.10 + st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1"); 12.11 st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check"); 12.12 } else { 12.13 st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t" 12.14 @@ -4035,146 +4033,6 @@ 12.15 %} 12.16 %} 12.17 12.18 -operand indirectNarrowKlass(rRegN reg) 12.19 -%{ 12.20 - predicate(Universe::narrow_klass_shift() == 0); 12.21 - constraint(ALLOC_IN_RC(ptr_reg)); 12.22 - match(DecodeNKlass reg); 12.23 - 12.24 - format %{ "[$reg]" %} 12.25 - interface(MEMORY_INTER) %{ 12.26 - base($reg); 12.27 - index(0x4); 12.28 - scale(0x0); 12.29 - disp(0x0); 12.30 - %} 12.31 -%} 12.32 - 12.33 -operand indOffset8NarrowKlass(rRegN reg, immL8 off) 12.34 -%{ 12.35 - predicate(Universe::narrow_klass_shift() == 0); 12.36 - constraint(ALLOC_IN_RC(ptr_reg)); 12.37 - match(AddP (DecodeNKlass reg) off); 12.38 - 12.39 - format %{ "[$reg + $off (8-bit)]" %} 12.40 - interface(MEMORY_INTER) %{ 12.41 - base($reg); 12.42 - index(0x4); 12.43 - scale(0x0); 12.44 - disp($off); 12.45 - %} 12.46 -%} 12.47 - 12.48 -operand indOffset32NarrowKlass(rRegN reg, immL32 off) 12.49 -%{ 12.50 - predicate(Universe::narrow_klass_shift() == 0); 12.51 - constraint(ALLOC_IN_RC(ptr_reg)); 12.52 - match(AddP (DecodeNKlass reg) off); 12.53 - 12.54 - format %{ "[$reg + $off (32-bit)]" %} 12.55 - interface(MEMORY_INTER) %{ 12.56 - base($reg); 12.57 - index(0x4); 12.58 - scale(0x0); 12.59 - disp($off); 12.60 - %} 12.61 -%} 12.62 - 12.63 -operand indIndexOffsetNarrowKlass(rRegN reg, rRegL lreg, immL32 off) 12.64 -%{ 
12.65 - predicate(Universe::narrow_klass_shift() == 0); 12.66 - constraint(ALLOC_IN_RC(ptr_reg)); 12.67 - match(AddP (AddP (DecodeNKlass reg) lreg) off); 12.68 - 12.69 - op_cost(10); 12.70 - format %{"[$reg + $off + $lreg]" %} 12.71 - interface(MEMORY_INTER) %{ 12.72 - base($reg); 12.73 - index($lreg); 12.74 - scale(0x0); 12.75 - disp($off); 12.76 - %} 12.77 -%} 12.78 - 12.79 -operand indIndexNarrowKlass(rRegN reg, rRegL lreg) 12.80 -%{ 12.81 - predicate(Universe::narrow_klass_shift() == 0); 12.82 - constraint(ALLOC_IN_RC(ptr_reg)); 12.83 - match(AddP (DecodeNKlass reg) lreg); 12.84 - 12.85 - op_cost(10); 12.86 - format %{"[$reg + $lreg]" %} 12.87 - interface(MEMORY_INTER) %{ 12.88 - base($reg); 12.89 - index($lreg); 12.90 - scale(0x0); 12.91 - disp(0x0); 12.92 - %} 12.93 -%} 12.94 - 12.95 -operand indIndexScaleNarrowKlass(rRegN reg, rRegL lreg, immI2 scale) 12.96 -%{ 12.97 - predicate(Universe::narrow_klass_shift() == 0); 12.98 - constraint(ALLOC_IN_RC(ptr_reg)); 12.99 - match(AddP (DecodeNKlass reg) (LShiftL lreg scale)); 12.100 - 12.101 - op_cost(10); 12.102 - format %{"[$reg + $lreg << $scale]" %} 12.103 - interface(MEMORY_INTER) %{ 12.104 - base($reg); 12.105 - index($lreg); 12.106 - scale($scale); 12.107 - disp(0x0); 12.108 - %} 12.109 -%} 12.110 - 12.111 -operand indIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegL lreg, immI2 scale) 12.112 -%{ 12.113 - predicate(Universe::narrow_klass_shift() == 0); 12.114 - constraint(ALLOC_IN_RC(ptr_reg)); 12.115 - match(AddP (AddP (DecodeNKlass reg) (LShiftL lreg scale)) off); 12.116 - 12.117 - op_cost(10); 12.118 - format %{"[$reg + $off + $lreg << $scale]" %} 12.119 - interface(MEMORY_INTER) %{ 12.120 - base($reg); 12.121 - index($lreg); 12.122 - scale($scale); 12.123 - disp($off); 12.124 - %} 12.125 -%} 12.126 - 12.127 -operand indCompressedKlassOffset(rRegN reg, immL32 off) %{ 12.128 - predicate(UseCompressedKlassPointers && (Universe::narrow_klass_shift() == Address::times_8)); 12.129 - 
constraint(ALLOC_IN_RC(ptr_reg)); 12.130 - match(AddP (DecodeNKlass reg) off); 12.131 - 12.132 - op_cost(10); 12.133 - format %{"[R12 + $reg << 3 + $off] (compressed klass addressing)" %} 12.134 - interface(MEMORY_INTER) %{ 12.135 - base(0xc); // R12 12.136 - index($reg); 12.137 - scale(0x3); 12.138 - disp($off); 12.139 - %} 12.140 -%} 12.141 - 12.142 -operand indPosIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegI idx, immI2 scale) 12.143 -%{ 12.144 - constraint(ALLOC_IN_RC(ptr_reg)); 12.145 - predicate(Universe::narrow_klass_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0); 12.146 - match(AddP (AddP (DecodeNKlass reg) (LShiftL (ConvI2L idx) scale)) off); 12.147 - 12.148 - op_cost(10); 12.149 - format %{"[$reg + $off + $idx << $scale]" %} 12.150 - interface(MEMORY_INTER) %{ 12.151 - base($reg); 12.152 - index($idx); 12.153 - scale($scale); 12.154 - disp($off); 12.155 - %} 12.156 -%} 12.157 - 12.158 //----------Special Memory Operands-------------------------------------------- 12.159 // Stack Slot Operand - This operand is used for loading and storing temporary 12.160 // values on the stack where a match requires a value to 12.161 @@ -4345,11 +4203,7 @@ 12.162 indCompressedOopOffset, 12.163 indirectNarrow, indOffset8Narrow, indOffset32Narrow, 12.164 indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow, 12.165 - indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow, 12.166 - indCompressedKlassOffset, 12.167 - indirectNarrowKlass, indOffset8NarrowKlass, indOffset32NarrowKlass, 12.168 - indIndexOffsetNarrowKlass, indIndexNarrowKlass, indIndexScaleNarrowKlass, 12.169 - indIndexScaleOffsetNarrowKlass, indPosIndexScaleOffsetNarrowKlass); 12.170 + indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow); 12.171 12.172 //----------PIPELINE----------------------------------------------------------- 12.173 // Rules which define the behavior of the target architectures pipeline. 
12.174 @@ -6665,7 +6519,7 @@ 12.175 instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{ 12.176 match(Set dst (EncodePKlass src)); 12.177 effect(KILL cr); 12.178 - format %{ "encode_heap_oop_not_null $dst,$src" %} 12.179 + format %{ "encode_klass_not_null $dst,$src" %} 12.180 ins_encode %{ 12.181 __ encode_klass_not_null($dst$$Register, $src$$Register); 12.182 %} 12.183 @@ -6675,7 +6529,7 @@ 12.184 instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{ 12.185 match(Set dst (DecodeNKlass src)); 12.186 effect(KILL cr); 12.187 - format %{ "decode_heap_oop_not_null $dst,$src" %} 12.188 + format %{ "decode_klass_not_null $dst,$src" %} 12.189 ins_encode %{ 12.190 Register s = $src$$Register; 12.191 Register d = $dst$$Register;
13.1 --- a/src/share/vm/memory/filemap.cpp Fri Aug 16 04:24:07 2013 -0700 13.2 +++ b/src/share/vm/memory/filemap.cpp Fri Aug 16 10:06:58 2013 -0700 13.3 @@ -362,15 +362,12 @@ 13.4 ReservedSpace FileMapInfo::reserve_shared_memory() { 13.5 struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0]; 13.6 char* requested_addr = si->_base; 13.7 - size_t alignment = os::vm_allocation_granularity(); 13.8 13.9 - size_t size = align_size_up(SharedReadOnlySize + SharedReadWriteSize + 13.10 - SharedMiscDataSize + SharedMiscCodeSize, 13.11 - alignment); 13.12 + size_t size = FileMapInfo::shared_spaces_size(); 13.13 13.14 // Reserve the space first, then map otherwise map will go right over some 13.15 // other reserved memory (like the code cache). 13.16 - ReservedSpace rs(size, alignment, false, requested_addr); 13.17 + ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr); 13.18 if (!rs.is_reserved()) { 13.19 fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr)); 13.20 return rs; 13.21 @@ -559,3 +556,19 @@ 13.22 si->_base, si->_base + si->_used); 13.23 } 13.24 } 13.25 + 13.26 +// Unmap mapped regions of shared space. 13.27 +void FileMapInfo::stop_sharing_and_unmap(const char* msg) { 13.28 + FileMapInfo *map_info = FileMapInfo::current_info(); 13.29 + if (map_info) { 13.30 + map_info->fail_continue(msg); 13.31 + for (int i = 0; i < MetaspaceShared::n_regions; i++) { 13.32 + if (map_info->_header._space[i]._base != NULL) { 13.33 + map_info->unmap_region(i); 13.34 + map_info->_header._space[i]._base = NULL; 13.35 + } 13.36 + } 13.37 + } else if (DumpSharedSpaces) { 13.38 + fail_stop(msg, NULL); 13.39 + } 13.40 +}
14.1 --- a/src/share/vm/memory/filemap.hpp Fri Aug 16 04:24:07 2013 -0700 14.2 +++ b/src/share/vm/memory/filemap.hpp Fri Aug 16 10:06:58 2013 -0700 14.3 @@ -1,5 +1,5 @@ 14.4 /* 14.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 14.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 14.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 14.8 * 14.9 * This code is free software; you can redistribute it and/or modify it 14.10 @@ -150,6 +150,15 @@ 14.11 // Return true if given address is in the mapped shared space. 14.12 bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false); 14.13 void print_shared_spaces() NOT_CDS_RETURN; 14.14 + 14.15 + static size_t shared_spaces_size() { 14.16 + return align_size_up(SharedReadOnlySize + SharedReadWriteSize + 14.17 + SharedMiscDataSize + SharedMiscCodeSize, 14.18 + os::vm_allocation_granularity()); 14.19 + } 14.20 + 14.21 + // Stop CDS sharing and unmap CDS regions. 14.22 + static void stop_sharing_and_unmap(const char* msg); 14.23 }; 14.24 14.25 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
15.1 --- a/src/share/vm/memory/heap.cpp Fri Aug 16 04:24:07 2013 -0700 15.2 +++ b/src/share/vm/memory/heap.cpp Fri Aug 16 10:06:58 2013 -0700 15.3 @@ -1,5 +1,5 @@ 15.4 /* 15.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 15.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 15.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 15.8 * 15.9 * This code is free software; you can redistribute it and/or modify it 15.10 @@ -118,9 +118,12 @@ 15.11 _number_of_committed_segments = size_to_segments(_memory.committed_size()); 15.12 _number_of_reserved_segments = size_to_segments(_memory.reserved_size()); 15.13 assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); 15.14 + const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity); 15.15 + const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment); 15.16 + const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments); 15.17 15.18 // reserve space for _segmap 15.19 - if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) { 15.20 + if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) { 15.21 return false; 15.22 } 15.23
16.1 --- a/src/share/vm/memory/metaspace.cpp Fri Aug 16 04:24:07 2013 -0700 16.2 +++ b/src/share/vm/memory/metaspace.cpp Fri Aug 16 10:06:58 2013 -0700 16.3 @@ -35,6 +35,7 @@ 16.4 #include "memory/resourceArea.hpp" 16.5 #include "memory/universe.hpp" 16.6 #include "runtime/globals.hpp" 16.7 +#include "runtime/java.hpp" 16.8 #include "runtime/mutex.hpp" 16.9 #include "runtime/orderAccess.hpp" 16.10 #include "services/memTracker.hpp" 16.11 @@ -54,6 +55,8 @@ 16.12 16.13 MetaWord* last_allocated = 0; 16.14 16.15 +size_t Metaspace::_class_metaspace_size; 16.16 + 16.17 // Used in declarations in SpaceManager and ChunkManager 16.18 enum ChunkIndex { 16.19 ZeroIndex = 0, 16.20 @@ -261,10 +264,6 @@ 16.21 // count of chunks contained in this VirtualSpace 16.22 uintx _container_count; 16.23 16.24 - // Convenience functions for logical bottom and end 16.25 - MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } 16.26 - MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } 16.27 - 16.28 // Convenience functions to access the _virtual_space 16.29 char* low() const { return virtual_space()->low(); } 16.30 char* high() const { return virtual_space()->high(); } 16.31 @@ -284,6 +283,10 @@ 16.32 VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} 16.33 ~VirtualSpaceNode(); 16.34 16.35 + // Convenience functions for logical bottom and end 16.36 + MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } 16.37 + MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } 16.38 + 16.39 // address of next available space in _virtual_space; 16.40 // Accessors 16.41 VirtualSpaceNode* next() { return _next; } 16.42 @@ -1313,7 +1316,8 @@ 16.43 16.44 // Class virtual space should always be expanded. Call GC for the other 16.45 // metadata virtual space. 
16.46 - if (vsl == Metaspace::class_space_list()) return true; 16.47 + if (Metaspace::using_class_space() && 16.48 + (vsl == Metaspace::class_space_list())) return true; 16.49 16.50 // If this is part of an allocation after a GC, expand 16.51 // unconditionally. 16.52 @@ -2257,7 +2261,7 @@ 16.53 size_t raw_word_size = get_raw_word_size(word_size); 16.54 size_t min_size = TreeChunk<Metablock, FreeList>::min_size(); 16.55 assert(raw_word_size >= min_size, 16.56 - err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size)); 16.57 + err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size)); 16.58 block_freelists()->return_block(p, raw_word_size); 16.59 } 16.60 16.61 @@ -2374,7 +2378,7 @@ 16.62 if (result == NULL) { 16.63 result = grow_and_allocate(word_size); 16.64 } 16.65 - if (result > 0) { 16.66 + if (result != 0) { 16.67 inc_used_metrics(word_size); 16.68 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 16.69 "Head of the list is being allocated"); 16.70 @@ -2478,7 +2482,8 @@ 16.71 16.72 size_t MetaspaceAux::free_bytes() { 16.73 size_t result = 0; 16.74 - if (Metaspace::class_space_list() != NULL) { 16.75 + if (Metaspace::using_class_space() && 16.76 + (Metaspace::class_space_list() != NULL)) { 16.77 result = result + Metaspace::class_space_list()->free_bytes(); 16.78 } 16.79 if (Metaspace::space_list() != NULL) { 16.80 @@ -2549,6 +2554,9 @@ 16.81 } 16.82 16.83 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 16.84 + if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 16.85 + return 0; 16.86 + } 16.87 // Don't count the space in the freelists. That space will be 16.88 // added to the capacity calculation as needed. 16.89 size_t capacity = 0; 16.90 @@ -2563,18 +2571,23 @@ 16.91 } 16.92 16.93 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) { 16.94 - size_t reserved = (mdtype == Metaspace::ClassType) ? 
16.95 - Metaspace::class_space_list()->virtual_space_total() : 16.96 - Metaspace::space_list()->virtual_space_total(); 16.97 - return reserved * BytesPerWord; 16.98 + if (mdtype == Metaspace::ClassType) { 16.99 + return Metaspace::using_class_space() ? 16.100 + Metaspace::class_space_list()->virtual_space_total() * BytesPerWord : 0; 16.101 + } else { 16.102 + return Metaspace::space_list()->virtual_space_total() * BytesPerWord; 16.103 + } 16.104 } 16.105 16.106 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); } 16.107 16.108 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) { 16.109 + if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 16.110 + return 0; 16.111 + } 16.112 ChunkManager* chunk = (mdtype == Metaspace::ClassType) ? 16.113 - Metaspace::class_space_list()->chunk_manager() : 16.114 - Metaspace::space_list()->chunk_manager(); 16.115 + Metaspace::class_space_list()->chunk_manager() : 16.116 + Metaspace::space_list()->chunk_manager(); 16.117 chunk->slow_verify(); 16.118 return chunk->free_chunks_total(); 16.119 } 16.120 @@ -2615,7 +2628,6 @@ 16.121 16.122 // This is printed when PrintGCDetails 16.123 void MetaspaceAux::print_on(outputStream* out) { 16.124 - Metaspace::MetadataType ct = Metaspace::ClassType; 16.125 Metaspace::MetadataType nct = Metaspace::NonClassType; 16.126 16.127 out->print_cr(" Metaspace total " 16.128 @@ -2629,12 +2641,15 @@ 16.129 allocated_capacity_bytes(nct)/K, 16.130 allocated_used_bytes(nct)/K, 16.131 reserved_in_bytes(nct)/K); 16.132 - out->print_cr(" class space " 16.133 - SIZE_FORMAT "K, used " SIZE_FORMAT "K," 16.134 - " reserved " SIZE_FORMAT "K", 16.135 - allocated_capacity_bytes(ct)/K, 16.136 - allocated_used_bytes(ct)/K, 16.137 - reserved_in_bytes(ct)/K); 16.138 + if (Metaspace::using_class_space()) { 16.139 + Metaspace::MetadataType ct = Metaspace::ClassType; 16.140 + out->print_cr(" class space " 16.141 + SIZE_FORMAT "K, used " SIZE_FORMAT "K," 
16.142 + " reserved " SIZE_FORMAT "K", 16.143 + allocated_capacity_bytes(ct)/K, 16.144 + allocated_used_bytes(ct)/K, 16.145 + reserved_in_bytes(ct)/K); 16.146 + } 16.147 } 16.148 16.149 // Print information for class space and data space separately. 16.150 @@ -2659,13 +2674,37 @@ 16.151 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 16.152 } 16.153 16.154 -// Print total fragmentation for class and data metaspaces separately 16.155 +// Print total fragmentation for class metaspaces 16.156 +void MetaspaceAux::print_class_waste(outputStream* out) { 16.157 + assert(Metaspace::using_class_space(), "class metaspace not used"); 16.158 + size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 16.159 + size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 16.160 + ClassLoaderDataGraphMetaspaceIterator iter; 16.161 + while (iter.repeat()) { 16.162 + Metaspace* msp = iter.get_next(); 16.163 + if (msp != NULL) { 16.164 + cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 16.165 + cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 16.166 + cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 16.167 + cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 16.168 + cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 16.169 + cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 16.170 + cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 16.171 + } 16.172 + } 16.173 + out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 16.174 + SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 16.175 + SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 16.176 + "large count " SIZE_FORMAT, 16.177 + cls_specialized_count, cls_specialized_waste, 16.178 + 
cls_small_count, cls_small_waste, 16.179 + cls_medium_count, cls_medium_waste, cls_humongous_count); 16.180 +} 16.181 + 16.182 +// Print total fragmentation for data and class metaspaces separately 16.183 void MetaspaceAux::print_waste(outputStream* out) { 16.184 - 16.185 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 16.186 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 16.187 - size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 16.188 - size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 16.189 16.190 ClassLoaderDataGraphMetaspaceIterator iter; 16.191 while (iter.repeat()) { 16.192 @@ -2678,14 +2717,6 @@ 16.193 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 16.194 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 16.195 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 16.196 - 16.197 - cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 16.198 - cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 16.199 - cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 16.200 - cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 16.201 - cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 16.202 - cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 16.203 - cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 16.204 } 16.205 } 16.206 out->print_cr("Total fragmentation waste (words) doesn't count free space"); 16.207 @@ -2695,13 +2726,9 @@ 16.208 "large count " SIZE_FORMAT, 16.209 specialized_count, specialized_waste, small_count, 16.210 small_waste, medium_count, medium_waste, humongous_count); 16.211 - out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", 
" 16.212 - SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 16.213 - SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 16.214 - "large count " SIZE_FORMAT, 16.215 - cls_specialized_count, cls_specialized_waste, 16.216 - cls_small_count, cls_small_waste, 16.217 - cls_medium_count, cls_medium_waste, cls_humongous_count); 16.218 + if (Metaspace::using_class_space()) { 16.219 + print_class_waste(out); 16.220 + } 16.221 } 16.222 16.223 // Dump global metaspace things from the end of ClassLoaderDataGraph 16.224 @@ -2714,7 +2741,9 @@ 16.225 16.226 void MetaspaceAux::verify_free_chunks() { 16.227 Metaspace::space_list()->chunk_manager()->verify(); 16.228 - Metaspace::class_space_list()->chunk_manager()->verify(); 16.229 + if (Metaspace::using_class_space()) { 16.230 + Metaspace::class_space_list()->chunk_manager()->verify(); 16.231 + } 16.232 } 16.233 16.234 void MetaspaceAux::verify_capacity() { 16.235 @@ -2776,7 +2805,9 @@ 16.236 16.237 Metaspace::~Metaspace() { 16.238 delete _vsm; 16.239 - delete _class_vsm; 16.240 + if (using_class_space()) { 16.241 + delete _class_vsm; 16.242 + } 16.243 } 16.244 16.245 VirtualSpaceList* Metaspace::_space_list = NULL; 16.246 @@ -2784,9 +2815,123 @@ 16.247 16.248 #define VIRTUALSPACEMULTIPLIER 2 16.249 16.250 +#ifdef _LP64 16.251 +void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 16.252 + // Figure out the narrow_klass_base and the narrow_klass_shift. The 16.253 + // narrow_klass_base is the lower of the metaspace base and the cds base 16.254 + // (if cds is enabled). The narrow_klass_shift depends on the distance 16.255 + // between the lower base and higher address. 
16.256 + address lower_base; 16.257 + address higher_address; 16.258 + if (UseSharedSpaces) { 16.259 + higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 16.260 + (address)(metaspace_base + class_metaspace_size())); 16.261 + lower_base = MIN2(metaspace_base, cds_base); 16.262 + } else { 16.263 + higher_address = metaspace_base + class_metaspace_size(); 16.264 + lower_base = metaspace_base; 16.265 + } 16.266 + Universe::set_narrow_klass_base(lower_base); 16.267 + if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) { 16.268 + Universe::set_narrow_klass_shift(0); 16.269 + } else { 16.270 + assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 16.271 + Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 16.272 + } 16.273 +} 16.274 + 16.275 +// Return TRUE if the specified metaspace_base and cds_base are close enough 16.276 +// to work with compressed klass pointers. 16.277 +bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 16.278 + assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 16.279 + assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs"); 16.280 + address lower_base = MIN2((address)metaspace_base, cds_base); 16.281 + address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 16.282 + (address)(metaspace_base + class_metaspace_size())); 16.283 + return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint); 16.284 +} 16.285 + 16.286 +// Try to allocate the metaspace at the requested addr. 
16.287 +void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 16.288 + assert(using_class_space(), "called improperly"); 16.289 + assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs"); 16.290 + assert(class_metaspace_size() < KlassEncodingMetaspaceMax, 16.291 + "Metaspace size is too big"); 16.292 + 16.293 + ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), 16.294 + os::vm_allocation_granularity(), 16.295 + false, requested_addr, 0); 16.296 + if (!metaspace_rs.is_reserved()) { 16.297 + if (UseSharedSpaces) { 16.298 + // Keep trying to allocate the metaspace, increasing the requested_addr 16.299 + // by 1GB each time, until we reach an address that will no longer allow 16.300 + // use of CDS with compressed klass pointers. 16.301 + char *addr = requested_addr; 16.302 + while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) && 16.303 + can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) { 16.304 + addr = addr + 1*G; 16.305 + metaspace_rs = ReservedSpace(class_metaspace_size(), 16.306 + os::vm_allocation_granularity(), false, addr, 0); 16.307 + } 16.308 + } 16.309 + 16.310 + // If no successful allocation then try to allocate the space anywhere. If 16.311 + // that fails then OOM doom. At this point we cannot try allocating the 16.312 + // metaspace as if UseCompressedKlassPointers is off because too much 16.313 + // initialization has happened that depends on UseCompressedKlassPointers. 16.314 + // So, UseCompressedKlassPointers cannot be turned off at this point. 16.315 + if (!metaspace_rs.is_reserved()) { 16.316 + metaspace_rs = ReservedSpace(class_metaspace_size(), 16.317 + os::vm_allocation_granularity(), false); 16.318 + if (!metaspace_rs.is_reserved()) { 16.319 + vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 16.320 + class_metaspace_size())); 16.321 + } 16.322 + } 16.323 + } 16.324 + 16.325 + // If we got here then the metaspace got allocated. 
16.326 + MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 16.327 + 16.328 + // Verify that we can use shared spaces. Otherwise, turn off CDS. 16.329 + if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 16.330 + FileMapInfo::stop_sharing_and_unmap( 16.331 + "Could not allocate metaspace at a compatible address"); 16.332 + } 16.333 + 16.334 + set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 16.335 + UseSharedSpaces ? (address)cds_base : 0); 16.336 + 16.337 + initialize_class_space(metaspace_rs); 16.338 + 16.339 + if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 16.340 + gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, 16.341 + Universe::narrow_klass_base(), Universe::narrow_klass_shift()); 16.342 + gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, 16.343 + class_metaspace_size(), metaspace_rs.base(), requested_addr); 16.344 + } 16.345 +} 16.346 + 16.347 +// For UseCompressedKlassPointers the class space is reserved above the top of 16.348 +// the Java heap. The argument passed in is at the base of the compressed space. 16.349 +void Metaspace::initialize_class_space(ReservedSpace rs) { 16.350 + // The reserved space size may be bigger because of alignment, esp with UseLargePages 16.351 + assert(rs.size() >= ClassMetaspaceSize, 16.352 + err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize)); 16.353 + assert(using_class_space(), "Must be using class space"); 16.354 + _class_space_list = new VirtualSpaceList(rs); 16.355 +} 16.356 + 16.357 +#endif 16.358 + 16.359 void Metaspace::global_initialize() { 16.360 // Initialize the alignment for shared spaces. 
16.361 int max_alignment = os::vm_page_size(); 16.362 + size_t cds_total = 0; 16.363 + 16.364 + set_class_metaspace_size(align_size_up(ClassMetaspaceSize, 16.365 + os::vm_allocation_granularity())); 16.366 + 16.367 MetaspaceShared::set_max_alignment(max_alignment); 16.368 16.369 if (DumpSharedSpaces) { 16.370 @@ -2798,15 +2943,31 @@ 16.371 // Initialize with the sum of the shared space sizes. The read-only 16.372 // and read write metaspace chunks will be allocated out of this and the 16.373 // remainder is the misc code and data chunks. 16.374 - size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize + 16.375 - SharedMiscDataSize + SharedMiscCodeSize, 16.376 - os::vm_allocation_granularity()); 16.377 - size_t word_size = total/wordSize; 16.378 - _space_list = new VirtualSpaceList(word_size); 16.379 + cds_total = FileMapInfo::shared_spaces_size(); 16.380 + _space_list = new VirtualSpaceList(cds_total/wordSize); 16.381 + 16.382 +#ifdef _LP64 16.383 + // Set the compressed klass pointer base so that decoding of these pointers works 16.384 + // properly when creating the shared archive. 16.385 + assert(UseCompressedOops && UseCompressedKlassPointers, 16.386 + "UseCompressedOops and UseCompressedKlassPointers must be set"); 16.387 + Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); 16.388 + if (TraceMetavirtualspaceAllocation && Verbose) { 16.389 + gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, 16.390 + _space_list->current_virtual_space()->bottom()); 16.391 + } 16.392 + 16.393 + // Set the shift to zero. 
16.394 + assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total, 16.395 + "CDS region is too large"); 16.396 + Universe::set_narrow_klass_shift(0); 16.397 +#endif 16.398 + 16.399 } else { 16.400 // If using shared space, open the file that contains the shared space 16.401 // and map in the memory before initializing the rest of metaspace (so 16.402 // the addresses don't conflict) 16.403 + address cds_address = NULL; 16.404 if (UseSharedSpaces) { 16.405 FileMapInfo* mapinfo = new FileMapInfo(); 16.406 memset(mapinfo, 0, sizeof(FileMapInfo)); 16.407 @@ -2821,8 +2982,22 @@ 16.408 assert(!mapinfo->is_open() && !UseSharedSpaces, 16.409 "archive file not closed or shared spaces not disabled."); 16.410 } 16.411 + cds_total = FileMapInfo::shared_spaces_size(); 16.412 + cds_address = (address)mapinfo->region_base(0); 16.413 } 16.414 16.415 +#ifdef _LP64 16.416 + // If UseCompressedKlassPointers is set then allocate the metaspace area 16.417 + // above the heap and above the CDS area (if it exists). 16.418 + if (using_class_space()) { 16.419 + if (UseSharedSpaces) { 16.420 + allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address); 16.421 + } else { 16.422 + allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); 16.423 + } 16.424 + } 16.425 +#endif 16.426 + 16.427 // Initialize these before initializing the VirtualSpaceList 16.428 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; 16.429 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size); 16.430 @@ -2840,39 +3015,28 @@ 16.431 } 16.432 } 16.433 16.434 -// For UseCompressedKlassPointers the class space is reserved as a piece of the 16.435 -// Java heap because the compression algorithm is the same for each. 
The 16.436 -// argument passed in is at the top of the compressed space 16.437 -void Metaspace::initialize_class_space(ReservedSpace rs) { 16.438 - // The reserved space size may be bigger because of alignment, esp with UseLargePages 16.439 - assert(rs.size() >= ClassMetaspaceSize, 16.440 - err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize)); 16.441 - _class_space_list = new VirtualSpaceList(rs); 16.442 -} 16.443 - 16.444 -void Metaspace::initialize(Mutex* lock, 16.445 - MetaspaceType type) { 16.446 +void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 16.447 16.448 assert(space_list() != NULL, 16.449 "Metadata VirtualSpaceList has not been initialized"); 16.450 16.451 - _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list()); 16.452 + _vsm = new SpaceManager(NonClassType, lock, space_list()); 16.453 if (_vsm == NULL) { 16.454 return; 16.455 } 16.456 size_t word_size; 16.457 size_t class_word_size; 16.458 - vsm()->get_initial_chunk_sizes(type, 16.459 - &word_size, 16.460 - &class_word_size); 16.461 - 16.462 - assert(class_space_list() != NULL, 16.463 - "Class VirtualSpaceList has not been initialized"); 16.464 - 16.465 - // Allocate SpaceManager for classes. 16.466 - _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list()); 16.467 - if (_class_vsm == NULL) { 16.468 - return; 16.469 + vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 16.470 + 16.471 + if (using_class_space()) { 16.472 + assert(class_space_list() != NULL, 16.473 + "Class VirtualSpaceList has not been initialized"); 16.474 + 16.475 + // Allocate SpaceManager for classes. 
16.476 + _class_vsm = new SpaceManager(ClassType, lock, class_space_list()); 16.477 + if (_class_vsm == NULL) { 16.478 + return; 16.479 + } 16.480 } 16.481 16.482 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 16.483 @@ -2888,11 +3052,13 @@ 16.484 } 16.485 16.486 // Allocate chunk for class metadata objects 16.487 - Metachunk* class_chunk = 16.488 - class_space_list()->get_initialization_chunk(class_word_size, 16.489 - class_vsm()->medium_chunk_bunch()); 16.490 - if (class_chunk != NULL) { 16.491 - class_vsm()->add_chunk(class_chunk, true); 16.492 + if (using_class_space()) { 16.493 + Metachunk* class_chunk = 16.494 + class_space_list()->get_initialization_chunk(class_word_size, 16.495 + class_vsm()->medium_chunk_bunch()); 16.496 + if (class_chunk != NULL) { 16.497 + class_vsm()->add_chunk(class_chunk, true); 16.498 + } 16.499 } 16.500 16.501 _alloc_record_head = NULL; 16.502 @@ -2906,7 +3072,8 @@ 16.503 16.504 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 16.505 // DumpSharedSpaces doesn't use class metadata area (yet) 16.506 - if (mdtype == ClassType && !DumpSharedSpaces) { 16.507 + // Also, don't use class_vsm() unless UseCompressedKlassPointers is true. 16.508 + if (mdtype == ClassType && using_class_space()) { 16.509 return class_vsm()->allocate(word_size); 16.510 } else { 16.511 return vsm()->allocate(word_size); 16.512 @@ -2937,14 +3104,19 @@ 16.513 } 16.514 16.515 size_t Metaspace::used_words_slow(MetadataType mdtype) const { 16.516 - // return vsm()->allocated_used_words(); 16.517 - return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() : 16.518 - vsm()->sum_used_in_chunks_in_use(); // includes overhead! 16.519 + if (mdtype == ClassType) { 16.520 + return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0; 16.521 + } else { 16.522 + return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 
16.523 + } 16.524 } 16.525 16.526 size_t Metaspace::free_words(MetadataType mdtype) const { 16.527 - return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() : 16.528 - vsm()->sum_free_in_chunks_in_use(); 16.529 + if (mdtype == ClassType) { 16.530 + return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 16.531 + } else { 16.532 + return vsm()->sum_free_in_chunks_in_use(); 16.533 + } 16.534 } 16.535 16.536 // Space capacity in the Metaspace. It includes 16.537 @@ -2953,8 +3125,11 @@ 16.538 // in the space available in the dictionary which 16.539 // is already counted in some chunk. 16.540 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { 16.541 - return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() : 16.542 - vsm()->sum_capacity_in_chunks_in_use(); 16.543 + if (mdtype == ClassType) { 16.544 + return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0; 16.545 + } else { 16.546 + return vsm()->sum_capacity_in_chunks_in_use(); 16.547 + } 16.548 } 16.549 16.550 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { 16.551 @@ -2977,8 +3152,8 @@ 16.552 #endif 16.553 return; 16.554 } 16.555 - if (is_class) { 16.556 - class_vsm()->deallocate(ptr, word_size); 16.557 + if (is_class && using_class_space()) { 16.558 + class_vsm()->deallocate(ptr, word_size); 16.559 } else { 16.560 vsm()->deallocate(ptr, word_size); 16.561 } 16.562 @@ -2992,7 +3167,7 @@ 16.563 #endif 16.564 return; 16.565 } 16.566 - if (is_class) { 16.567 + if (is_class && using_class_space()) { 16.568 class_vsm()->deallocate(ptr, word_size); 16.569 } else { 16.570 vsm()->deallocate(ptr, word_size); 16.571 @@ -3101,14 +3276,18 @@ 16.572 MutexLockerEx cl(SpaceManager::expand_lock(), 16.573 Mutex::_no_safepoint_check_flag); 16.574 space_list()->purge(); 16.575 - class_space_list()->purge(); 16.576 + if (using_class_space()) { 16.577 + class_space_list()->purge(); 16.578 + } 16.579 } 16.580 16.581 void 
Metaspace::print_on(outputStream* out) const { 16.582 // Print both class virtual space counts and metaspace. 16.583 if (Verbose) { 16.584 - vsm()->print_on(out); 16.585 + vsm()->print_on(out); 16.586 + if (using_class_space()) { 16.587 class_vsm()->print_on(out); 16.588 + } 16.589 } 16.590 } 16.591 16.592 @@ -3122,17 +3301,21 @@ 16.593 // be needed. Note, locking this can cause inversion problems with the 16.594 // caller in MetaspaceObj::is_metadata() function. 16.595 return space_list()->contains(ptr) || 16.596 - class_space_list()->contains(ptr); 16.597 + (using_class_space() && class_space_list()->contains(ptr)); 16.598 } 16.599 16.600 void Metaspace::verify() { 16.601 vsm()->verify(); 16.602 - class_vsm()->verify(); 16.603 + if (using_class_space()) { 16.604 + class_vsm()->verify(); 16.605 + } 16.606 } 16.607 16.608 void Metaspace::dump(outputStream* const out) const { 16.609 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm()); 16.610 vsm()->dump(out); 16.611 - out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); 16.612 - class_vsm()->dump(out); 16.613 + if (using_class_space()) { 16.614 + out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); 16.615 + class_vsm()->dump(out); 16.616 + } 16.617 }
17.1 --- a/src/share/vm/memory/metaspace.hpp Fri Aug 16 04:24:07 2013 -0700 17.2 +++ b/src/share/vm/memory/metaspace.hpp Fri Aug 16 10:06:58 2013 -0700 17.3 @@ -105,6 +105,16 @@ 17.4 // Align up the word size to the allocation word size 17.5 static size_t align_word_size_up(size_t); 17.6 17.7 + // Aligned size of the metaspace. 17.8 + static size_t _class_metaspace_size; 17.9 + 17.10 + static size_t class_metaspace_size() { 17.11 + return _class_metaspace_size; 17.12 + } 17.13 + static void set_class_metaspace_size(size_t metaspace_size) { 17.14 + _class_metaspace_size = metaspace_size; 17.15 + } 17.16 + 17.17 static size_t _first_chunk_word_size; 17.18 static size_t _first_class_chunk_word_size; 17.19 17.20 @@ -131,6 +141,17 @@ 17.21 // maintain a single list for now. 17.22 void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size); 17.23 17.24 +#ifdef _LP64 17.25 + static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base); 17.26 + 17.27 + // Returns true if can use CDS with metaspace allocated as specified address. 
17.28 + static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base); 17.29 + 17.30 + static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base); 17.31 + 17.32 + static void initialize_class_space(ReservedSpace rs); 17.33 +#endif 17.34 + 17.35 class AllocRecord : public CHeapObj<mtClass> { 17.36 public: 17.37 AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size) 17.38 @@ -151,7 +172,6 @@ 17.39 17.40 // Initialize globals for Metaspace 17.41 static void global_initialize(); 17.42 - static void initialize_class_space(ReservedSpace rs); 17.43 17.44 static size_t first_chunk_word_size() { return _first_chunk_word_size; } 17.45 static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } 17.46 @@ -172,8 +192,6 @@ 17.47 MetaWord* expand_and_allocate(size_t size, 17.48 MetadataType mdtype); 17.49 17.50 - static bool is_initialized() { return _class_space_list != NULL; } 17.51 - 17.52 static bool contains(const void *ptr); 17.53 void dump(outputStream* const out) const; 17.54 17.55 @@ -190,6 +208,12 @@ 17.56 }; 17.57 17.58 void iterate(AllocRecordClosure *closure); 17.59 + 17.60 + // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False. 17.61 + static bool using_class_space() { 17.62 + return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces); 17.63 + } 17.64 + 17.65 }; 17.66 17.67 class MetaspaceAux : AllStatic { 17.68 @@ -243,8 +267,9 @@ 17.69 return _allocated_capacity_words[mdtype]; 17.70 } 17.71 static size_t allocated_capacity_words() { 17.72 - return _allocated_capacity_words[Metaspace::ClassType] + 17.73 - _allocated_capacity_words[Metaspace::NonClassType]; 17.74 + return _allocated_capacity_words[Metaspace::NonClassType] + 17.75 + (Metaspace::using_class_space() ? 
17.76 + _allocated_capacity_words[Metaspace::ClassType] : 0); 17.77 } 17.78 static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) { 17.79 return allocated_capacity_words(mdtype) * BytesPerWord; 17.80 @@ -257,8 +282,9 @@ 17.81 return _allocated_used_words[mdtype]; 17.82 } 17.83 static size_t allocated_used_words() { 17.84 - return _allocated_used_words[Metaspace::ClassType] + 17.85 - _allocated_used_words[Metaspace::NonClassType]; 17.86 + return _allocated_used_words[Metaspace::NonClassType] + 17.87 + (Metaspace::using_class_space() ? 17.88 + _allocated_used_words[Metaspace::ClassType] : 0); 17.89 } 17.90 static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) { 17.91 return allocated_used_words(mdtype) * BytesPerWord; 17.92 @@ -300,6 +326,7 @@ 17.93 static void print_on(outputStream * out); 17.94 static void print_on(outputStream * out, Metaspace::MetadataType mdtype); 17.95 17.96 + static void print_class_waste(outputStream* out); 17.97 static void print_waste(outputStream* out); 17.98 static void dump(outputStream* out); 17.99 static void verify_free_chunks();
18.1 --- a/src/share/vm/memory/metaspaceShared.cpp Fri Aug 16 04:24:07 2013 -0700 18.2 +++ b/src/share/vm/memory/metaspaceShared.cpp Fri Aug 16 10:06:58 2013 -0700 18.3 @@ -52,7 +52,6 @@ 18.4 int tag = 0; 18.5 soc->do_tag(--tag); 18.6 18.7 - assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive"); 18.8 // Verify the sizes of various metadata in the system. 18.9 soc->do_tag(sizeof(Method)); 18.10 soc->do_tag(sizeof(ConstMethod));
19.1 --- a/src/share/vm/memory/universe.cpp Fri Aug 16 04:24:07 2013 -0700 19.2 +++ b/src/share/vm/memory/universe.cpp Fri Aug 16 10:06:58 2013 -0700 19.3 @@ -145,8 +145,6 @@ 19.4 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true }; 19.5 address Universe::_narrow_ptrs_base; 19.6 19.7 -size_t Universe::_class_metaspace_size; 19.8 - 19.9 void Universe::basic_type_classes_do(void f(Klass*)) { 19.10 f(boolArrayKlassObj()); 19.11 f(byteArrayKlassObj()); 19.12 @@ -641,6 +639,8 @@ 19.13 return status; 19.14 } 19.15 19.16 + Metaspace::global_initialize(); 19.17 + 19.18 // Create memory for metadata. Must be after initializing heap for 19.19 // DumpSharedSpaces. 19.20 ClassLoaderData::init_null_class_loader_data(); 19.21 @@ -693,13 +693,9 @@ 19.22 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) { 19.23 base = HeapBaseMinAddress; 19.24 19.25 - // If the total size and the metaspace size are small enough to allow 19.26 - // UnscaledNarrowOop then just use UnscaledNarrowOop. 19.27 - } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) && 19.28 - (!UseCompressedKlassPointers || 19.29 - (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) { 19.30 - // We don't need to check the metaspace size here because it is always smaller 19.31 - // than total_size. 19.32 + // If the total size is small enough to allow UnscaledNarrowOop then 19.33 + // just use UnscaledNarrowOop. 19.34 + } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) { 19.35 if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) && 19.36 (Universe::narrow_oop_shift() == 0)) { 19.37 // Use 32-bits oops without encoding and 19.38 @@ -716,13 +712,6 @@ 19.39 base = (OopEncodingHeapMax - heap_size); 19.40 } 19.41 } 19.42 - 19.43 - // See if ZeroBaseNarrowOop encoding will work for a heap based at 19.44 - // (KlassEncodingMetaspaceMax - class_metaspace_size()). 
19.45 - } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) && 19.46 - (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) && 19.47 - (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) { 19.48 - base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size()); 19.49 } else { 19.50 // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or 19.51 // HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb. 19.52 @@ -732,8 +721,7 @@ 19.53 // Set narrow_oop_base and narrow_oop_use_implicit_null_checks 19.54 // used in ReservedHeapSpace() constructors. 19.55 // The final values will be set in initialize_heap() below. 19.56 - if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) && 19.57 - (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) { 19.58 + if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) { 19.59 // Use zero based compressed oops 19.60 Universe::set_narrow_oop_base(NULL); 19.61 // Don't need guard page for implicit checks in indexed 19.62 @@ -816,9 +804,7 @@ 19.63 tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB", 19.64 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M); 19.65 } 19.66 - if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) || 19.67 - (UseCompressedKlassPointers && 19.68 - ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) { 19.69 + if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) { 19.70 // Can't reserve heap below 32Gb. 
19.71 // keep the Universe::narrow_oop_base() set in Universe::reserve_heap() 19.72 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); 19.73 @@ -849,20 +835,16 @@ 19.74 } 19.75 } 19.76 } 19.77 + 19.78 if (verbose) { 19.79 tty->cr(); 19.80 tty->cr(); 19.81 } 19.82 - if (UseCompressedKlassPointers) { 19.83 - Universe::set_narrow_klass_base(Universe::narrow_oop_base()); 19.84 - Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes)); 19.85 - } 19.86 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base()); 19.87 } 19.88 - // Universe::narrow_oop_base() is one page below the metaspace 19.89 - // base. The actual metaspace base depends on alignment constraints 19.90 - // so we don't know its exact location here. 19.91 - assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) || 19.92 + // Universe::narrow_oop_base() is one page below the heap. 19.93 + assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - 19.94 + os::vm_page_size()) || 19.95 Universe::narrow_oop_base() == NULL, "invalid value"); 19.96 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes || 19.97 Universe::narrow_oop_shift() == 0, "invalid value"); 19.98 @@ -882,12 +864,7 @@ 19.99 19.100 // Reserve the Java heap, which is now the same for all GCs. 19.101 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) { 19.102 - // Add in the class metaspace area so the classes in the headers can 19.103 - // be compressed the same as instances. 19.104 - // Need to round class space size up because it's below the heap and 19.105 - // the actual alignment depends on its size. 
19.106 - Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment)); 19.107 - size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment); 19.108 + size_t total_reserved = align_size_up(heap_size, alignment); 19.109 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())), 19.110 "heap size is too big for compressed oops"); 19.111 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); 19.112 @@ -923,28 +900,17 @@ 19.113 return total_rs; 19.114 } 19.115 19.116 - // Split the reserved space into main Java heap and a space for 19.117 - // classes so that they can be compressed using the same algorithm 19.118 - // as compressed oops. If compress oops and compress klass ptrs are 19.119 - // used we need the meta space first: if the alignment used for 19.120 - // compressed oops is greater than the one used for compressed klass 19.121 - // ptrs, a metadata space on top of the heap could become 19.122 - // unreachable. 19.123 - ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size()); 19.124 - ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment); 19.125 - Metaspace::initialize_class_space(class_rs); 19.126 - 19.127 if (UseCompressedOops) { 19.128 // Universe::initialize_heap() will reset this to NULL if unscaled 19.129 // or zero-based narrow oops are actually used. 19.130 address base = (address)(total_rs.base() - os::vm_page_size()); 19.131 Universe::set_narrow_oop_base(base); 19.132 } 19.133 - return heap_rs; 19.134 + return total_rs; 19.135 } 19.136 19.137 19.138 -// It's the caller's repsonsibility to ensure glitch-freedom 19.139 +// It's the caller's responsibility to ensure glitch-freedom 19.140 // (if required). 19.141 void Universe::update_heap_info_at_gc() { 19.142 _heap_capacity_at_last_gc = heap()->capacity();
20.1 --- a/src/share/vm/memory/universe.hpp Fri Aug 16 04:24:07 2013 -0700 20.2 +++ b/src/share/vm/memory/universe.hpp Fri Aug 16 10:06:58 2013 -0700 20.3 @@ -75,10 +75,10 @@ 20.4 }; 20.5 20.6 20.7 -// For UseCompressedOops and UseCompressedKlassPointers. 20.8 +// For UseCompressedOops. 20.9 struct NarrowPtrStruct { 20.10 - // Base address for oop/klass-within-java-object materialization. 20.11 - // NULL if using wide oops/klasses or zero based narrow oops/klasses. 20.12 + // Base address for oop-within-java-object materialization. 20.13 + // NULL if using wide oops or zero based narrow oops. 20.14 address _base; 20.15 // Number of shift bits for encoding/decoding narrow ptrs. 20.16 // 0 if using wide ptrs or zero based unscaled narrow ptrs, 20.17 @@ -106,6 +106,7 @@ 20.18 friend class SystemDictionary; 20.19 friend class VMStructs; 20.20 friend class VM_PopulateDumpSharedSpace; 20.21 + friend class Metaspace; 20.22 20.23 friend jint universe_init(); 20.24 friend void universe2_init(); 20.25 @@ -184,9 +185,6 @@ 20.26 static struct NarrowPtrStruct _narrow_klass; 20.27 static address _narrow_ptrs_base; 20.28 20.29 - // Aligned size of the metaspace. 
20.30 - static size_t _class_metaspace_size; 20.31 - 20.32 // array of dummy objects used with +FullGCAlot 20.33 debug_only(static objArrayOop _fullgc_alot_dummy_array;) 20.34 // index of next entry to clear 20.35 @@ -238,15 +236,6 @@ 20.36 assert(UseCompressedOops, "no compressed ptrs?"); 20.37 _narrow_oop._use_implicit_null_checks = use; 20.38 } 20.39 - static bool reserve_metaspace_helper(bool with_base = false); 20.40 - static ReservedHeapSpace reserve_heap_metaspace(size_t heap_size, size_t alignment, bool& contiguous); 20.41 - 20.42 - static size_t class_metaspace_size() { 20.43 - return _class_metaspace_size; 20.44 - } 20.45 - static void set_class_metaspace_size(size_t metaspace_size) { 20.46 - _class_metaspace_size = metaspace_size; 20.47 - } 20.48 20.49 // Debugging 20.50 static int _verify_count; // number of verifies done
21.1 --- a/src/share/vm/oops/klass.hpp Fri Aug 16 04:24:07 2013 -0700 21.2 +++ b/src/share/vm/oops/klass.hpp Fri Aug 16 10:06:58 2013 -0700 21.3 @@ -703,6 +703,16 @@ 21.4 21.5 virtual void oop_verify_on(oop obj, outputStream* st); 21.6 21.7 + static bool is_null(narrowKlass obj); 21.8 + static bool is_null(Klass* obj); 21.9 + 21.10 + // klass encoding for klass pointer in objects. 21.11 + static narrowKlass encode_klass_not_null(Klass* v); 21.12 + static narrowKlass encode_klass(Klass* v); 21.13 + 21.14 + static Klass* decode_klass_not_null(narrowKlass v); 21.15 + static Klass* decode_klass(narrowKlass v); 21.16 + 21.17 private: 21.18 // barriers used by klass_oop_store 21.19 void klass_update_barrier_set(oop v);
22.1 --- a/src/share/vm/oops/klass.inline.hpp Fri Aug 16 04:24:07 2013 -0700 22.2 +++ b/src/share/vm/oops/klass.inline.hpp Fri Aug 16 10:06:58 2013 -0700 22.3 @@ -1,5 +1,5 @@ 22.4 /* 22.5 - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 22.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. 22.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 22.8 * 22.9 * This code is free software; you can redistribute it and/or modify it 22.10 @@ -25,6 +25,7 @@ 22.11 #ifndef SHARE_VM_OOPS_KLASS_INLINE_HPP 22.12 #define SHARE_VM_OOPS_KLASS_INLINE_HPP 22.13 22.14 +#include "memory/universe.hpp" 22.15 #include "oops/klass.hpp" 22.16 #include "oops/markOop.hpp" 22.17 22.18 @@ -33,4 +34,41 @@ 22.19 _prototype_header = header; 22.20 } 22.21 22.22 +inline bool Klass::is_null(Klass* obj) { return obj == NULL; } 22.23 +inline bool Klass::is_null(narrowKlass obj) { return obj == 0; } 22.24 + 22.25 +// Encoding and decoding for klass field. 22.26 + 22.27 +inline bool check_klass_alignment(Klass* obj) { 22.28 + return (intptr_t)obj % KlassAlignmentInBytes == 0; 22.29 +} 22.30 + 22.31 +inline narrowKlass Klass::encode_klass_not_null(Klass* v) { 22.32 + assert(!is_null(v), "klass value can never be zero"); 22.33 + assert(check_klass_alignment(v), "Address not aligned"); 22.34 + int shift = Universe::narrow_klass_shift(); 22.35 + uint64_t pd = (uint64_t)(pointer_delta((void*)v, Universe::narrow_klass_base(), 1)); 22.36 + assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding"); 22.37 + uint64_t result = pd >> shift; 22.38 + assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow"); 22.39 + assert(decode_klass(result) == v, "reversibility"); 22.40 + return (narrowKlass)result; 22.41 +} 22.42 + 22.43 +inline narrowKlass Klass::encode_klass(Klass* v) { 22.44 + return is_null(v) ? 
(narrowKlass)0 : encode_klass_not_null(v); 22.45 +} 22.46 + 22.47 +inline Klass* Klass::decode_klass_not_null(narrowKlass v) { 22.48 + assert(!is_null(v), "narrow klass value can never be zero"); 22.49 + int shift = Universe::narrow_klass_shift(); 22.50 + Klass* result = (Klass*)(void*)((uintptr_t)Universe::narrow_klass_base() + ((uintptr_t)v << shift)); 22.51 + assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result)); 22.52 + return result; 22.53 +} 22.54 + 22.55 +inline Klass* Klass::decode_klass(narrowKlass v) { 22.56 + return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v); 22.57 +} 22.58 + 22.59 #endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
23.1 --- a/src/share/vm/oops/oop.hpp Fri Aug 16 04:24:07 2013 -0700 23.2 +++ b/src/share/vm/oops/oop.hpp Fri Aug 16 10:06:58 2013 -0700 23.3 @@ -1,5 +1,5 @@ 23.4 /* 23.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 23.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 23.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 23.8 * 23.9 * This code is free software; you can redistribute it and/or modify it 23.10 @@ -62,7 +62,7 @@ 23.11 volatile markOop _mark; 23.12 union _metadata { 23.13 Klass* _klass; 23.14 - narrowOop _compressed_klass; 23.15 + narrowKlass _compressed_klass; 23.16 } _metadata; 23.17 23.18 // Fast access to barrier set. Must be initialized. 23.19 @@ -84,7 +84,7 @@ 23.20 Klass* klass() const; 23.21 Klass* klass_or_null() const volatile; 23.22 Klass** klass_addr(); 23.23 - narrowOop* compressed_klass_addr(); 23.24 + narrowKlass* compressed_klass_addr(); 23.25 23.26 void set_klass(Klass* k); 23.27 23.28 @@ -189,13 +189,6 @@ 23.29 oop compare_value, 23.30 bool prebarrier = false); 23.31 23.32 - // klass encoding for klass pointer in objects. 23.33 - static narrowOop encode_klass_not_null(Klass* v); 23.34 - static narrowOop encode_klass(Klass* v); 23.35 - 23.36 - static Klass* decode_klass_not_null(narrowOop v); 23.37 - static Klass* decode_klass(narrowOop v); 23.38 - 23.39 // Access to fields in a instanceOop through these methods. 23.40 oop obj_field(int offset) const; 23.41 volatile oop obj_field_volatile(int offset) const;
24.1 --- a/src/share/vm/oops/oop.inline.hpp Fri Aug 16 04:24:07 2013 -0700 24.2 +++ b/src/share/vm/oops/oop.inline.hpp Fri Aug 16 10:06:58 2013 -0700 24.3 @@ -35,7 +35,7 @@ 24.4 #include "memory/specialized_oop_closures.hpp" 24.5 #include "oops/arrayKlass.hpp" 24.6 #include "oops/arrayOop.hpp" 24.7 -#include "oops/klass.hpp" 24.8 +#include "oops/klass.inline.hpp" 24.9 #include "oops/markOop.inline.hpp" 24.10 #include "oops/oop.hpp" 24.11 #include "runtime/atomic.hpp" 24.12 @@ -70,7 +70,7 @@ 24.13 24.14 inline Klass* oopDesc::klass() const { 24.15 if (UseCompressedKlassPointers) { 24.16 - return decode_klass_not_null(_metadata._compressed_klass); 24.17 + return Klass::decode_klass_not_null(_metadata._compressed_klass); 24.18 } else { 24.19 return _metadata._klass; 24.20 } 24.21 @@ -79,7 +79,7 @@ 24.22 inline Klass* oopDesc::klass_or_null() const volatile { 24.23 // can be NULL in CMS 24.24 if (UseCompressedKlassPointers) { 24.25 - return decode_klass(_metadata._compressed_klass); 24.26 + return Klass::decode_klass(_metadata._compressed_klass); 24.27 } else { 24.28 return _metadata._klass; 24.29 } 24.30 @@ -87,7 +87,7 @@ 24.31 24.32 inline int oopDesc::klass_gap_offset_in_bytes() { 24.33 assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers"); 24.34 - return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop); 24.35 + return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass); 24.36 } 24.37 24.38 inline Klass** oopDesc::klass_addr() { 24.39 @@ -97,9 +97,9 @@ 24.40 return (Klass**) &_metadata._klass; 24.41 } 24.42 24.43 -inline narrowOop* oopDesc::compressed_klass_addr() { 24.44 +inline narrowKlass* oopDesc::compressed_klass_addr() { 24.45 assert(UseCompressedKlassPointers, "only called by compressed klass pointers"); 24.46 - return (narrowOop*) &_metadata._compressed_klass; 24.47 + return &_metadata._compressed_klass; 24.48 } 24.49 24.50 inline void oopDesc::set_klass(Klass* k) { 24.51 @@ -107,7 +107,7 @@ 24.52 
assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*"); 24.53 assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*"); 24.54 if (UseCompressedKlassPointers) { 24.55 - *compressed_klass_addr() = encode_klass_not_null(k); 24.56 + *compressed_klass_addr() = Klass::encode_klass_not_null(k); 24.57 } else { 24.58 *klass_addr() = k; 24.59 } 24.60 @@ -127,7 +127,7 @@ 24.61 // This is only to be used during GC, for from-space objects, so no 24.62 // barrier is needed. 24.63 if (UseCompressedKlassPointers) { 24.64 - _metadata._compressed_klass = encode_heap_oop(k); // may be null (parnew overflow handling) 24.65 + _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k); // may be null (parnew overflow handling) 24.66 } else { 24.67 _metadata._klass = (Klass*)(address)k; 24.68 } 24.69 @@ -136,7 +136,7 @@ 24.70 inline oop oopDesc::list_ptr_from_klass() { 24.71 // This is only to be used during GC, for from-space objects. 24.72 if (UseCompressedKlassPointers) { 24.73 - return decode_heap_oop(_metadata._compressed_klass); 24.74 + return decode_heap_oop((narrowOop)_metadata._compressed_klass); 24.75 } else { 24.76 // Special case for GC 24.77 return (oop)(address)_metadata._klass; 24.78 @@ -176,7 +176,6 @@ 24.79 // the right type and inlines the appopriate code). 
24.80 24.81 inline bool oopDesc::is_null(oop obj) { return obj == NULL; } 24.82 -inline bool oopDesc::is_null(Klass* obj) { return obj == NULL; } 24.83 inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; } 24.84 24.85 // Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit 24.86 @@ -186,9 +185,6 @@ 24.87 inline bool check_obj_alignment(oop obj) { 24.88 return (intptr_t)obj % MinObjAlignmentInBytes == 0; 24.89 } 24.90 -inline bool check_klass_alignment(Klass* obj) { 24.91 - return (intptr_t)obj % KlassAlignmentInBytes == 0; 24.92 -} 24.93 24.94 inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) { 24.95 assert(!is_null(v), "oop value can never be zero"); 24.96 @@ -224,39 +220,6 @@ 24.97 inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; } 24.98 inline oop oopDesc::decode_heap_oop(oop v) { return v; } 24.99 24.100 -// Encoding and decoding for klass field. It is copied code, but someday 24.101 -// might not be the same as oop. 24.102 - 24.103 -inline narrowOop oopDesc::encode_klass_not_null(Klass* v) { 24.104 - assert(!is_null(v), "klass value can never be zero"); 24.105 - assert(check_klass_alignment(v), "Address not aligned"); 24.106 - address base = Universe::narrow_klass_base(); 24.107 - int shift = Universe::narrow_klass_shift(); 24.108 - uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1)); 24.109 - assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding"); 24.110 - uint64_t result = pd >> shift; 24.111 - assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow"); 24.112 - assert(decode_klass(result) == v, "reversibility"); 24.113 - return (narrowOop)result; 24.114 -} 24.115 - 24.116 -inline narrowOop oopDesc::encode_klass(Klass* v) { 24.117 - return (is_null(v)) ? 
(narrowOop)0 : encode_klass_not_null(v); 24.118 -} 24.119 - 24.120 -inline Klass* oopDesc::decode_klass_not_null(narrowOop v) { 24.121 - assert(!is_null(v), "narrow oop value can never be zero"); 24.122 - address base = Universe::narrow_klass_base(); 24.123 - int shift = Universe::narrow_klass_shift(); 24.124 - Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift)); 24.125 - assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result)); 24.126 - return result; 24.127 -} 24.128 - 24.129 -inline Klass* oopDesc::decode_klass(narrowOop v) { 24.130 - return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v); 24.131 -} 24.132 - 24.133 // Load an oop out of the Java heap as is without decoding. 24.134 // Called by GC to check for null before decoding. 24.135 inline oop oopDesc::load_heap_oop(oop* p) { return *p; }
25.1 --- a/src/share/vm/oops/oopsHierarchy.hpp Fri Aug 16 04:24:07 2013 -0700 25.2 +++ b/src/share/vm/oops/oopsHierarchy.hpp Fri Aug 16 10:06:58 2013 -0700 25.3 @@ -1,5 +1,5 @@ 25.4 /* 25.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 25.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 25.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 25.8 * 25.9 * This code is free software; you can redistribute it and/or modify it 25.10 @@ -33,6 +33,10 @@ 25.11 // of B, A's representation is a prefix of B's representation. 25.12 25.13 typedef juint narrowOop; // Offset instead of address for an oop within a java object 25.14 + 25.15 +// If compressed klass pointers then use narrowKlass. 25.16 +typedef juint narrowKlass; 25.17 + 25.18 typedef void* OopOrNarrowOopStar; 25.19 typedef class markOopDesc* markOop; 25.20
26.1 --- a/src/share/vm/runtime/arguments.cpp Fri Aug 16 04:24:07 2013 -0700 26.2 +++ b/src/share/vm/runtime/arguments.cpp Fri Aug 16 10:06:58 2013 -0700 26.3 @@ -1393,10 +1393,8 @@ 26.4 26.5 inline uintx max_heap_for_compressed_oops() { 26.6 // Avoid sign flip. 26.7 - if (OopEncodingHeapMax < ClassMetaspaceSize + os::vm_page_size()) { 26.8 - return 0; 26.9 - } 26.10 - LP64_ONLY(return OopEncodingHeapMax - ClassMetaspaceSize - os::vm_page_size()); 26.11 + assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size"); 26.12 + LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size()); 26.13 NOT_LP64(ShouldNotReachHere(); return 0); 26.14 } 26.15 26.16 @@ -1448,6 +1446,35 @@ 26.17 #endif // ZERO 26.18 } 26.19 26.20 + 26.21 +// NOTE: set_use_compressed_klass_ptrs() must be called after calling 26.22 +// set_use_compressed_oops(). 26.23 +void Arguments::set_use_compressed_klass_ptrs() { 26.24 +#ifndef ZERO 26.25 +#ifdef _LP64 26.26 + // UseCompressedOops must be on for UseCompressedKlassPointers to be on. 26.27 + if (!UseCompressedOops) { 26.28 + if (UseCompressedKlassPointers) { 26.29 + warning("UseCompressedKlassPointers requires UseCompressedOops"); 26.30 + } 26.31 + FLAG_SET_DEFAULT(UseCompressedKlassPointers, false); 26.32 + } else { 26.33 + // Turn on UseCompressedKlassPointers too 26.34 + if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) { 26.35 + FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true); 26.36 + } 26.37 + // Check the ClassMetaspaceSize to make sure we use compressed klass ptrs. 
26.38 + if (UseCompressedKlassPointers) { 26.39 + if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) { 26.40 + warning("Class metaspace size is too large for UseCompressedKlassPointers"); 26.41 + FLAG_SET_DEFAULT(UseCompressedKlassPointers, false); 26.42 + } 26.43 + } 26.44 + } 26.45 +#endif // _LP64 26.46 +#endif // !ZERO 26.47 +} 26.48 + 26.49 void Arguments::set_ergonomics_flags() { 26.50 26.51 if (os::is_server_class_machine()) { 26.52 @@ -1470,7 +1497,8 @@ 26.53 // server performance. On server class machines, keep the default 26.54 // off unless it is asked for. Future work: either add bytecode rewriting 26.55 // at link time, or rewrite bytecodes in non-shared methods. 26.56 - if (!DumpSharedSpaces && !RequireSharedSpaces) { 26.57 + if (!DumpSharedSpaces && !RequireSharedSpaces && 26.58 + (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) { 26.59 no_shared_spaces(); 26.60 } 26.61 } 26.62 @@ -1478,33 +1506,11 @@ 26.63 #ifndef ZERO 26.64 #ifdef _LP64 26.65 set_use_compressed_oops(); 26.66 - // UseCompressedOops must be on for UseCompressedKlassPointers to be on. 26.67 - if (!UseCompressedOops) { 26.68 - if (UseCompressedKlassPointers) { 26.69 - warning("UseCompressedKlassPointers requires UseCompressedOops"); 26.70 - } 26.71 - FLAG_SET_DEFAULT(UseCompressedKlassPointers, false); 26.72 - } else { 26.73 - // Turn on UseCompressedKlassPointers too 26.74 - if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) { 26.75 - FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true); 26.76 - } 26.77 - // Set the ClassMetaspaceSize to something that will not need to be 26.78 - // expanded, since it cannot be expanded. 
26.79 - if (UseCompressedKlassPointers) { 26.80 - if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) { 26.81 - warning("Class metaspace size is too large for UseCompressedKlassPointers"); 26.82 - FLAG_SET_DEFAULT(UseCompressedKlassPointers, false); 26.83 - } else if (FLAG_IS_DEFAULT(ClassMetaspaceSize)) { 26.84 - // 100,000 classes seems like a good size, so 100M assumes around 1K 26.85 - // per klass. The vtable and oopMap is embedded so we don't have a fixed 26.86 - // size per klass. Eventually, this will be parameterized because it 26.87 - // would also be useful to determine the optimal size of the 26.88 - // systemDictionary. 26.89 - FLAG_SET_ERGO(uintx, ClassMetaspaceSize, 100*M); 26.90 - } 26.91 - } 26.92 - } 26.93 + 26.94 + // set_use_compressed_klass_ptrs() must be called after calling 26.95 + // set_use_compressed_oops(). 26.96 + set_use_compressed_klass_ptrs(); 26.97 + 26.98 // Also checks that certain machines are slower with compressed oops 26.99 // in vm_version initialization code. 26.100 #endif // _LP64 26.101 @@ -2153,7 +2159,7 @@ 26.102 26.103 status = status && verify_object_alignment(); 26.104 26.105 - status = status && verify_min_value(ClassMetaspaceSize, 1*M, 26.106 + status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G, 26.107 "ClassMetaspaceSize"); 26.108 26.109 status = status && verify_interval(MarkStackSizeMax, 26.110 @@ -3273,33 +3279,22 @@ 26.111 } 26.112 26.113 void Arguments::set_shared_spaces_flags() { 26.114 -#ifdef _LP64 26.115 - const bool must_share = DumpSharedSpaces || RequireSharedSpaces; 26.116 - 26.117 - // CompressedOops cannot be used with CDS. The offsets of oopmaps and 26.118 - // static fields are incorrect in the archive. With some more clever 26.119 - // initialization, this restriction can probably be lifted. 26.120 - if (UseCompressedOops) { 26.121 - if (must_share) { 26.122 - warning("disabling compressed oops because of %s", 26.123 - DumpSharedSpaces ? 
"-Xshare:dump" : "-Xshare:on"); 26.124 - FLAG_SET_CMDLINE(bool, UseCompressedOops, false); 26.125 - FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false); 26.126 - } else { 26.127 - // Prefer compressed oops to class data sharing 26.128 - if (UseSharedSpaces && Verbose) { 26.129 - warning("turning off use of shared archive because of compressed oops"); 26.130 - } 26.131 - no_shared_spaces(); 26.132 - } 26.133 - } 26.134 -#endif 26.135 - 26.136 if (DumpSharedSpaces) { 26.137 if (RequireSharedSpaces) { 26.138 warning("cannot dump shared archive while using shared archive"); 26.139 } 26.140 UseSharedSpaces = false; 26.141 +#ifdef _LP64 26.142 + if (!UseCompressedOops || !UseCompressedKlassPointers) { 26.143 + vm_exit_during_initialization( 26.144 + "Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL); 26.145 + } 26.146 + } else { 26.147 + // UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces. 26.148 + if (!UseCompressedOops || !UseCompressedKlassPointers) { 26.149 + no_shared_spaces(); 26.150 + } 26.151 +#endif 26.152 } 26.153 } 26.154
27.1 --- a/src/share/vm/runtime/arguments.hpp Fri Aug 16 04:24:07 2013 -0700 27.2 +++ b/src/share/vm/runtime/arguments.hpp Fri Aug 16 10:06:58 2013 -0700 27.3 @@ -309,6 +309,7 @@ 27.4 static void set_g1_gc_flags(); 27.5 // GC ergonomics 27.6 static void set_use_compressed_oops(); 27.7 + static void set_use_compressed_klass_ptrs(); 27.8 static void set_ergonomics_flags(); 27.9 static void set_shared_spaces_flags(); 27.10 // limits the given memory size by the maximum amount of memory this process is
28.1 --- a/src/share/vm/runtime/globals.hpp Fri Aug 16 04:24:07 2013 -0700 28.2 +++ b/src/share/vm/runtime/globals.hpp Fri Aug 16 10:06:58 2013 -0700 28.3 @@ -3036,7 +3036,7 @@ 28.4 product(uintx, MaxMetaspaceSize, max_uintx, \ 28.5 "Maximum size of Metaspaces (in bytes)") \ 28.6 \ 28.7 - product(uintx, ClassMetaspaceSize, 2*M, \ 28.8 + product(uintx, ClassMetaspaceSize, 1*G, \ 28.9 "Maximum size of InstanceKlass area in Metaspace used for " \ 28.10 "UseCompressedKlassPointers") \ 28.11 \
29.1 --- a/src/share/vm/runtime/init.cpp Fri Aug 16 04:24:07 2013 -0700 29.2 +++ b/src/share/vm/runtime/init.cpp Fri Aug 16 10:06:58 2013 -0700 29.3 @@ -1,5 +1,5 @@ 29.4 /* 29.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 29.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 29.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.8 * 29.9 * This code is free software; you can redistribute it and/or modify it 29.10 @@ -95,7 +95,6 @@ 29.11 management_init(); 29.12 bytecodes_init(); 29.13 classLoader_init(); 29.14 - Metaspace::global_initialize(); // must be before codeCache 29.15 codeCache_init(); 29.16 VM_Version_init(); 29.17 os_init_globals();
30.1 --- a/src/share/vm/utilities/globalDefinitions.hpp Fri Aug 16 04:24:07 2013 -0700 30.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp Fri Aug 16 10:06:58 2013 -0700 30.3 @@ -362,6 +362,8 @@ 30.4 // Klass encoding metaspace max size 30.5 const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes; 30.6 30.7 +const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000)); // 32*G 30.8 + 30.9 // Machine dependent stuff 30.10 30.11 #ifdef TARGET_ARCH_x86
31.1 --- a/test/Makefile Fri Aug 16 04:24:07 2013 -0700 31.2 +++ b/test/Makefile Fri Aug 16 10:06:58 2013 -0700 31.3 @@ -210,9 +210,7 @@ 31.4 $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help 31.5 $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X 31.6 $(RM) $(PRODUCT_HOME)/jre/lib/*/client/classes.jsa 31.7 - $(RM) $(PRODUCT_HOME)/jre/lib/*/client/classes_g.jsa 31.8 $(RM) $(PRODUCT_HOME)/jre/bin/client/classes.jsa 31.9 - $(RM) $(PRODUCT_HOME)/jre/bin/client/classes_g.jsa 31.10 $(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -Xshare:dump 31.11 31.12 PHONY_LIST += clienttest
32.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 32.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java Fri Aug 16 10:06:58 2013 -0700 32.3 @@ -0,0 +1,61 @@ 32.4 +/* 32.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 32.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 32.7 + * 32.8 + * This code is free software; you can redistribute it and/or modify it 32.9 + * under the terms of the GNU General Public License version 2 only, as 32.10 + * published by the Free Software Foundation. 32.11 + * 32.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 32.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 32.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 32.15 + * version 2 for more details (a copy is included in the LICENSE file that 32.16 + * accompanied this code). 32.17 + * 32.18 + * You should have received a copy of the GNU General Public License version 32.19 + * 2 along with this work; if not, write to the Free Software Foundation, 32.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 32.21 + * 32.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 32.23 + * or visit www.oracle.com if you need additional information or have any 32.24 + * questions. 
32.25 + */ 32.26 + 32.27 +/* 32.28 + * @test 32.29 + * @bug 8003424 32.30 + * @summary Testing UseCompressedKlassPointers with CDS 32.31 + * @library /testlibrary 32.32 + * @run main CDSCompressedKPtrs 32.33 + */ 32.34 + 32.35 +import com.oracle.java.testlibrary.*; 32.36 + 32.37 +public class CDSCompressedKPtrs { 32.38 + public static void main(String[] args) throws Exception { 32.39 + ProcessBuilder pb; 32.40 + if (Platform.is64bit()) { 32.41 + pb = ProcessTools.createJavaProcessBuilder( 32.42 + "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops", 32.43 + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); 32.44 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 32.45 + try { 32.46 + output.shouldContain("Loading classes to share"); 32.47 + output.shouldHaveExitValue(0); 32.48 + 32.49 + pb = ProcessTools.createJavaProcessBuilder( 32.50 + "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops", 32.51 + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); 32.52 + output = new OutputAnalyzer(pb.start()); 32.53 + output.shouldContain("sharing"); 32.54 + output.shouldHaveExitValue(0); 32.55 + 32.56 + } catch (RuntimeException e) { 32.57 + // Report 'passed' if CDS was turned off because we could not allocate 32.58 + // the klass metaspace at an address that would work with CDS. 32.59 + output.shouldContain("Could not allocate metaspace at a compatible address"); 32.60 + output.shouldHaveExitValue(1); 32.61 + } 32.62 + } 32.63 + } 32.64 +}
33.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 33.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java Fri Aug 16 10:06:58 2013 -0700 33.3 @@ -0,0 +1,93 @@ 33.4 +/* 33.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 33.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 33.7 + * 33.8 + * This code is free software; you can redistribute it and/or modify it 33.9 + * under the terms of the GNU General Public License version 2 only, as 33.10 + * published by the Free Software Foundation. 33.11 + * 33.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 33.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 33.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 33.15 + * version 2 for more details (a copy is included in the LICENSE file that 33.16 + * accompanied this code). 33.17 + * 33.18 + * You should have received a copy of the GNU General Public License version 33.19 + * 2 along with this work; if not, write to the Free Software Foundation, 33.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 33.21 + * 33.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 33.23 + * or visit www.oracle.com if you need additional information or have any 33.24 + * questions. 33.25 + */ 33.26 + 33.27 +/* 33.28 + * @test 33.29 + * @bug 8003424 33.30 + * @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off. 
33.31 + * @library /testlibrary 33.32 + * @run main CDSCompressedKPtrsError 33.33 + */ 33.34 + 33.35 +import com.oracle.java.testlibrary.*; 33.36 + 33.37 +public class CDSCompressedKPtrsError { 33.38 + public static void main(String[] args) throws Exception { 33.39 + ProcessBuilder pb; 33.40 + if (Platform.is64bit()) { 33.41 + pb = ProcessTools.createJavaProcessBuilder( 33.42 + "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", 33.43 + "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); 33.44 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 33.45 + try { 33.46 + output.shouldContain("Loading classes to share"); 33.47 + output.shouldHaveExitValue(0); 33.48 + 33.49 + pb = ProcessTools.createJavaProcessBuilder( 33.50 + "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops", 33.51 + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); 33.52 + output = new OutputAnalyzer(pb.start()); 33.53 + output.shouldContain("Unable to use shared archive"); 33.54 + output.shouldHaveExitValue(0); 33.55 + 33.56 + pb = ProcessTools.createJavaProcessBuilder( 33.57 + "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops", 33.58 + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); 33.59 + output = new OutputAnalyzer(pb.start()); 33.60 + output.shouldContain("Unable to use shared archive"); 33.61 + output.shouldHaveExitValue(0); 33.62 + 33.63 + pb = ProcessTools.createJavaProcessBuilder( 33.64 + "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops", 33.65 + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version"); 33.66 + output = new OutputAnalyzer(pb.start()); 33.67 + output.shouldContain("Unable to use shared archive"); 33.68 + output.shouldHaveExitValue(0); 33.69 + 33.70 + } catch (RuntimeException e) { 33.71 + output.shouldContain("Unable to use shared archive"); 33.72 + 
output.shouldHaveExitValue(1); 33.73 + } 33.74 + 33.75 + // Test bad options with -Xshare:dump. 33.76 + pb = ProcessTools.createJavaProcessBuilder( 33.77 + "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", 33.78 + "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); 33.79 + output = new OutputAnalyzer(pb.start()); 33.80 + output.shouldContain("Cannot dump shared archive"); 33.81 + 33.82 + pb = ProcessTools.createJavaProcessBuilder( 33.83 + "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", 33.84 + "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); 33.85 + output = new OutputAnalyzer(pb.start()); 33.86 + output.shouldContain("Cannot dump shared archive"); 33.87 + 33.88 + pb = ProcessTools.createJavaProcessBuilder( 33.89 + "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions", 33.90 + "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); 33.91 + output = new OutputAnalyzer(pb.start()); 33.92 + output.shouldContain("Cannot dump shared archive"); 33.93 + 33.94 + } 33.95 + } 33.96 +}
34.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 34.2 +++ b/test/runtime/CDSCompressedKPtrs/XShareAuto.java Fri Aug 16 10:06:58 2013 -0700 34.3 @@ -0,0 +1,76 @@ 34.4 +/* 34.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 34.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 34.7 + * 34.8 + * This code is free software; you can redistribute it and/or modify it 34.9 + * under the terms of the GNU General Public License version 2 only, as 34.10 + * published by the Free Software Foundation. 34.11 + * 34.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 34.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 34.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 34.15 + * version 2 for more details (a copy is included in the LICENSE file that 34.16 + * accompanied this code). 34.17 + * 34.18 + * You should have received a copy of the GNU General Public License version 34.19 + * 2 along with this work; if not, write to the Free Software Foundation, 34.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 34.21 + * 34.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 34.23 + * or visit www.oracle.com if you need additional information or have any 34.24 + * questions. 34.25 + */ 34.26 + 34.27 +/* 34.28 + * @test 34.29 + * @bug 8005933 34.30 + * @summary Test that -Xshare:auto uses CDS when explicitly specified with -server. 
34.31 + * @library /testlibrary 34.32 + * @run main XShareAuto 34.33 + */ 34.34 + 34.35 +import com.oracle.java.testlibrary.*; 34.36 + 34.37 +public class XShareAuto { 34.38 + public static void main(String[] args) throws Exception { 34.39 + if (!Platform.is64bit()) { 34.40 + System.out.println("ObjectAlignmentInBytes for CDS is only " + 34.41 + "supported on 64bit platforms; this platform is " + 34.42 + System.getProperty("sun.arch.data.model")); 34.43 + System.out.println("Skipping the test"); 34.44 + return; 34.45 + } 34.46 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( 34.47 + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", 34.48 + "-Xshare:dump"); 34.49 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 34.50 + output.shouldContain("Loading classes to share"); 34.51 + output.shouldHaveExitValue(0); 34.52 + 34.53 + pb = ProcessTools.createJavaProcessBuilder( 34.54 + "-server", "-XX:+UnlockDiagnosticVMOptions", 34.55 + "-XX:SharedArchiveFile=./sample.jsa", "-version"); 34.56 + output = new OutputAnalyzer(pb.start()); 34.57 + output.shouldNotContain("sharing"); 34.58 + output.shouldHaveExitValue(0); 34.59 + 34.60 + pb = ProcessTools.createJavaProcessBuilder( 34.61 + "-server", "-Xshare:auto", "-XX:+UnlockDiagnosticVMOptions", 34.62 + "-XX:SharedArchiveFile=./sample.jsa", "-version"); 34.63 + output = new OutputAnalyzer(pb.start()); 34.64 + try { 34.65 + output.shouldContain("sharing"); 34.66 + output.shouldHaveExitValue(0); 34.67 + } catch (RuntimeException e) { 34.68 + // If this failed then check that it would also be unable 34.69 + // to share even if -Xshare:on is specified. If so, then 34.70 + // return a success status. 
34.71 + pb = ProcessTools.createJavaProcessBuilder( 34.72 + "-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions", 34.73 + "-XX:SharedArchiveFile=./sample.jsa", "-version"); 34.74 + output = new OutputAnalyzer(pb.start()); 34.75 + output.shouldContain("Could not allocate metaspace at a compatible address"); 34.76 + output.shouldHaveExitValue(1); 34.77 + } 34.78 + } 34.79 +}
35.1 --- a/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java Fri Aug 16 04:24:07 2013 -0700 35.2 +++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java Fri Aug 16 10:06:58 2013 -0700 35.3 @@ -84,8 +84,7 @@ 35.4 // there is a chance such reservation will fail 35.5 // If it does, it is NOT considered a failure of the feature, 35.6 // rather a possible expected outcome, though not likely 35.7 - output.shouldContain( 35.8 - "Unable to reserve shared space at required address"); 35.9 + output.shouldContain("Could not allocate metaspace at a compatible address"); 35.10 output.shouldHaveExitValue(1); 35.11 } 35.12 }