Fri, 11 Oct 2013 22:22:19 -0400
Merge
1.1 --- a/.hgtags Fri Oct 11 17:08:22 2013 -0400 1.2 +++ b/.hgtags Fri Oct 11 22:22:19 2013 -0400 1.3 @@ -381,3 +381,5 @@ 1.4 566db1b0e6efca31f181456e54c8911d0192410d hs25-b51 1.5 c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109 1.6 58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52 1.7 +6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110 1.8 +562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
2.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainCacheEntry.java Fri Oct 11 22:22:19 2013 -0400 2.3 @@ -0,0 +1,56 @@ 2.4 +/* 2.5 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 2.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.7 + * 2.8 + * This code is free software; you can redistribute it and/or modify it 2.9 + * under the terms of the GNU General Public License version 2 only, as 2.10 + * published by the Free Software Foundation. 2.11 + * 2.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 2.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 2.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 2.15 + * version 2 for more details (a copy is included in the LICENSE file that 2.16 + * accompanied this code). 2.17 + * 2.18 + * You should have received a copy of the GNU General Public License version 2.19 + * 2 along with this work; if not, write to the Free Software Foundation, 2.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 2.21 + * 2.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 2.23 + * or visit www.oracle.com if you need additional information or have any 2.24 + * questions. 
2.25 + * 2.26 + */ 2.27 + 2.28 +package sun.jvm.hotspot.memory; 2.29 + 2.30 +import java.util.*; 2.31 +import sun.jvm.hotspot.debugger.*; 2.32 +import sun.jvm.hotspot.oops.*; 2.33 +import sun.jvm.hotspot.runtime.*; 2.34 +import sun.jvm.hotspot.types.*; 2.35 + 2.36 +public class ProtectionDomainCacheEntry extends VMObject { 2.37 + private static sun.jvm.hotspot.types.OopField protectionDomainField; 2.38 + 2.39 + static { 2.40 + VM.registerVMInitializedObserver(new Observer() { 2.41 + public void update(Observable o, Object data) { 2.42 + initialize(VM.getVM().getTypeDataBase()); 2.43 + } 2.44 + }); 2.45 + } 2.46 + 2.47 + private static synchronized void initialize(TypeDataBase db) { 2.48 + Type type = db.lookupType("ProtectionDomainCacheEntry"); 2.49 + protectionDomainField = type.getOopField("_literal"); 2.50 + } 2.51 + 2.52 + public ProtectionDomainCacheEntry(Address addr) { 2.53 + super(addr); 2.54 + } 2.55 + 2.56 + public Oop protectionDomain() { 2.57 + return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr)); 2.58 + } 2.59 +}
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java Fri Oct 11 17:08:22 2013 -0400 3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java Fri Oct 11 22:22:19 2013 -0400 3.3 @@ -1,5 +1,5 @@ 3.4 /* 3.5 - * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved. 3.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 3.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3.8 * 3.9 * This code is free software; you can redistribute it and/or modify it 3.10 @@ -32,7 +32,7 @@ 3.11 3.12 public class ProtectionDomainEntry extends VMObject { 3.13 private static AddressField nextField; 3.14 - private static sun.jvm.hotspot.types.OopField protectionDomainField; 3.15 + private static AddressField pdCacheField; 3.16 3.17 static { 3.18 VM.registerVMInitializedObserver(new Observer() { 3.19 @@ -46,7 +46,7 @@ 3.20 Type type = db.lookupType("ProtectionDomainEntry"); 3.21 3.22 nextField = type.getAddressField("_next"); 3.23 - protectionDomainField = type.getOopField("_protection_domain"); 3.24 + pdCacheField = type.getAddressField("_pd_cache"); 3.25 } 3.26 3.27 public ProtectionDomainEntry(Address addr) { 3.28 @@ -54,10 +54,12 @@ 3.29 } 3.30 3.31 public ProtectionDomainEntry next() { 3.32 - return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr); 3.33 + return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr)); 3.34 } 3.35 3.36 public Oop protectionDomain() { 3.37 - return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr)); 3.38 + ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry) 3.39 + VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr)); 3.40 + return pd_cache.protectionDomain(); 3.41 } 3.42 }
4.1 --- a/make/hotspot_version Fri Oct 11 17:08:22 2013 -0400 4.2 +++ b/make/hotspot_version Fri Oct 11 22:22:19 2013 -0400 4.3 @@ -35,7 +35,7 @@ 4.4 4.5 HS_MAJOR_VER=25 4.6 HS_MINOR_VER=0 4.7 -HS_BUILD_NUMBER=53 4.8 +HS_BUILD_NUMBER=54 4.9 4.10 JDK_MAJOR_VER=1 4.11 JDK_MINOR_VER=8
5.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Oct 11 17:08:22 2013 -0400 5.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Oct 11 22:22:19 2013 -0400 5.3 @@ -37,6 +37,9 @@ 5.4 #include "runtime/vframeArray.hpp" 5.5 #include "utilities/macros.hpp" 5.6 #include "vmreg_sparc.inline.hpp" 5.7 +#if INCLUDE_ALL_GCS 5.8 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 5.9 +#endif 5.10 5.11 // Implementation of StubAssembler 5.12 5.13 @@ -912,7 +915,7 @@ 5.14 Register tmp2 = G3_scratch; 5.15 jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base; 5.16 5.17 - Label not_already_dirty, restart, refill; 5.18 + Label not_already_dirty, restart, refill, young_card; 5.19 5.20 #ifdef _LP64 5.21 __ srlx(addr, CardTableModRefBS::card_shift, addr); 5.22 @@ -924,9 +927,15 @@ 5.23 __ set(rs, cardtable); // cardtable := <card table base> 5.24 __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] 5.25 5.26 + __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); 5.27 + 5.28 + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 5.29 + __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] 5.30 + 5.31 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); 5.32 __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 5.33 5.34 + __ bind(young_card); 5.35 // We didn't take the branch, so we're already dirty: return. 5.36 // Use return-from-leaf 5.37 __ retl();
6.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Fri Oct 11 17:08:22 2013 -0400 6.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Fri Oct 11 22:22:19 2013 -0400 6.3 @@ -3752,7 +3752,7 @@ 6.4 #define __ masm. 6.5 address start = __ pc(); 6.6 6.7 - Label not_already_dirty, restart, refill; 6.8 + Label not_already_dirty, restart, refill, young_card; 6.9 6.10 #ifdef _LP64 6.11 __ srlx(O0, CardTableModRefBS::card_shift, O0); 6.12 @@ -3763,9 +3763,15 @@ 6.13 __ set(addrlit, O1); // O1 := <card table base> 6.14 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 6.15 6.16 + __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); 6.17 + 6.18 + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 6.19 + __ ldub(O0, O1, O2); // O2 := [O0 + O1] 6.20 + 6.21 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); 6.22 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 6.23 6.24 + __ bind(young_card); 6.25 // We didn't take the branch, so we're already dirty: return. 6.26 // Use return-from-leaf 6.27 __ retl();
7.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Oct 11 17:08:22 2013 -0400 7.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Oct 11 22:22:19 2013 -0400 7.3 @@ -38,6 +38,9 @@ 7.4 #include "runtime/vframeArray.hpp" 7.5 #include "utilities/macros.hpp" 7.6 #include "vmreg_x86.inline.hpp" 7.7 +#if INCLUDE_ALL_GCS 7.8 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 7.9 +#endif 7.10 7.11 7.12 // Implementation of StubAssembler 7.13 @@ -1753,13 +1756,17 @@ 7.14 __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index))); 7.15 #endif 7.16 7.17 - __ cmpb(Address(card_addr, 0), 0); 7.18 + __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); 7.19 + __ jcc(Assembler::equal, done); 7.20 + 7.21 + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 7.22 + __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 7.23 __ jcc(Assembler::equal, done); 7.24 7.25 // storing region crossing non-NULL, card is clean. 7.26 // dirty card and log. 7.27 7.28 - __ movb(Address(card_addr, 0), 0); 7.29 + __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 7.30 7.31 __ cmpl(queue_index, 0); 7.32 __ jcc(Assembler::equal, runtime);
8.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Oct 11 17:08:22 2013 -0400 8.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Oct 11 22:22:19 2013 -0400 8.3 @@ -3389,13 +3389,18 @@ 8.4 const Register card_addr = tmp; 8.5 lea(card_addr, as_Address(ArrayAddress(cardtable, index))); 8.6 #endif 8.7 - cmpb(Address(card_addr, 0), 0); 8.8 + cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); 8.9 jcc(Assembler::equal, done); 8.10 8.11 + membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 8.12 + cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 8.13 + jcc(Assembler::equal, done); 8.14 + 8.15 + 8.16 // storing a region crossing, non-NULL oop, card is clean. 8.17 // dirty card and log. 8.18 8.19 - movb(Address(card_addr, 0), 0); 8.20 + movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 8.21 8.22 cmpl(queue_index, 0); 8.23 jcc(Assembler::equal, runtime);
9.1 --- a/src/os/linux/vm/globals_linux.hpp Fri Oct 11 17:08:22 2013 -0400 9.2 +++ b/src/os/linux/vm/globals_linux.hpp Fri Oct 11 22:22:19 2013 -0400 9.3 @@ -53,7 +53,7 @@ 9.4 // Defines Linux-specific default values. The flags are available on all 9.5 // platforms, but they may have different default values on other platforms. 9.6 // 9.7 -define_pd_global(bool, UseLargePages, true); 9.8 +define_pd_global(bool, UseLargePages, false); 9.9 define_pd_global(bool, UseLargePagesIndividualAllocation, false); 9.10 define_pd_global(bool, UseOSErrorReporting, false); 9.11 define_pd_global(bool, UseThreadPriorities, true) ;
10.1 --- a/src/os/linux/vm/os_linux.cpp Fri Oct 11 17:08:22 2013 -0400 10.2 +++ b/src/os/linux/vm/os_linux.cpp Fri Oct 11 22:22:19 2013 -0400 10.3 @@ -3361,13 +3361,15 @@ 10.4 if (FLAG_IS_DEFAULT(UseHugeTLBFS) && 10.5 FLAG_IS_DEFAULT(UseSHM) && 10.6 FLAG_IS_DEFAULT(UseTransparentHugePages)) { 10.7 - // If UseLargePages is specified on the command line try all methods, 10.8 - // if it's default, then try only UseTransparentHugePages. 10.9 - if (FLAG_IS_DEFAULT(UseLargePages)) { 10.10 - UseTransparentHugePages = true; 10.11 - } else { 10.12 - UseHugeTLBFS = UseTransparentHugePages = UseSHM = true; 10.13 - } 10.14 + 10.15 + // The type of large pages has not been specified by the user. 10.16 + 10.17 + // Try UseHugeTLBFS and then UseSHM. 10.18 + UseHugeTLBFS = UseSHM = true; 10.19 + 10.20 + // Don't try UseTransparentHugePages since there are known 10.21 + // performance issues with it turned on. This might change in the future. 10.22 + UseTransparentHugePages = false; 10.23 } 10.24 10.25 if (UseTransparentHugePages) { 10.26 @@ -3393,9 +3395,19 @@ 10.27 } 10.28 10.29 void os::large_page_init() { 10.30 - if (!UseLargePages) { 10.31 + if (!UseLargePages && 10.32 + !UseTransparentHugePages && 10.33 + !UseHugeTLBFS && 10.34 + !UseSHM) { 10.35 + // Not using large pages. 10.36 + return; 10.37 + } 10.38 + 10.39 + if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) { 10.40 + // The user explicitly turned off large pages. 10.41 + // Ignore the rest of the large pages flags. 10.42 + UseTransparentHugePages = false; 10.43 UseHugeTLBFS = false; 10.44 - UseTransparentHugePages = false; 10.45 UseSHM = false; 10.46 return; 10.47 }
11.1 --- a/src/share/vm/classfile/dictionary.cpp Fri Oct 11 17:08:22 2013 -0400 11.2 +++ b/src/share/vm/classfile/dictionary.cpp Fri Oct 11 22:22:19 2013 -0400 11.3 @@ -1,5 +1,5 @@ 11.4 /* 11.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 11.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 11.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 11.8 * 11.9 * This code is free software; you can redistribute it and/or modify it 11.10 @@ -25,6 +25,7 @@ 11.11 #include "precompiled.hpp" 11.12 #include "classfile/dictionary.hpp" 11.13 #include "classfile/systemDictionary.hpp" 11.14 +#include "memory/iterator.hpp" 11.15 #include "oops/oop.inline.hpp" 11.16 #include "prims/jvmtiRedefineClassesTrace.hpp" 11.17 #include "utilities/hashtable.inline.hpp" 11.18 @@ -38,17 +39,21 @@ 11.19 : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) { 11.20 _current_class_index = 0; 11.21 _current_class_entry = NULL; 11.22 + _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize); 11.23 }; 11.24 11.25 11.26 - 11.27 Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t, 11.28 int number_of_entries) 11.29 : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) { 11.30 _current_class_index = 0; 11.31 _current_class_entry = NULL; 11.32 + _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize); 11.33 }; 11.34 11.35 +ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) { 11.36 + return _pd_cache_table->get(protection_domain); 11.37 +} 11.38 11.39 DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass, 11.40 ClassLoaderData* loader_data) { 11.41 @@ -105,11 +110,12 @@ 11.42 } 11.43 11.44 11.45 -void DictionaryEntry::add_protection_domain(oop protection_domain) { 11.46 +void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) { 
11.47 assert_locked_or_safepoint(SystemDictionary_lock); 11.48 if (!contains_protection_domain(protection_domain)) { 11.49 + ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain); 11.50 ProtectionDomainEntry* new_head = 11.51 - new ProtectionDomainEntry(protection_domain, _pd_set); 11.52 + new ProtectionDomainEntry(entry, _pd_set); 11.53 // Warning: Preserve store ordering. The SystemDictionary is read 11.54 // without locks. The new ProtectionDomainEntry must be 11.55 // complete before other threads can be allowed to see it 11.56 @@ -193,7 +199,10 @@ 11.57 11.58 11.59 void Dictionary::always_strong_oops_do(OopClosure* blk) { 11.60 - // Follow all system classes and temporary placeholders in dictionary 11.61 + // Follow all system classes and temporary placeholders in dictionary; only 11.62 + // protection domain oops contain references into the heap. In a first 11.63 + // pass over the system dictionary determine which need to be treated as 11.64 + // strongly reachable and mark them as such. 11.65 for (int index = 0; index < table_size(); index++) { 11.66 for (DictionaryEntry *probe = bucket(index); 11.67 probe != NULL; 11.68 @@ -201,10 +210,13 @@ 11.69 Klass* e = probe->klass(); 11.70 ClassLoaderData* loader_data = probe->loader_data(); 11.71 if (is_strongly_reachable(loader_data, e)) { 11.72 - probe->protection_domain_set_oops_do(blk); 11.73 + probe->set_strongly_reachable(); 11.74 } 11.75 } 11.76 } 11.77 + // Then iterate over the protection domain cache to apply the closure on the 11.78 + // previously marked ones. 
11.79 + _pd_cache_table->always_strong_oops_do(blk); 11.80 } 11.81 11.82 11.83 @@ -266,18 +278,12 @@ 11.84 } 11.85 } 11.86 11.87 - 11.88 void Dictionary::oops_do(OopClosure* f) { 11.89 - for (int index = 0; index < table_size(); index++) { 11.90 - for (DictionaryEntry* probe = bucket(index); 11.91 - probe != NULL; 11.92 - probe = probe->next()) { 11.93 - probe->protection_domain_set_oops_do(f); 11.94 - } 11.95 - } 11.96 + // Only the protection domain oops contain references into the heap. Iterate 11.97 + // over all of them. 11.98 + _pd_cache_table->oops_do(f); 11.99 } 11.100 11.101 - 11.102 void Dictionary::methods_do(void f(Method*)) { 11.103 for (int index = 0; index < table_size(); index++) { 11.104 for (DictionaryEntry* probe = bucket(index); 11.105 @@ -292,6 +298,11 @@ 11.106 } 11.107 } 11.108 11.109 +void Dictionary::unlink(BoolObjectClosure* is_alive) { 11.110 + // Only the protection domain cache table may contain references to the heap 11.111 + // that need to be unlinked. 11.112 + _pd_cache_table->unlink(is_alive); 11.113 +} 11.114 11.115 Klass* Dictionary::try_get_next_class() { 11.116 while (true) { 11.117 @@ -306,7 +317,6 @@ 11.118 // never reached 11.119 } 11.120 11.121 - 11.122 // Add a loaded class to the system dictionary. 11.123 // Readers of the SystemDictionary aren't always locked, so _buckets 11.124 // is volatile. 
The store of the next field in the constructor is 11.125 @@ -396,7 +406,7 @@ 11.126 assert(protection_domain() != NULL, 11.127 "real protection domain should be present"); 11.128 11.129 - entry->add_protection_domain(protection_domain()); 11.130 + entry->add_protection_domain(this, protection_domain()); 11.131 11.132 assert(entry->contains_protection_domain(protection_domain()), 11.133 "now protection domain should be present"); 11.134 @@ -446,6 +456,146 @@ 11.135 } 11.136 } 11.137 11.138 +ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size) 11.139 + : Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry)) 11.140 +{ 11.141 +} 11.142 + 11.143 +void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) { 11.144 + assert(SafepointSynchronize::is_at_safepoint(), "must be"); 11.145 + for (int i = 0; i < table_size(); ++i) { 11.146 + ProtectionDomainCacheEntry** p = bucket_addr(i); 11.147 + ProtectionDomainCacheEntry* entry = bucket(i); 11.148 + while (entry != NULL) { 11.149 + if (is_alive->do_object_b(entry->literal())) { 11.150 + p = entry->next_addr(); 11.151 + } else { 11.152 + *p = entry->next(); 11.153 + free_entry(entry); 11.154 + } 11.155 + entry = *p; 11.156 + } 11.157 + } 11.158 +} 11.159 + 11.160 +void ProtectionDomainCacheTable::oops_do(OopClosure* f) { 11.161 + for (int index = 0; index < table_size(); index++) { 11.162 + for (ProtectionDomainCacheEntry* probe = bucket(index); 11.163 + probe != NULL; 11.164 + probe = probe->next()) { 11.165 + probe->oops_do(f); 11.166 + } 11.167 + } 11.168 +} 11.169 + 11.170 +uint ProtectionDomainCacheTable::bucket_size() { 11.171 + return sizeof(ProtectionDomainCacheEntry); 11.172 +} 11.173 + 11.174 +#ifndef PRODUCT 11.175 +void ProtectionDomainCacheTable::print() { 11.176 + tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)", 11.177 + table_size(), number_of_entries()); 11.178 + for (int index = 0; index < table_size(); index++) { 11.179 + for 
(ProtectionDomainCacheEntry* probe = bucket(index); 11.180 + probe != NULL; 11.181 + probe = probe->next()) { 11.182 + probe->print(); 11.183 + } 11.184 + } 11.185 +} 11.186 + 11.187 +void ProtectionDomainCacheEntry::print() { 11.188 + tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT, 11.189 + this, (void*)literal(), _strongly_reachable, next()); 11.190 +} 11.191 +#endif 11.192 + 11.193 +void ProtectionDomainCacheTable::verify() { 11.194 + int element_count = 0; 11.195 + for (int index = 0; index < table_size(); index++) { 11.196 + for (ProtectionDomainCacheEntry* probe = bucket(index); 11.197 + probe != NULL; 11.198 + probe = probe->next()) { 11.199 + probe->verify(); 11.200 + element_count++; 11.201 + } 11.202 + } 11.203 + guarantee(number_of_entries() == element_count, 11.204 + "Verify of protection domain cache table failed"); 11.205 + debug_only(verify_lookup_length((double)number_of_entries() / table_size())); 11.206 +} 11.207 + 11.208 +void ProtectionDomainCacheEntry::verify() { 11.209 + guarantee(literal()->is_oop(), "must be an oop"); 11.210 +} 11.211 + 11.212 +void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) { 11.213 + // the caller marked the protection domain cache entries that we need to apply 11.214 + // the closure on. Only process them. 
11.215 + for (int index = 0; index < table_size(); index++) { 11.216 + for (ProtectionDomainCacheEntry* probe = bucket(index); 11.217 + probe != NULL; 11.218 + probe = probe->next()) { 11.219 + if (probe->is_strongly_reachable()) { 11.220 + probe->reset_strongly_reachable(); 11.221 + probe->oops_do(f); 11.222 + } 11.223 + } 11.224 + } 11.225 +} 11.226 + 11.227 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) { 11.228 + unsigned int hash = compute_hash(protection_domain); 11.229 + int index = hash_to_index(hash); 11.230 + 11.231 + ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain); 11.232 + if (entry == NULL) { 11.233 + entry = add_entry(index, hash, protection_domain); 11.234 + } 11.235 + return entry; 11.236 +} 11.237 + 11.238 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) { 11.239 + for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) { 11.240 + if (e->protection_domain() == protection_domain) { 11.241 + return e; 11.242 + } 11.243 + } 11.244 + 11.245 + return NULL; 11.246 +} 11.247 + 11.248 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) { 11.249 + assert_locked_or_safepoint(SystemDictionary_lock); 11.250 + assert(index == index_for(protection_domain), "incorrect index?"); 11.251 + assert(find_entry(index, protection_domain) == NULL, "no double entry"); 11.252 + 11.253 + ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain); 11.254 + Hashtable<oop, mtClass>::add_entry(index, p); 11.255 + return p; 11.256 +} 11.257 + 11.258 +void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) { 11.259 + unsigned int hash = compute_hash(to_delete->protection_domain()); 11.260 + int index = hash_to_index(hash); 11.261 + 11.262 + ProtectionDomainCacheEntry** p = bucket_addr(index); 11.263 + ProtectionDomainCacheEntry* entry = bucket(index); 
11.264 + while (true) { 11.265 + assert(entry != NULL, "sanity"); 11.266 + 11.267 + if (entry == to_delete) { 11.268 + *p = entry->next(); 11.269 + Hashtable<oop, mtClass>::free_entry(entry); 11.270 + break; 11.271 + } else { 11.272 + p = entry->next_addr(); 11.273 + entry = *p; 11.274 + } 11.275 + } 11.276 +} 11.277 + 11.278 SymbolPropertyTable::SymbolPropertyTable(int table_size) 11.279 : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry)) 11.280 { 11.281 @@ -532,11 +682,13 @@ 11.282 tty->cr(); 11.283 } 11.284 } 11.285 + tty->cr(); 11.286 + _pd_cache_table->print(); 11.287 + tty->cr(); 11.288 } 11.289 11.290 #endif 11.291 11.292 - 11.293 void Dictionary::verify() { 11.294 guarantee(number_of_entries() >= 0, "Verify of system dictionary failed"); 11.295 11.296 @@ -563,5 +715,7 @@ 11.297 guarantee(number_of_entries() == element_count, 11.298 "Verify of system dictionary failed"); 11.299 debug_only(verify_lookup_length((double)number_of_entries() / table_size())); 11.300 + 11.301 + _pd_cache_table->verify(); 11.302 } 11.303
12.1 --- a/src/share/vm/classfile/dictionary.hpp Fri Oct 11 17:08:22 2013 -0400 12.2 +++ b/src/share/vm/classfile/dictionary.hpp Fri Oct 11 22:22:19 2013 -0400 12.3 @@ -27,11 +27,14 @@ 12.4 12.5 #include "classfile/systemDictionary.hpp" 12.6 #include "oops/instanceKlass.hpp" 12.7 -#include "oops/oop.hpp" 12.8 +#include "oops/oop.inline.hpp" 12.9 #include "utilities/hashtable.hpp" 12.10 12.11 class DictionaryEntry; 12.12 class PSPromotionManager; 12.13 +class ProtectionDomainCacheTable; 12.14 +class ProtectionDomainCacheEntry; 12.15 +class BoolObjectClosure; 12.16 12.17 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 12.18 // The data structure for the system dictionary (and the shared system 12.19 @@ -45,6 +48,8 @@ 12.20 // pointer to the current hash table entry. 12.21 static DictionaryEntry* _current_class_entry; 12.22 12.23 + ProtectionDomainCacheTable* _pd_cache_table; 12.24 + 12.25 DictionaryEntry* get_entry(int index, unsigned int hash, 12.26 Symbol* name, ClassLoaderData* loader_data); 12.27 12.28 @@ -93,6 +98,7 @@ 12.29 12.30 void methods_do(void f(Method*)); 12.31 12.32 + void unlink(BoolObjectClosure* is_alive); 12.33 12.34 // Classes loaded by the bootstrap loader are always strongly reachable. 12.35 // If we're not doing class unloading, all classes are strongly reachable. 12.36 @@ -118,6 +124,7 @@ 12.37 // Sharing support 12.38 void reorder_dictionary(); 12.39 12.40 + ProtectionDomainCacheEntry* cache_get(oop protection_domain); 12.41 12.42 #ifndef PRODUCT 12.43 void print(); 12.44 @@ -126,21 +133,112 @@ 12.45 }; 12.46 12.47 // The following classes can be in dictionary.cpp, but we need these 12.48 -// to be in header file so that SA's vmStructs can access. 12.49 +// to be in header file so that SA's vmStructs can access them. 
12.50 +class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> { 12.51 + friend class VMStructs; 12.52 + private: 12.53 + // Flag indicating whether this protection domain entry is strongly reachable. 12.54 + // Used during iterating over the system dictionary to remember oops that need 12.55 + // to be updated. 12.56 + bool _strongly_reachable; 12.57 + public: 12.58 + oop protection_domain() { return literal(); } 12.59 + 12.60 + void init() { 12.61 + _strongly_reachable = false; 12.62 + } 12.63 + 12.64 + ProtectionDomainCacheEntry* next() { 12.65 + return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next(); 12.66 + } 12.67 + 12.68 + ProtectionDomainCacheEntry** next_addr() { 12.69 + return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr(); 12.70 + } 12.71 + 12.72 + void oops_do(OopClosure* f) { 12.73 + f->do_oop(literal_addr()); 12.74 + } 12.75 + 12.76 + void set_strongly_reachable() { _strongly_reachable = true; } 12.77 + bool is_strongly_reachable() { return _strongly_reachable; } 12.78 + void reset_strongly_reachable() { _strongly_reachable = false; } 12.79 + 12.80 + void print() PRODUCT_RETURN; 12.81 + void verify(); 12.82 +}; 12.83 + 12.84 +// The ProtectionDomainCacheTable contains all protection domain oops. The system 12.85 +// dictionary entries reference its entries instead of having references to oops 12.86 +// directly. 12.87 +// This is used to speed up system dictionary iteration: the oops in the 12.88 +// protection domain are the only ones referring the Java heap. So when there is 12.89 +// need to update these, instead of going over every entry of the system dictionary, 12.90 +// we only need to iterate over this set. 12.91 +// The amount of different protection domains used is typically magnitudes smaller 12.92 +// than the number of system dictionary entries (loaded classes). 
12.93 +class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> { 12.94 + friend class VMStructs; 12.95 +private: 12.96 + ProtectionDomainCacheEntry* bucket(int i) { 12.97 + return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i); 12.98 + } 12.99 + 12.100 + // The following method is not MT-safe and must be done under lock. 12.101 + ProtectionDomainCacheEntry** bucket_addr(int i) { 12.102 + return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i); 12.103 + } 12.104 + 12.105 + ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) { 12.106 + ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain); 12.107 + entry->init(); 12.108 + return entry; 12.109 + } 12.110 + 12.111 + static unsigned int compute_hash(oop protection_domain) { 12.112 + return (unsigned int)(protection_domain->identity_hash()); 12.113 + } 12.114 + 12.115 + int index_for(oop protection_domain) { 12.116 + return hash_to_index(compute_hash(protection_domain)); 12.117 + } 12.118 + 12.119 + ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain); 12.120 + ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain); 12.121 + 12.122 +public: 12.123 + 12.124 + ProtectionDomainCacheTable(int table_size); 12.125 + 12.126 + ProtectionDomainCacheEntry* get(oop protection_domain); 12.127 + void free(ProtectionDomainCacheEntry* entry); 12.128 + 12.129 + void unlink(BoolObjectClosure* cl); 12.130 + 12.131 + // GC support 12.132 + void oops_do(OopClosure* f); 12.133 + void always_strong_oops_do(OopClosure* f); 12.134 + 12.135 + static uint bucket_size(); 12.136 + 12.137 + void print() PRODUCT_RETURN; 12.138 + void verify(); 12.139 +}; 12.140 + 12.141 12.142 class ProtectionDomainEntry :public CHeapObj<mtClass> { 12.143 friend class VMStructs; 12.144 public: 12.145 ProtectionDomainEntry* _next; 12.146 - oop _protection_domain; 
12.147 + ProtectionDomainCacheEntry* _pd_cache; 12.148 12.149 - ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) { 12.150 - _protection_domain = protection_domain; 12.151 - _next = next; 12.152 + ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) { 12.153 + _pd_cache = pd_cache; 12.154 + _next = next; 12.155 } 12.156 12.157 ProtectionDomainEntry* next() { return _next; } 12.158 - oop protection_domain() { return _protection_domain; } 12.159 + oop protection_domain() { return _pd_cache->protection_domain(); } 12.160 }; 12.161 12.162 // An entry in the system dictionary, this describes a class as 12.163 @@ -151,6 +249,24 @@ 12.164 private: 12.165 // Contains the set of approved protection domains that can access 12.166 // this system dictionary entry. 12.167 + // 12.168 + // This protection domain set is a set of tuples: 12.169 + // 12.170 + // (InstanceKlass C, initiating class loader ICL, Protection Domain PD) 12.171 + // 12.172 + // [Note that C.protection_domain(), which is stored in the java.lang.Class 12.173 + // mirror of C, is NOT the same as PD] 12.174 + // 12.175 + // If such an entry (C, ICL, PD) exists in the table, it means that 12.176 + // it is okay for a class Foo to reference C, where 12.177 + // 12.178 + // Foo.protection_domain() == PD, and 12.179 + // Foo's defining class loader == ICL 12.180 + // 12.181 + // The usage of the PD set can be seen in SystemDictionary::validate_protection_domain() 12.182 + // It is essentially a cache to avoid repeated Java up-calls to 12.183 + // ClassLoader.checkPackageAccess(). 12.184 + // 12.185 ProtectionDomainEntry* _pd_set; 12.186 ClassLoaderData* _loader_data; 12.187 12.188 @@ -158,7 +274,7 @@ 12.189 // Tells whether a protection is in the approved set. 12.190 bool contains_protection_domain(oop protection_domain) const; 12.191 // Adds a protection domain to the approved set. 
12.192 - void add_protection_domain(oop protection_domain); 12.193 + void add_protection_domain(Dictionary* dict, oop protection_domain); 12.194 12.195 Klass* klass() const { return (Klass*)literal(); } 12.196 Klass** klass_addr() { return (Klass**)literal_addr(); } 12.197 @@ -189,12 +305,11 @@ 12.198 : contains_protection_domain(protection_domain()); 12.199 } 12.200 12.201 - 12.202 - void protection_domain_set_oops_do(OopClosure* f) { 12.203 + void set_strongly_reachable() { 12.204 for (ProtectionDomainEntry* current = _pd_set; 12.205 current != NULL; 12.206 current = current->_next) { 12.207 - f->do_oop(&(current->_protection_domain)); 12.208 + current->_pd_cache->set_strongly_reachable(); 12.209 } 12.210 } 12.211 12.212 @@ -202,7 +317,7 @@ 12.213 for (ProtectionDomainEntry* current = _pd_set; 12.214 current != NULL; 12.215 current = current->_next) { 12.216 - current->_protection_domain->verify(); 12.217 + current->_pd_cache->protection_domain()->verify(); 12.218 } 12.219 } 12.220
13.1 --- a/src/share/vm/classfile/systemDictionary.cpp Fri Oct 11 17:08:22 2013 -0400 13.2 +++ b/src/share/vm/classfile/systemDictionary.cpp Fri Oct 11 22:22:19 2013 -0400 13.3 @@ -1697,6 +1697,24 @@ 13.4 return newsize; 13.5 } 13.6 13.7 +#ifdef ASSERT 13.8 +class VerifySDReachableAndLiveClosure : public OopClosure { 13.9 +private: 13.10 + BoolObjectClosure* _is_alive; 13.11 + 13.12 + template <class T> void do_oop_work(T* p) { 13.13 + oop obj = oopDesc::load_decode_heap_oop(p); 13.14 + guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live"); 13.15 + } 13.16 + 13.17 +public: 13.18 + VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { } 13.19 + 13.20 + virtual void do_oop(oop* p) { do_oop_work(p); } 13.21 + virtual void do_oop(narrowOop* p) { do_oop_work(p); } 13.22 +}; 13.23 +#endif 13.24 + 13.25 // Assumes classes in the SystemDictionary are only unloaded at a safepoint 13.26 // Note: anonymous classes are not in the SD. 13.27 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) { 13.28 @@ -1707,7 +1725,15 @@ 13.29 unloading_occurred = dictionary()->do_unloading(); 13.30 constraints()->purge_loader_constraints(); 13.31 resolution_errors()->purge_resolution_errors(); 13.32 -} 13.33 + } 13.34 + // Oops referenced by the system dictionary may get unreachable independently 13.35 + // of the class loader (eg. cached protection domain oops). So we need to 13.36 + // explicitly unlink them here instead of in Dictionary::do_unloading. 13.37 + dictionary()->unlink(is_alive); 13.38 +#ifdef ASSERT 13.39 + VerifySDReachableAndLiveClosure cl(is_alive); 13.40 + dictionary()->oops_do(&cl); 13.41 +#endif 13.42 return unloading_occurred; 13.43 } 13.44
14.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Oct 11 17:08:22 2013 -0400 14.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Oct 11 22:22:19 2013 -0400 14.3 @@ -6035,7 +6035,11 @@ 14.4 // is dirty. 14.5 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); 14.6 MemRegion mr(hr->bottom(), hr->pre_dummy_top()); 14.7 - ct_bs->verify_dirty_region(mr); 14.8 + if (hr->is_young()) { 14.9 + ct_bs->verify_g1_young_region(mr); 14.10 + } else { 14.11 + ct_bs->verify_dirty_region(mr); 14.12 + } 14.13 } 14.14 14.15 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
15.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Oct 11 17:08:22 2013 -0400 15.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Oct 11 22:22:19 2013 -0400 15.3 @@ -29,6 +29,7 @@ 15.4 #include "gc_implementation/g1/g1CollectedHeap.hpp" 15.5 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" 15.6 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 15.7 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 15.8 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 15.9 #include "utilities/taskqueue.hpp" 15.10 15.11 @@ -134,7 +135,7 @@ 15.12 assert(containing_hr->is_in(end - 1), "it should also contain end - 1"); 15.13 15.14 MemRegion mr(start, end); 15.15 - g1_barrier_set()->dirty(mr); 15.16 + g1_barrier_set()->g1_mark_as_young(mr); 15.17 } 15.18 15.19 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
16.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Oct 11 17:08:22 2013 -0400 16.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Oct 11 22:22:19 2013 -0400 16.3 @@ -319,10 +319,10 @@ 16.4 } 16.5 16.6 void G1CollectorPolicy::initialize_flags() { 16.7 - set_min_alignment(HeapRegion::GrainBytes); 16.8 + _min_alignment = HeapRegion::GrainBytes; 16.9 size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name()); 16.10 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); 16.11 - set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size)); 16.12 + _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size); 16.13 if (SurvivorRatio < 1) { 16.14 vm_exit_during_initialization("Invalid survivor ratio specified"); 16.15 }
17.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Fri Oct 11 17:08:22 2013 -0400 17.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Fri Oct 11 22:22:19 2013 -0400 17.3 @@ -70,6 +70,12 @@ 17.4 if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { 17.5 return false; 17.6 } 17.7 + 17.8 + if (val == g1_young_gen) { 17.9 + // the card is for a young gen region. We don't need to keep track of all pointers into young 17.10 + return false; 17.11 + } 17.12 + 17.13 // Cached bit can be installed either on a clean card or on a claimed card. 17.14 jbyte new_val = val; 17.15 if (val == clean_card_val()) { 17.16 @@ -85,6 +91,19 @@ 17.17 return true; 17.18 } 17.19 17.20 +void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) { 17.21 + jbyte *const first = byte_for(mr.start()); 17.22 + jbyte *const last = byte_after(mr.last()); 17.23 + 17.24 + memset(first, g1_young_gen, last - first); 17.25 +} 17.26 + 17.27 +#ifndef PRODUCT 17.28 +void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) { 17.29 + verify_region(mr, g1_young_gen, true); 17.30 +} 17.31 +#endif 17.32 + 17.33 G1SATBCardTableLoggingModRefBS:: 17.34 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, 17.35 int max_covered_regions) : 17.36 @@ -97,7 +116,11 @@ 17.37 void 17.38 G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field, 17.39 oop new_val) { 17.40 - jbyte* byte = byte_for(field); 17.41 + volatile jbyte* byte = byte_for(field); 17.42 + if (*byte == g1_young_gen) { 17.43 + return; 17.44 + } 17.45 + OrderAccess::storeload(); 17.46 if (*byte != dirty_card) { 17.47 *byte = dirty_card; 17.48 Thread* thr = Thread::current(); 17.49 @@ -129,7 +152,7 @@ 17.50 17.51 void 17.52 G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) { 17.53 - jbyte* byte = byte_for(mr.start()); 17.54 + volatile jbyte* byte = byte_for(mr.start()); 17.55 jbyte* last_byte = byte_for(mr.last()); 17.56 Thread* thr = 
Thread::current(); 17.57 if (whole_heap) { 17.58 @@ -138,25 +161,35 @@ 17.59 byte++; 17.60 } 17.61 } else { 17.62 - // Enqueue if necessary. 17.63 - if (thr->is_Java_thread()) { 17.64 - JavaThread* jt = (JavaThread*)thr; 17.65 - while (byte <= last_byte) { 17.66 - if (*byte != dirty_card) { 17.67 - *byte = dirty_card; 17.68 - jt->dirty_card_queue().enqueue(byte); 17.69 + // skip all consecutive young cards 17.70 + for (; byte <= last_byte && *byte == g1_young_gen; byte++); 17.71 + 17.72 + if (byte <= last_byte) { 17.73 + OrderAccess::storeload(); 17.74 + // Enqueue if necessary. 17.75 + if (thr->is_Java_thread()) { 17.76 + JavaThread* jt = (JavaThread*)thr; 17.77 + for (; byte <= last_byte; byte++) { 17.78 + if (*byte == g1_young_gen) { 17.79 + continue; 17.80 + } 17.81 + if (*byte != dirty_card) { 17.82 + *byte = dirty_card; 17.83 + jt->dirty_card_queue().enqueue(byte); 17.84 + } 17.85 } 17.86 - byte++; 17.87 - } 17.88 - } else { 17.89 - MutexLockerEx x(Shared_DirtyCardQ_lock, 17.90 - Mutex::_no_safepoint_check_flag); 17.91 - while (byte <= last_byte) { 17.92 - if (*byte != dirty_card) { 17.93 - *byte = dirty_card; 17.94 - _dcqs.shared_dirty_card_queue()->enqueue(byte); 17.95 + } else { 17.96 + MutexLockerEx x(Shared_DirtyCardQ_lock, 17.97 + Mutex::_no_safepoint_check_flag); 17.98 + for (; byte <= last_byte; byte++) { 17.99 + if (*byte == g1_young_gen) { 17.100 + continue; 17.101 + } 17.102 + if (*byte != dirty_card) { 17.103 + *byte = dirty_card; 17.104 + _dcqs.shared_dirty_card_queue()->enqueue(byte); 17.105 + } 17.106 } 17.107 - byte++; 17.108 } 17.109 } 17.110 }
18.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Oct 11 17:08:22 2013 -0400 18.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Oct 11 22:22:19 2013 -0400 18.3 @@ -38,7 +38,14 @@ 18.4 // snapshot-at-the-beginning marking. 18.5 18.6 class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS { 18.7 +protected: 18.8 + enum G1CardValues { 18.9 + g1_young_gen = CT_MR_BS_last_reserved << 1 18.10 + }; 18.11 + 18.12 public: 18.13 + static int g1_young_card_val() { return g1_young_gen; } 18.14 + 18.15 // Add "pre_val" to a set of objects that may have been disconnected from the 18.16 // pre-marking object graph. 18.17 static void enqueue(oop pre_val); 18.18 @@ -118,6 +125,9 @@ 18.19 _byte_map[card_index] = val; 18.20 } 18.21 18.22 + void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN; 18.23 + void g1_mark_as_young(const MemRegion& mr); 18.24 + 18.25 bool mark_card_deferred(size_t card_index); 18.26 18.27 bool is_card_deferred(size_t card_index) {
19.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp Fri Oct 11 17:08:22 2013 -0400 19.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp Fri Oct 11 22:22:19 2013 -0400 19.3 @@ -80,6 +80,10 @@ 19.4 19.5 void reset() { if (_buf != NULL) _index = _sz; } 19.6 19.7 + void enqueue(volatile void* ptr) { 19.8 + enqueue((void*)(ptr)); 19.9 + } 19.10 + 19.11 // Enqueues the given "obj". 19.12 void enqueue(void* ptr) { 19.13 if (!_active) return;
20.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Fri Oct 11 17:08:22 2013 -0400 20.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Fri Oct 11 22:22:19 2013 -0400 20.3 @@ -214,9 +214,6 @@ 20.4 : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true), 20.5 _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) { 20.6 } 20.7 - ~VM_CollectForMetadataAllocation() { 20.8 - MetaspaceGC::set_expand_after_GC(false); 20.9 - } 20.10 virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; } 20.11 virtual void doit(); 20.12 MetaWord* result() const { return _result; }
21.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp Fri Oct 11 17:08:22 2013 -0400 21.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Fri Oct 11 22:22:19 2013 -0400 21.3 @@ -202,12 +202,6 @@ 21.4 ShouldNotReachHere(); // Unexpected use of this function 21.5 } 21.6 } 21.7 -MetaWord* CollectedHeap::satisfy_failed_metadata_allocation( 21.8 - ClassLoaderData* loader_data, 21.9 - size_t size, Metaspace::MetadataType mdtype) { 21.10 - return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype); 21.11 -} 21.12 - 21.13 21.14 void CollectedHeap::pre_initialize() { 21.15 // Used for ReduceInitialCardMarks (when COMPILER2 is used);
22.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp Fri Oct 11 17:08:22 2013 -0400 22.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Fri Oct 11 22:22:19 2013 -0400 22.3 @@ -475,11 +475,6 @@ 22.4 // the context of the vm thread. 22.5 virtual void collect_as_vm_thread(GCCause::Cause cause); 22.6 22.7 - // Callback from VM_CollectForMetadataAllocation operation. 22.8 - MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, 22.9 - size_t size, 22.10 - Metaspace::MetadataType mdtype); 22.11 - 22.12 // Returns the barrier set for this heap 22.13 BarrierSet* barrier_set() { return _barrier_set; } 22.14
23.1 --- a/src/share/vm/memory/collectorPolicy.cpp Fri Oct 11 17:08:22 2013 -0400 23.2 +++ b/src/share/vm/memory/collectorPolicy.cpp Fri Oct 11 22:22:19 2013 -0400 23.3 @@ -47,85 +47,53 @@ 23.4 23.5 // CollectorPolicy methods. 23.6 23.7 -// Align down. If the aligning result in 0, return 'alignment'. 23.8 -static size_t restricted_align_down(size_t size, size_t alignment) { 23.9 - return MAX2(alignment, align_size_down_(size, alignment)); 23.10 -} 23.11 - 23.12 void CollectorPolicy::initialize_flags() { 23.13 - assert(max_alignment() >= min_alignment(), 23.14 - err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, 23.15 - max_alignment(), min_alignment())); 23.16 - assert(max_alignment() % min_alignment() == 0, 23.17 - err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, 23.18 - max_alignment(), min_alignment())); 23.19 + assert(_max_alignment >= _min_alignment, 23.20 + err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, 23.21 + _max_alignment, _min_alignment)); 23.22 + assert(_max_alignment % _min_alignment == 0, 23.23 + err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, 23.24 + _max_alignment, _min_alignment)); 23.25 23.26 if (MaxHeapSize < InitialHeapSize) { 23.27 vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); 23.28 } 23.29 23.30 - // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 23.31 - // override if MaxMetaspaceSize was set on the command line or not. 23.32 - // This information is needed later to conform to the specification of the 23.33 - // java.lang.management.MemoryUsage API. 23.34 - // 23.35 - // Ideally, we would be able to set the default value of MaxMetaspaceSize in 23.36 - // globals.hpp to the aligned value, but this is not possible, since the 23.37 - // alignment depends on other flags being parsed. 
23.38 - MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment()); 23.39 - 23.40 - if (MetaspaceSize > MaxMetaspaceSize) { 23.41 - MetaspaceSize = MaxMetaspaceSize; 23.42 - } 23.43 - 23.44 - MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment()); 23.45 - 23.46 - assert(MetaspaceSize <= MaxMetaspaceSize, "Must be"); 23.47 - 23.48 - MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment()); 23.49 - MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment()); 23.50 - 23.51 - MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment()); 23.52 - 23.53 - assert(MetaspaceSize % min_alignment() == 0, "metapace alignment"); 23.54 - assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment"); 23.55 - if (MetaspaceSize < 256*K) { 23.56 - vm_exit_during_initialization("Too small initial Metaspace size"); 23.57 - } 23.58 + MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment); 23.59 } 23.60 23.61 void CollectorPolicy::initialize_size_info() { 23.62 // User inputs from -mx and ms must be aligned 23.63 - set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment())); 23.64 - set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment())); 23.65 - set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment())); 23.66 + _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment); 23.67 + _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment); 23.68 + _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment); 23.69 23.70 // Check heap parameter properties 23.71 - if (initial_heap_byte_size() < M) { 23.72 + if (_initial_heap_byte_size < M) { 23.73 vm_exit_during_initialization("Too small initial heap"); 23.74 } 23.75 // Check heap parameter properties 23.76 - if (min_heap_byte_size() < M) { 23.77 + if (_min_heap_byte_size < M) { 23.78 vm_exit_during_initialization("Too small 
minimum heap"); 23.79 } 23.80 - if (initial_heap_byte_size() <= NewSize) { 23.81 + if (_initial_heap_byte_size <= NewSize) { 23.82 // make sure there is at least some room in old space 23.83 vm_exit_during_initialization("Too small initial heap for new size specified"); 23.84 } 23.85 - if (max_heap_byte_size() < min_heap_byte_size()) { 23.86 + if (_max_heap_byte_size < _min_heap_byte_size) { 23.87 vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); 23.88 } 23.89 - if (initial_heap_byte_size() < min_heap_byte_size()) { 23.90 + if (_initial_heap_byte_size < _min_heap_byte_size) { 23.91 vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); 23.92 } 23.93 - if (max_heap_byte_size() < initial_heap_byte_size()) { 23.94 + if (_max_heap_byte_size < _initial_heap_byte_size) { 23.95 vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); 23.96 } 23.97 23.98 if (PrintGCDetails && Verbose) { 23.99 gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " 23.100 SIZE_FORMAT " Maximum heap " SIZE_FORMAT, 23.101 - min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size()); 23.102 + _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size); 23.103 } 23.104 } 23.105 23.106 @@ -180,15 +148,15 @@ 23.107 23.108 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { 23.109 size_t x = base_size / (NewRatio+1); 23.110 - size_t new_gen_size = x > min_alignment() ? 23.111 - align_size_down(x, min_alignment()) : 23.112 - min_alignment(); 23.113 + size_t new_gen_size = x > _min_alignment ? 
23.114 + align_size_down(x, _min_alignment) : 23.115 + _min_alignment; 23.116 return new_gen_size; 23.117 } 23.118 23.119 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, 23.120 size_t maximum_size) { 23.121 - size_t alignment = min_alignment(); 23.122 + size_t alignment = _min_alignment; 23.123 size_t max_minus = maximum_size - alignment; 23.124 return desired_size < max_minus ? desired_size : max_minus; 23.125 } 23.126 @@ -207,8 +175,8 @@ 23.127 23.128 void GenCollectorPolicy::initialize_flags() { 23.129 // All sizes must be multiples of the generation granularity. 23.130 - set_min_alignment((uintx) Generation::GenGrain); 23.131 - set_max_alignment(compute_max_alignment()); 23.132 + _min_alignment = (uintx) Generation::GenGrain; 23.133 + _max_alignment = compute_max_alignment(); 23.134 23.135 CollectorPolicy::initialize_flags(); 23.136 23.137 @@ -218,26 +186,26 @@ 23.138 if (NewSize > MaxNewSize) { 23.139 MaxNewSize = NewSize; 23.140 } 23.141 - NewSize = align_size_down(NewSize, min_alignment()); 23.142 - MaxNewSize = align_size_down(MaxNewSize, min_alignment()); 23.143 + NewSize = align_size_down(NewSize, _min_alignment); 23.144 + MaxNewSize = align_size_down(MaxNewSize, _min_alignment); 23.145 23.146 // Check validity of heap flags 23.147 - assert(NewSize % min_alignment() == 0, "eden space alignment"); 23.148 - assert(MaxNewSize % min_alignment() == 0, "survivor space alignment"); 23.149 + assert(NewSize % _min_alignment == 0, "eden space alignment"); 23.150 + assert(MaxNewSize % _min_alignment == 0, "survivor space alignment"); 23.151 23.152 - if (NewSize < 3*min_alignment()) { 23.153 + if (NewSize < 3 * _min_alignment) { 23.154 // make sure there room for eden and two survivor spaces 23.155 vm_exit_during_initialization("Too small new size specified"); 23.156 } 23.157 if (SurvivorRatio < 1 || NewRatio < 1) { 23.158 - vm_exit_during_initialization("Invalid heap ratio specified"); 23.159 + vm_exit_during_initialization("Invalid young gen 
ratio specified"); 23.160 } 23.161 } 23.162 23.163 void TwoGenerationCollectorPolicy::initialize_flags() { 23.164 GenCollectorPolicy::initialize_flags(); 23.165 23.166 - OldSize = align_size_down(OldSize, min_alignment()); 23.167 + OldSize = align_size_down(OldSize, _min_alignment); 23.168 23.169 if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) { 23.170 // NewRatio will be used later to set the young generation size so we use 23.171 @@ -246,11 +214,11 @@ 23.172 assert(NewRatio > 0, "NewRatio should have been set up earlier"); 23.173 size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); 23.174 23.175 - calculated_heapsize = align_size_up(calculated_heapsize, max_alignment()); 23.176 + calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment); 23.177 MaxHeapSize = calculated_heapsize; 23.178 InitialHeapSize = calculated_heapsize; 23.179 } 23.180 - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); 23.181 + MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); 23.182 23.183 // adjust max heap size if necessary 23.184 if (NewSize + OldSize > MaxHeapSize) { 23.185 @@ -260,18 +228,18 @@ 23.186 uintx calculated_size = NewSize + OldSize; 23.187 double shrink_factor = (double) MaxHeapSize / calculated_size; 23.188 // align 23.189 - NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); 23.190 + NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment); 23.191 // OldSize is already aligned because above we aligned MaxHeapSize to 23.192 - // max_alignment(), and we just made sure that NewSize is aligned to 23.193 - // min_alignment(). In initialize_flags() we verified that max_alignment() 23.194 - // is a multiple of min_alignment(). 23.195 + // _max_alignment, and we just made sure that NewSize is aligned to 23.196 + // _min_alignment. In initialize_flags() we verified that _max_alignment 23.197 + // is a multiple of _min_alignment. 
23.198 OldSize = MaxHeapSize - NewSize; 23.199 } else { 23.200 MaxHeapSize = NewSize + OldSize; 23.201 } 23.202 } 23.203 // need to do this again 23.204 - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); 23.205 + MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); 23.206 23.207 // adjust max heap size if necessary 23.208 if (NewSize + OldSize > MaxHeapSize) { 23.209 @@ -281,24 +249,24 @@ 23.210 uintx calculated_size = NewSize + OldSize; 23.211 double shrink_factor = (double) MaxHeapSize / calculated_size; 23.212 // align 23.213 - NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); 23.214 + NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment); 23.215 // OldSize is already aligned because above we aligned MaxHeapSize to 23.216 - // max_alignment(), and we just made sure that NewSize is aligned to 23.217 - // min_alignment(). In initialize_flags() we verified that max_alignment() 23.218 - // is a multiple of min_alignment(). 23.219 + // _max_alignment, and we just made sure that NewSize is aligned to 23.220 + // _min_alignment. In initialize_flags() we verified that _max_alignment 23.221 + // is a multiple of _min_alignment. 
23.222 OldSize = MaxHeapSize - NewSize; 23.223 } else { 23.224 MaxHeapSize = NewSize + OldSize; 23.225 } 23.226 } 23.227 // need to do this again 23.228 - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); 23.229 + MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); 23.230 23.231 always_do_update_barrier = UseConcMarkSweepGC; 23.232 23.233 // Check validity of heap flags 23.234 - assert(OldSize % min_alignment() == 0, "old space alignment"); 23.235 - assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment"); 23.236 + assert(OldSize % _min_alignment == 0, "old space alignment"); 23.237 + assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment"); 23.238 } 23.239 23.240 // Values set on the command line win over any ergonomically 23.241 @@ -313,7 +281,7 @@ 23.242 void GenCollectorPolicy::initialize_size_info() { 23.243 CollectorPolicy::initialize_size_info(); 23.244 23.245 - // min_alignment() is used for alignment within a generation. 23.246 + // _min_alignment is used for alignment within a generation. 23.247 // There is additional alignment done down stream for some 23.248 // collectors that sometimes causes unwanted rounding up of 23.249 // generations sizes. 23.250 @@ -322,18 +290,18 @@ 23.251 23.252 size_t max_new_size = 0; 23.253 if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) { 23.254 - if (MaxNewSize < min_alignment()) { 23.255 - max_new_size = min_alignment(); 23.256 + if (MaxNewSize < _min_alignment) { 23.257 + max_new_size = _min_alignment; 23.258 } 23.259 - if (MaxNewSize >= max_heap_byte_size()) { 23.260 - max_new_size = align_size_down(max_heap_byte_size() - min_alignment(), 23.261 - min_alignment()); 23.262 + if (MaxNewSize >= _max_heap_byte_size) { 23.263 + max_new_size = align_size_down(_max_heap_byte_size - _min_alignment, 23.264 + _min_alignment); 23.265 warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " 23.266 "greater than the entire heap (" SIZE_FORMAT "k). 
A " 23.267 "new generation size of " SIZE_FORMAT "k will be used.", 23.268 - MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K); 23.269 + MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K); 23.270 } else { 23.271 - max_new_size = align_size_down(MaxNewSize, min_alignment()); 23.272 + max_new_size = align_size_down(MaxNewSize, _min_alignment); 23.273 } 23.274 23.275 // The case for FLAG_IS_ERGO(MaxNewSize) could be treated 23.276 @@ -351,7 +319,7 @@ 23.277 // just accept those choices. The choices currently made are 23.278 // not always "wise". 23.279 } else { 23.280 - max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size()); 23.281 + max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size); 23.282 // Bound the maximum size by NewSize below (since it historically 23.283 // would have been NewSize and because the NewRatio calculation could 23.284 // yield a size that is too small) and bound it by MaxNewSize above. 23.285 @@ -364,13 +332,13 @@ 23.286 // Given the maximum gen0 size, determine the initial and 23.287 // minimum gen0 sizes. 23.288 23.289 - if (max_heap_byte_size() == min_heap_byte_size()) { 23.290 + if (_max_heap_byte_size == _min_heap_byte_size) { 23.291 // The maximum and minimum heap sizes are the same so 23.292 // the generations minimum and initial must be the 23.293 // same as its maximum. 23.294 - set_min_gen0_size(max_new_size); 23.295 - set_initial_gen0_size(max_new_size); 23.296 - set_max_gen0_size(max_new_size); 23.297 + _min_gen0_size = max_new_size; 23.298 + _initial_gen0_size = max_new_size; 23.299 + _max_gen0_size = max_new_size; 23.300 } else { 23.301 size_t desired_new_size = 0; 23.302 if (!FLAG_IS_DEFAULT(NewSize)) { 23.303 @@ -391,43 +359,37 @@ 23.304 // Use the default NewSize as the floor for these values. If 23.305 // NewRatio is overly large, the resulting sizes can be too 23.306 // small. 
23.307 - _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()), 23.308 - NewSize); 23.309 + _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize); 23.310 desired_new_size = 23.311 - MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()), 23.312 - NewSize); 23.313 + MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize); 23.314 } 23.315 23.316 assert(_min_gen0_size > 0, "Sanity check"); 23.317 - set_initial_gen0_size(desired_new_size); 23.318 - set_max_gen0_size(max_new_size); 23.319 + _initial_gen0_size = desired_new_size; 23.320 + _max_gen0_size = max_new_size; 23.321 23.322 // At this point the desirable initial and minimum sizes have been 23.323 // determined without regard to the maximum sizes. 23.324 23.325 // Bound the sizes by the corresponding overall heap sizes. 23.326 - set_min_gen0_size( 23.327 - bound_minus_alignment(_min_gen0_size, min_heap_byte_size())); 23.328 - set_initial_gen0_size( 23.329 - bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size())); 23.330 - set_max_gen0_size( 23.331 - bound_minus_alignment(_max_gen0_size, max_heap_byte_size())); 23.332 + _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size); 23.333 + _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size); 23.334 + _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size); 23.335 23.336 // At this point all three sizes have been checked against the 23.337 // maximum sizes but have not been checked for consistency 23.338 // among the three. 
23.339 23.340 // Final check min <= initial <= max 23.341 - set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size)); 23.342 - set_initial_gen0_size( 23.343 - MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size)); 23.344 - set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size)); 23.345 + _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size); 23.346 + _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size); 23.347 + _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size); 23.348 } 23.349 23.350 if (PrintGCDetails && Verbose) { 23.351 gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 23.352 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 23.353 - min_gen0_size(), initial_gen0_size(), max_gen0_size()); 23.354 + _min_gen0_size, _initial_gen0_size, _max_gen0_size); 23.355 } 23.356 } 23.357 23.358 @@ -447,19 +409,17 @@ 23.359 23.360 if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) { 23.361 if ((heap_size < (*gen0_size_ptr + min_gen1_size)) && 23.362 - (heap_size >= min_gen1_size + min_alignment())) { 23.363 + (heap_size >= min_gen1_size + _min_alignment)) { 23.364 // Adjust gen0 down to accommodate min_gen1_size 23.365 *gen0_size_ptr = heap_size - min_gen1_size; 23.366 *gen0_size_ptr = 23.367 - MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()), 23.368 - min_alignment()); 23.369 + MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment); 23.370 assert(*gen0_size_ptr > 0, "Min gen0 is too large"); 23.371 result = true; 23.372 } else { 23.373 *gen1_size_ptr = heap_size - *gen0_size_ptr; 23.374 *gen1_size_ptr = 23.375 - MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()), 23.376 - min_alignment()); 23.377 + MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment); 23.378 } 23.379 } 23.380 return result; 23.381 @@ -480,10 +440,9 @@ 23.382 // The maximum gen1 size can be determined from the maximum gen0 23.383 // and maximum heap size since no explicit flags 
exits 23.384 // for setting the gen1 maximum. 23.385 - _max_gen1_size = max_heap_byte_size() - _max_gen0_size; 23.386 + _max_gen1_size = _max_heap_byte_size - _max_gen0_size; 23.387 _max_gen1_size = 23.388 - MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()), 23.389 - min_alignment()); 23.390 + MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment); 23.391 // If no explicit command line flag has been set for the 23.392 // gen1 size, use what is left for gen1. 23.393 if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) { 23.394 @@ -492,70 +451,66 @@ 23.395 // with the overall heap size). In either case make 23.396 // the minimum, maximum and initial sizes consistent 23.397 // with the gen0 sizes and the overall heap sizes. 23.398 - assert(min_heap_byte_size() > _min_gen0_size, 23.399 + assert(_min_heap_byte_size > _min_gen0_size, 23.400 "gen0 has an unexpected minimum size"); 23.401 - set_min_gen1_size(min_heap_byte_size() - min_gen0_size()); 23.402 - set_min_gen1_size( 23.403 - MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()), 23.404 - min_alignment())); 23.405 - set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size()); 23.406 - set_initial_gen1_size( 23.407 - MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()), 23.408 - min_alignment())); 23.409 - 23.410 + _min_gen1_size = _min_heap_byte_size - _min_gen0_size; 23.411 + _min_gen1_size = 23.412 + MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment); 23.413 + _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size; 23.414 + _initial_gen1_size = 23.415 + MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment); 23.416 } else { 23.417 // It's been explicitly set on the command line. Use the 23.418 // OldSize and then determine the consequences. 
23.419 - set_min_gen1_size(OldSize); 23.420 - set_initial_gen1_size(OldSize); 23.421 + _min_gen1_size = OldSize; 23.422 + _initial_gen1_size = OldSize; 23.423 23.424 // If the user has explicitly set an OldSize that is inconsistent 23.425 // with other command line flags, issue a warning. 23.426 // The generation minimums and the overall heap mimimum should 23.427 // be within one heap alignment. 23.428 - if ((_min_gen1_size + _min_gen0_size + min_alignment()) < 23.429 - min_heap_byte_size()) { 23.430 + if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) { 23.431 warning("Inconsistency between minimum heap size and minimum " 23.432 - "generation sizes: using minimum heap = " SIZE_FORMAT, 23.433 - min_heap_byte_size()); 23.434 + "generation sizes: using minimum heap = " SIZE_FORMAT, 23.435 + _min_heap_byte_size); 23.436 } 23.437 if ((OldSize > _max_gen1_size)) { 23.438 warning("Inconsistency between maximum heap size and maximum " 23.439 - "generation sizes: using maximum heap = " SIZE_FORMAT 23.440 - " -XX:OldSize flag is being ignored", 23.441 - max_heap_byte_size()); 23.442 + "generation sizes: using maximum heap = " SIZE_FORMAT 23.443 + " -XX:OldSize flag is being ignored", 23.444 + _max_heap_byte_size); 23.445 } 23.446 // If there is an inconsistency between the OldSize and the minimum and/or 23.447 // initial size of gen0, since OldSize was explicitly set, OldSize wins. 
23.448 if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, 23.449 - min_heap_byte_size(), OldSize)) { 23.450 + _min_heap_byte_size, OldSize)) { 23.451 if (PrintGCDetails && Verbose) { 23.452 gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 23.453 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 23.454 - min_gen0_size(), initial_gen0_size(), max_gen0_size()); 23.455 + _min_gen0_size, _initial_gen0_size, _max_gen0_size); 23.456 } 23.457 } 23.458 // Initial size 23.459 if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, 23.460 - initial_heap_byte_size(), OldSize)) { 23.461 + _initial_heap_byte_size, OldSize)) { 23.462 if (PrintGCDetails && Verbose) { 23.463 gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 23.464 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 23.465 - min_gen0_size(), initial_gen0_size(), max_gen0_size()); 23.466 + _min_gen0_size, _initial_gen0_size, _max_gen0_size); 23.467 } 23.468 } 23.469 } 23.470 // Enforce the maximum gen1 size. 23.471 - set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size)); 23.472 + _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size); 23.473 23.474 // Check that min gen1 <= initial gen1 <= max gen1 23.475 - set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size)); 23.476 - set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size)); 23.477 + _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size); 23.478 + _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size); 23.479 23.480 if (PrintGCDetails && Verbose) { 23.481 gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " 23.482 SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, 23.483 - min_gen1_size(), initial_gen1_size(), max_gen1_size()); 23.484 + _min_gen1_size, _initial_gen1_size, _max_gen1_size); 23.485 } 23.486 } 23.487
24.1 --- a/src/share/vm/memory/collectorPolicy.hpp Fri Oct 11 17:08:22 2013 -0400 24.2 +++ b/src/share/vm/memory/collectorPolicy.hpp Fri Oct 11 22:22:19 2013 -0400 24.3 @@ -101,17 +101,12 @@ 24.4 // Return maximum heap alignment that may be imposed by the policy 24.5 static size_t compute_max_alignment(); 24.6 24.7 - void set_min_alignment(size_t align) { _min_alignment = align; } 24.8 size_t min_alignment() { return _min_alignment; } 24.9 - void set_max_alignment(size_t align) { _max_alignment = align; } 24.10 size_t max_alignment() { return _max_alignment; } 24.11 24.12 size_t initial_heap_byte_size() { return _initial_heap_byte_size; } 24.13 - void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; } 24.14 size_t max_heap_byte_size() { return _max_heap_byte_size; } 24.15 - void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; } 24.16 size_t min_heap_byte_size() { return _min_heap_byte_size; } 24.17 - void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; } 24.18 24.19 enum Name { 24.20 CollectorPolicyKind, 24.21 @@ -248,12 +243,9 @@ 24.22 24.23 public: 24.24 // Accessors 24.25 - size_t min_gen0_size() { return _min_gen0_size; } 24.26 - void set_min_gen0_size(size_t v) { _min_gen0_size = v; } 24.27 + size_t min_gen0_size() { return _min_gen0_size; } 24.28 size_t initial_gen0_size() { return _initial_gen0_size; } 24.29 - void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; } 24.30 - size_t max_gen0_size() { return _max_gen0_size; } 24.31 - void set_max_gen0_size(size_t v) { _max_gen0_size = v; } 24.32 + size_t max_gen0_size() { return _max_gen0_size; } 24.33 24.34 virtual int number_of_generations() = 0; 24.35 24.36 @@ -302,12 +294,9 @@ 24.37 24.38 public: 24.39 // Accessors 24.40 - size_t min_gen1_size() { return _min_gen1_size; } 24.41 - void set_min_gen1_size(size_t v) { _min_gen1_size = v; } 24.42 + size_t min_gen1_size() { return _min_gen1_size; } 24.43 size_t initial_gen1_size() { return _initial_gen1_size; } 
24.44 - void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; } 24.45 - size_t max_gen1_size() { return _max_gen1_size; } 24.46 - void set_max_gen1_size(size_t v) { _max_gen1_size = v; } 24.47 + size_t max_gen1_size() { return _max_gen1_size; } 24.48 24.49 // Inherited methods 24.50 TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
25.1 --- a/src/share/vm/memory/filemap.hpp Fri Oct 11 17:08:22 2013 -0400 25.2 +++ b/src/share/vm/memory/filemap.hpp Fri Oct 11 22:22:19 2013 -0400 25.3 @@ -26,6 +26,7 @@ 25.4 #define SHARE_VM_MEMORY_FILEMAP_HPP 25.5 25.6 #include "memory/metaspaceShared.hpp" 25.7 +#include "memory/metaspace.hpp" 25.8 25.9 // Layout of the file: 25.10 // header: dump of archive instance plus versioning info, datestamp, etc.
26.1 --- a/src/share/vm/memory/metaspace.cpp Fri Oct 11 17:08:22 2013 -0400 26.2 +++ b/src/share/vm/memory/metaspace.cpp Fri Oct 11 22:22:19 2013 -0400 26.3 @@ -29,17 +29,21 @@ 26.4 #include "memory/collectorPolicy.hpp" 26.5 #include "memory/filemap.hpp" 26.6 #include "memory/freeList.hpp" 26.7 +#include "memory/gcLocker.hpp" 26.8 #include "memory/metablock.hpp" 26.9 #include "memory/metachunk.hpp" 26.10 #include "memory/metaspace.hpp" 26.11 #include "memory/metaspaceShared.hpp" 26.12 #include "memory/resourceArea.hpp" 26.13 #include "memory/universe.hpp" 26.14 +#include "runtime/atomic.inline.hpp" 26.15 #include "runtime/globals.hpp" 26.16 +#include "runtime/init.hpp" 26.17 #include "runtime/java.hpp" 26.18 #include "runtime/mutex.hpp" 26.19 #include "runtime/orderAccess.hpp" 26.20 #include "services/memTracker.hpp" 26.21 +#include "services/memoryService.hpp" 26.22 #include "utilities/copy.hpp" 26.23 #include "utilities/debug.hpp" 26.24 26.25 @@ -84,13 +88,7 @@ 26.26 return (ChunkIndex) (i+1); 26.27 } 26.28 26.29 -// Originally _capacity_until_GC was set to MetaspaceSize here but 26.30 -// the default MetaspaceSize before argument processing was being 26.31 -// used which was not the desired value. See the code 26.32 -// in should_expand() to see how the initialization is handled 26.33 -// now. 
26.34 -size_t MetaspaceGC::_capacity_until_GC = 0; 26.35 -bool MetaspaceGC::_expand_after_GC = false; 26.36 +volatile intptr_t MetaspaceGC::_capacity_until_GC = 0; 26.37 uint MetaspaceGC::_shrink_factor = 0; 26.38 bool MetaspaceGC::_should_concurrent_collect = false; 26.39 26.40 @@ -293,9 +291,10 @@ 26.41 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } 26.42 26.43 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } 26.44 - size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; } 26.45 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } 26.46 26.47 + bool is_pre_committed() const { return _virtual_space.special(); } 26.48 + 26.49 // address of next available space in _virtual_space; 26.50 // Accessors 26.51 VirtualSpaceNode* next() { return _next; } 26.52 @@ -337,7 +336,7 @@ 26.53 26.54 // Expands/shrinks the committed space in a virtual space. Delegates 26.55 // to Virtualspace 26.56 - bool expand_by(size_t words, bool pre_touch = false); 26.57 + bool expand_by(size_t min_words, size_t preferred_words); 26.58 26.59 // In preparation for deleting this node, remove all the chunks 26.60 // in the node from any freelist. 26.61 @@ -351,29 +350,64 @@ 26.62 void print_on(outputStream* st) const; 26.63 }; 26.64 26.65 +#define assert_is_ptr_aligned(ptr, alignment) \ 26.66 + assert(is_ptr_aligned(ptr, alignment), \ 26.67 + err_msg(PTR_FORMAT " is not aligned to " \ 26.68 + SIZE_FORMAT, ptr, alignment)) 26.69 + 26.70 +#define assert_is_size_aligned(size, alignment) \ 26.71 + assert(is_size_aligned(size, alignment), \ 26.72 + err_msg(SIZE_FORMAT " is not aligned to " \ 26.73 + SIZE_FORMAT, size, alignment)) 26.74 + 26.75 + 26.76 +// Decide if large pages should be committed when the memory is reserved. 
26.77 +static bool should_commit_large_pages_when_reserving(size_t bytes) { 26.78 + if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) { 26.79 + size_t words = bytes / BytesPerWord; 26.80 + bool is_class = false; // We never reserve large pages for the class space. 26.81 + if (MetaspaceGC::can_expand(words, is_class) && 26.82 + MetaspaceGC::allowed_expansion() >= words) { 26.83 + return true; 26.84 + } 26.85 + } 26.86 + 26.87 + return false; 26.88 +} 26.89 + 26.90 // byte_size is the size of the associated virtualspace. 26.91 -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 26.92 - // align up to vm allocation granularity 26.93 - byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); 26.94 +VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 26.95 + assert_is_size_aligned(bytes, Metaspace::reserve_alignment()); 26.96 26.97 // This allocates memory with mmap. For DumpSharedspaces, try to reserve 26.98 // configurable address, generally at the top of the Java heap so other 26.99 // memory addresses don't conflict. 26.100 if (DumpSharedSpaces) { 26.101 - char* shared_base = (char*)SharedBaseAddress; 26.102 - _rs = ReservedSpace(byte_size, 0, false, shared_base, 0); 26.103 + bool large_pages = false; // No large pages when dumping the CDS archive. 26.104 + char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment()); 26.105 + 26.106 + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0); 26.107 if (_rs.is_reserved()) { 26.108 assert(shared_base == 0 || _rs.base() == shared_base, "should match"); 26.109 } else { 26.110 // Get a mmap region anywhere if the SharedBaseAddress fails. 
26.111 - _rs = ReservedSpace(byte_size); 26.112 + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 26.113 } 26.114 MetaspaceShared::set_shared_rs(&_rs); 26.115 } else { 26.116 - _rs = ReservedSpace(byte_size); 26.117 + bool large_pages = should_commit_large_pages_when_reserving(bytes); 26.118 + 26.119 + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 26.120 } 26.121 26.122 - MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); 26.123 + if (_rs.is_reserved()) { 26.124 + assert(_rs.base() != NULL, "Catch if we get a NULL address"); 26.125 + assert(_rs.size() != 0, "Catch if we get a 0 size"); 26.126 + assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment()); 26.127 + assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment()); 26.128 + 26.129 + MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); 26.130 + } 26.131 } 26.132 26.133 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { 26.134 @@ -410,8 +444,6 @@ 26.135 #endif 26.136 26.137 // List of VirtualSpaces for metadata allocation. 26.138 -// It has a _next link for singly linked list and a MemRegion 26.139 -// for total space in the VirtualSpace. 26.140 class VirtualSpaceList : public CHeapObj<mtClass> { 26.141 friend class VirtualSpaceNode; 26.142 26.143 @@ -419,16 +451,13 @@ 26.144 VirtualSpaceSize = 256 * K 26.145 }; 26.146 26.147 - // Global list of virtual spaces 26.148 // Head of the list 26.149 VirtualSpaceNode* _virtual_space_list; 26.150 // virtual space currently being used for allocations 26.151 VirtualSpaceNode* _current_virtual_space; 26.152 26.153 - // Can this virtual list allocate >1 spaces? 
Also, used to determine 26.154 - // whether to allocate unlimited small chunks in this virtual space 26.155 + // Is this VirtualSpaceList used for the compressed class space 26.156 bool _is_class; 26.157 - bool can_grow() const { return !is_class() || !UseCompressedClassPointers; } 26.158 26.159 // Sum of reserved and committed memory in the virtual spaces 26.160 size_t _reserved_words; 26.161 @@ -453,7 +482,7 @@ 26.162 // Get another virtual space and add it to the list. This 26.163 // is typically prompted by a failed attempt to allocate a chunk 26.164 // and is typically followed by the allocation of a chunk. 26.165 - bool grow_vs(size_t vs_word_size); 26.166 + bool create_new_virtual_space(size_t vs_word_size); 26.167 26.168 public: 26.169 VirtualSpaceList(size_t word_size); 26.170 @@ -465,12 +494,12 @@ 26.171 size_t grow_chunks_by_words, 26.172 size_t medium_chunk_bunch); 26.173 26.174 - bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false); 26.175 - 26.176 - // Get the first chunk for a Metaspace. Used for 26.177 - // special cases such as the boot class loader, reflection 26.178 - // class loader and anonymous class loader. 26.179 - Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch); 26.180 + bool expand_node_by(VirtualSpaceNode* node, 26.181 + size_t min_words, 26.182 + size_t preferred_words); 26.183 + 26.184 + bool expand_by(size_t min_words, 26.185 + size_t preferred_words); 26.186 26.187 VirtualSpaceNode* current_virtual_space() { 26.188 return _current_virtual_space; 26.189 @@ -478,8 +507,7 @@ 26.190 26.191 bool is_class() const { return _is_class; } 26.192 26.193 - // Allocate the first virtualspace. 
26.194 - void initialize(size_t word_size); 26.195 + bool initialization_succeeded() { return _virtual_space_list != NULL; } 26.196 26.197 size_t reserved_words() { return _reserved_words; } 26.198 size_t reserved_bytes() { return reserved_words() * BytesPerWord; } 26.199 @@ -708,6 +736,9 @@ 26.200 // and allocates from that chunk. 26.201 MetaWord* grow_and_allocate(size_t word_size); 26.202 26.203 + // Notify memory usage to MemoryService. 26.204 + void track_metaspace_memory_usage(); 26.205 + 26.206 // debugging support. 26.207 26.208 void dump(outputStream* const out) const; 26.209 @@ -869,6 +900,12 @@ 26.210 MetaWord* chunk_limit = top(); 26.211 assert(chunk_limit != NULL, "Not safe to call this method"); 26.212 26.213 + // The virtual spaces are always expanded by the 26.214 + // commit granularity to enforce the following condition. 26.215 + // Without this the is_available check will not work correctly. 26.216 + assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(), 26.217 + "The committed memory doesn't match the expanded memory."); 26.218 + 26.219 if (!is_available(chunk_word_size)) { 26.220 if (TraceMetadataChunkAllocation) { 26.221 gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); 26.222 @@ -888,14 +925,21 @@ 26.223 26.224 26.225 // Expand the virtual space (commit more of the reserved space) 26.226 -bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) { 26.227 - size_t bytes = words * BytesPerWord; 26.228 - bool result = virtual_space()->expand_by(bytes, pre_touch); 26.229 - if (TraceMetavirtualspaceAllocation && !result) { 26.230 - gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed " 26.231 - "for byte size " SIZE_FORMAT, bytes); 26.232 - virtual_space()->print_on(gclog_or_tty); 26.233 +bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) { 26.234 + size_t min_bytes = min_words * BytesPerWord; 26.235 + size_t preferred_bytes = 
preferred_words * BytesPerWord; 26.236 + 26.237 + size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size(); 26.238 + 26.239 + if (uncommitted < min_bytes) { 26.240 + return false; 26.241 } 26.242 + 26.243 + size_t commit = MIN2(preferred_bytes, uncommitted); 26.244 + bool result = virtual_space()->expand_by(commit, false); 26.245 + 26.246 + assert(result, "Failed to commit memory"); 26.247 + 26.248 return result; 26.249 } 26.250 26.251 @@ -914,12 +958,23 @@ 26.252 return false; 26.253 } 26.254 26.255 - // An allocation out of this Virtualspace that is larger 26.256 - // than an initial commit size can waste that initial committed 26.257 - // space. 26.258 - size_t committed_byte_size = 0; 26.259 - bool result = virtual_space()->initialize(_rs, committed_byte_size); 26.260 + // These are necessary restriction to make sure that the virtual space always 26.261 + // grows in steps of Metaspace::commit_alignment(). If both base and size are 26.262 + // aligned only the middle alignment of the VirtualSpace is used. 26.263 + assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment()); 26.264 + assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment()); 26.265 + 26.266 + // ReservedSpaces marked as special will have the entire memory 26.267 + // pre-committed. Setting a committed size will make sure that 26.268 + // committed_size and actual_committed_size agrees. 26.269 + size_t pre_committed_size = _rs.special() ? 
_rs.size() : 0; 26.270 + 26.271 + bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size, 26.272 + Metaspace::commit_alignment()); 26.273 if (result) { 26.274 + assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(), 26.275 + "Checking that the pre-committed memory was registered by the VirtualSpace"); 26.276 + 26.277 set_top((MetaWord*)virtual_space()->low()); 26.278 set_reserved(MemRegion((HeapWord*)_rs.base(), 26.279 (HeapWord*)(_rs.base() + _rs.size()))); 26.280 @@ -976,13 +1031,23 @@ 26.281 _reserved_words = _reserved_words - v; 26.282 } 26.283 26.284 +#define assert_committed_below_limit() \ 26.285 + assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \ 26.286 + err_msg("Too much committed memory. Committed: " SIZE_FORMAT \ 26.287 + " limit (MaxMetaspaceSize): " SIZE_FORMAT, \ 26.288 + MetaspaceAux::committed_bytes(), MaxMetaspaceSize)); 26.289 + 26.290 void VirtualSpaceList::inc_committed_words(size_t v) { 26.291 assert_lock_strong(SpaceManager::expand_lock()); 26.292 _committed_words = _committed_words + v; 26.293 + 26.294 + assert_committed_below_limit(); 26.295 } 26.296 void VirtualSpaceList::dec_committed_words(size_t v) { 26.297 assert_lock_strong(SpaceManager::expand_lock()); 26.298 _committed_words = _committed_words - v; 26.299 + 26.300 + assert_committed_below_limit(); 26.301 } 26.302 26.303 void VirtualSpaceList::inc_virtual_space_count() { 26.304 @@ -1025,8 +1090,8 @@ 26.305 if (vsl->container_count() == 0 && vsl != current_virtual_space()) { 26.306 // Unlink it from the list 26.307 if (prev_vsl == vsl) { 26.308 - // This is the case of the current note being the first note. 26.309 - assert(vsl == virtual_space_list(), "Expected to be the first note"); 26.310 + // This is the case of the current node being the first node. 
26.311 + assert(vsl == virtual_space_list(), "Expected to be the first node"); 26.312 set_virtual_space_list(vsl->next()); 26.313 } else { 26.314 prev_vsl->set_next(vsl->next()); 26.315 @@ -1054,7 +1119,7 @@ 26.316 #endif 26.317 } 26.318 26.319 -VirtualSpaceList::VirtualSpaceList(size_t word_size ) : 26.320 +VirtualSpaceList::VirtualSpaceList(size_t word_size) : 26.321 _is_class(false), 26.322 _virtual_space_list(NULL), 26.323 _current_virtual_space(NULL), 26.324 @@ -1063,9 +1128,7 @@ 26.325 _virtual_space_count(0) { 26.326 MutexLockerEx cl(SpaceManager::expand_lock(), 26.327 Mutex::_no_safepoint_check_flag); 26.328 - bool initialization_succeeded = grow_vs(word_size); 26.329 - assert(initialization_succeeded, 26.330 - " VirtualSpaceList initialization should not fail"); 26.331 + create_new_virtual_space(word_size); 26.332 } 26.333 26.334 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : 26.335 @@ -1079,8 +1142,9 @@ 26.336 Mutex::_no_safepoint_check_flag); 26.337 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); 26.338 bool succeeded = class_entry->initialize(); 26.339 - assert(succeeded, " VirtualSpaceList initialization should not fail"); 26.340 - link_vs(class_entry); 26.341 + if (succeeded) { 26.342 + link_vs(class_entry); 26.343 + } 26.344 } 26.345 26.346 size_t VirtualSpaceList::free_bytes() { 26.347 @@ -1088,14 +1152,24 @@ 26.348 } 26.349 26.350 // Allocate another meta virtual space and add it to the list. 26.351 -bool VirtualSpaceList::grow_vs(size_t vs_word_size) { 26.352 +bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) { 26.353 assert_lock_strong(SpaceManager::expand_lock()); 26.354 - if (vs_word_size == 0) { 26.355 + 26.356 + if (is_class()) { 26.357 + assert(false, "We currently don't support more than one VirtualSpace for" 26.358 + " the compressed class space. 
The initialization of the" 26.359 + " CCS uses another code path and should not hit this path."); 26.360 return false; 26.361 } 26.362 + 26.363 + if (vs_word_size == 0) { 26.364 + assert(false, "vs_word_size should always be at least _reserve_alignment large."); 26.365 + return false; 26.366 + } 26.367 + 26.368 // Reserve the space 26.369 size_t vs_byte_size = vs_word_size * BytesPerWord; 26.370 - assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned"); 26.371 + assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment()); 26.372 26.373 // Allocate the meta virtual space and initialize it. 26.374 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); 26.375 @@ -1103,7 +1177,8 @@ 26.376 delete new_entry; 26.377 return false; 26.378 } else { 26.379 - assert(new_entry->reserved_words() == vs_word_size, "Must be"); 26.380 + assert(new_entry->reserved_words() == vs_word_size, 26.381 + "Reserved memory size differs from requested memory size"); 26.382 // ensure lock-free iteration sees fully initialized node 26.383 OrderAccess::storestore(); 26.384 link_vs(new_entry); 26.385 @@ -1130,20 +1205,67 @@ 26.386 } 26.387 } 26.388 26.389 -bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) { 26.390 +bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node, 26.391 + size_t min_words, 26.392 + size_t preferred_words) { 26.393 size_t before = node->committed_words(); 26.394 26.395 - bool result = node->expand_by(word_size, pre_touch); 26.396 + bool result = node->expand_by(min_words, preferred_words); 26.397 26.398 size_t after = node->committed_words(); 26.399 26.400 // after and before can be the same if the memory was pre-committed. 
26.401 - assert(after >= before, "Must be"); 26.402 + assert(after >= before, "Inconsistency"); 26.403 inc_committed_words(after - before); 26.404 26.405 return result; 26.406 } 26.407 26.408 +bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) { 26.409 + assert_is_size_aligned(min_words, Metaspace::commit_alignment_words()); 26.410 + assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words()); 26.411 + assert(min_words <= preferred_words, "Invalid arguments"); 26.412 + 26.413 + if (!MetaspaceGC::can_expand(min_words, this->is_class())) { 26.414 + return false; 26.415 + } 26.416 + 26.417 + size_t allowed_expansion_words = MetaspaceGC::allowed_expansion(); 26.418 + if (allowed_expansion_words < min_words) { 26.419 + return false; 26.420 + } 26.421 + 26.422 + size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words); 26.423 + 26.424 + // Commit more memory from the the current virtual space. 26.425 + bool vs_expanded = expand_node_by(current_virtual_space(), 26.426 + min_words, 26.427 + max_expansion_words); 26.428 + if (vs_expanded) { 26.429 + return true; 26.430 + } 26.431 + 26.432 + // Get another virtual space. 26.433 + size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words); 26.434 + grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words()); 26.435 + 26.436 + if (create_new_virtual_space(grow_vs_words)) { 26.437 + if (current_virtual_space()->is_pre_committed()) { 26.438 + // The memory was pre-committed, so we are done here. 
26.439 + assert(min_words <= current_virtual_space()->committed_words(), 26.440 + "The new VirtualSpace was pre-committed, so it" 26.441 + "should be large enough to fit the alloc request."); 26.442 + return true; 26.443 + } 26.444 + 26.445 + return expand_node_by(current_virtual_space(), 26.446 + min_words, 26.447 + max_expansion_words); 26.448 + } 26.449 + 26.450 + return false; 26.451 +} 26.452 + 26.453 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, 26.454 size_t grow_chunks_by_words, 26.455 size_t medium_chunk_bunch) { 26.456 @@ -1151,63 +1273,27 @@ 26.457 // Allocate a chunk out of the current virtual space. 26.458 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 26.459 26.460 - if (next == NULL) { 26.461 - // Not enough room in current virtual space. Try to commit 26.462 - // more space. 26.463 - size_t expand_vs_by_words = MAX2(medium_chunk_bunch, 26.464 - grow_chunks_by_words); 26.465 - size_t page_size_words = os::vm_page_size() / BytesPerWord; 26.466 - size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words, 26.467 - page_size_words); 26.468 - bool vs_expanded = 26.469 - expand_by(current_virtual_space(), aligned_expand_vs_by_words); 26.470 - if (!vs_expanded) { 26.471 - // Should the capacity of the metaspaces be expanded for 26.472 - // this allocation? If it's the virtual space for classes and is 26.473 - // being used for CompressedHeaders, don't allocate a new virtualspace. 26.474 - if (can_grow() && MetaspaceGC::should_expand(this, word_size)) { 26.475 - // Get another virtual space. 26.476 - size_t allocation_aligned_expand_words = 26.477 - align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord); 26.478 - size_t grow_vs_words = 26.479 - MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words); 26.480 - if (grow_vs(grow_vs_words)) { 26.481 - // Got it. It's on the list now. Get a chunk from it. 
26.482 - assert(current_virtual_space()->expanded_words() == 0, 26.483 - "New virtual space nodes should not have expanded"); 26.484 - 26.485 - size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words, 26.486 - page_size_words); 26.487 - // We probably want to expand by aligned_expand_vs_by_words here. 26.488 - expand_by(current_virtual_space(), grow_chunks_by_words_aligned); 26.489 - next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 26.490 - } 26.491 - } else { 26.492 - // Allocation will fail and induce a GC 26.493 - if (TraceMetadataChunkAllocation && Verbose) { 26.494 - gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():" 26.495 - " Fail instead of expand the metaspace"); 26.496 - } 26.497 - } 26.498 - } else { 26.499 - // The virtual space expanded, get a new chunk 26.500 - next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 26.501 - assert(next != NULL, "Just expanded, should succeed"); 26.502 - } 26.503 + if (next != NULL) { 26.504 + return next; 26.505 } 26.506 26.507 - assert(next == NULL || (next->next() == NULL && next->prev() == NULL), 26.508 - "New chunk is still on some list"); 26.509 - return next; 26.510 -} 26.511 - 26.512 -Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size, 26.513 - size_t chunk_bunch) { 26.514 - // Get a chunk from the chunk freelist 26.515 - Metachunk* new_chunk = get_new_chunk(chunk_word_size, 26.516 - chunk_word_size, 26.517 - chunk_bunch); 26.518 - return new_chunk; 26.519 + // The expand amount is currently only determined by the requested sizes 26.520 + // and not how much committed memory is left in the current virtual space. 
26.521 + 26.522 + size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words()); 26.523 + size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words()); 26.524 + if (min_word_size >= preferred_word_size) { 26.525 + // Can happen when humongous chunks are allocated. 26.526 + preferred_word_size = min_word_size; 26.527 + } 26.528 + 26.529 + bool expanded = expand_by(min_word_size, preferred_word_size); 26.530 + if (expanded) { 26.531 + next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 26.532 + assert(next != NULL, "The allocation was expected to succeed after the expansion"); 26.533 + } 26.534 + 26.535 + return next; 26.536 } 26.537 26.538 void VirtualSpaceList::print_on(outputStream* st) const { 26.539 @@ -1256,96 +1342,96 @@ 26.540 // Calculate the amount to increase the high water mark (HWM). 26.541 // Increase by a minimum amount (MinMetaspaceExpansion) so that 26.542 // another expansion is not requested too soon. If that is not 26.543 -// enough to satisfy the allocation (i.e. big enough for a word_size 26.544 -// allocation), increase by MaxMetaspaceExpansion. If that is still 26.545 -// not enough, expand by the size of the allocation (word_size) plus 26.546 -// some. 26.547 -size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { 26.548 - size_t before_inc = MetaspaceGC::capacity_until_GC(); 26.549 - size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord; 26.550 - size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord; 26.551 - size_t page_size_words = os::vm_page_size() / BytesPerWord; 26.552 - size_t size_delta_words = align_size_up(word_size, page_size_words); 26.553 - size_t delta_words = MAX2(size_delta_words, min_delta_words); 26.554 - if (delta_words > min_delta_words) { 26.555 +// enough to satisfy the allocation, increase by MaxMetaspaceExpansion. 
26.556 +// If that is still not enough, expand by the size of the allocation 26.557 +// plus some. 26.558 +size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { 26.559 + size_t min_delta = MinMetaspaceExpansion; 26.560 + size_t max_delta = MaxMetaspaceExpansion; 26.561 + size_t delta = align_size_up(bytes, Metaspace::commit_alignment()); 26.562 + 26.563 + if (delta <= min_delta) { 26.564 + delta = min_delta; 26.565 + } else if (delta <= max_delta) { 26.566 // Don't want to hit the high water mark on the next 26.567 // allocation so make the delta greater than just enough 26.568 // for this allocation. 26.569 - delta_words = MAX2(delta_words, max_delta_words); 26.570 - if (delta_words > max_delta_words) { 26.571 - // This allocation is large but the next ones are probably not 26.572 - // so increase by the minimum. 26.573 - delta_words = delta_words + min_delta_words; 26.574 - } 26.575 + delta = max_delta; 26.576 + } else { 26.577 + // This allocation is large but the next ones are probably not 26.578 + // so increase by the minimum. 26.579 + delta = delta + min_delta; 26.580 } 26.581 - return delta_words; 26.582 + 26.583 + assert_is_size_aligned(delta, Metaspace::commit_alignment()); 26.584 + 26.585 + return delta; 26.586 } 26.587 26.588 -bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { 26.589 - 26.590 - // If the user wants a limit, impose one. 26.591 - // The reason for someone using this flag is to limit reserved space. So 26.592 - // for non-class virtual space, compare against virtual spaces that are reserved. 26.593 - // For class virtual space, we only compare against the committed space, not 26.594 - // reserved space, because this is a larger space prereserved for compressed 26.595 - // class pointers. 
26.596 - if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { 26.597 - size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 26.598 - size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); 26.599 - size_t real_allocated = nonclass_allocated + class_allocated; 26.600 - if (real_allocated >= MaxMetaspaceSize) { 26.601 +size_t MetaspaceGC::capacity_until_GC() { 26.602 + size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC); 26.603 + assert(value >= MetaspaceSize, "Not initialied properly?"); 26.604 + return value; 26.605 +} 26.606 + 26.607 +size_t MetaspaceGC::inc_capacity_until_GC(size_t v) { 26.608 + assert_is_size_aligned(v, Metaspace::commit_alignment()); 26.609 + 26.610 + return (size_t)Atomic::add_ptr(v, &_capacity_until_GC); 26.611 +} 26.612 + 26.613 +size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { 26.614 + assert_is_size_aligned(v, Metaspace::commit_alignment()); 26.615 + 26.616 + return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC); 26.617 +} 26.618 + 26.619 +bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { 26.620 + // Check if the compressed class space is full. 26.621 + if (is_class && Metaspace::using_class_space()) { 26.622 + size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType); 26.623 + if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) { 26.624 return false; 26.625 } 26.626 } 26.627 26.628 - // Class virtual space should always be expanded. Call GC for the other 26.629 - // metadata virtual space. 26.630 - if (Metaspace::using_class_space() && 26.631 - (vsl == Metaspace::class_space_list())) return true; 26.632 - 26.633 - // If this is part of an allocation after a GC, expand 26.634 - // unconditionally. 26.635 - if (MetaspaceGC::expand_after_GC()) { 26.636 - return true; 26.637 + // Check if the user has imposed a limit on the metaspace memory. 
26.638 + size_t committed_bytes = MetaspaceAux::committed_bytes(); 26.639 + if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) { 26.640 + return false; 26.641 } 26.642 26.643 - 26.644 - // If the capacity is below the minimum capacity, allow the 26.645 - // expansion. Also set the high-water-mark (capacity_until_GC) 26.646 - // to that minimum capacity so that a GC will not be induced 26.647 - // until that minimum capacity is exceeded. 26.648 - size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); 26.649 - size_t metaspace_size_bytes = MetaspaceSize; 26.650 - if (committed_capacity_bytes < metaspace_size_bytes || 26.651 - capacity_until_GC() == 0) { 26.652 - set_capacity_until_GC(metaspace_size_bytes); 26.653 - return true; 26.654 - } else { 26.655 - if (committed_capacity_bytes < capacity_until_GC()) { 26.656 - return true; 26.657 - } else { 26.658 - if (TraceMetadataChunkAllocation && Verbose) { 26.659 - gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT 26.660 - " capacity_until_GC " SIZE_FORMAT 26.661 - " allocated_capacity_bytes " SIZE_FORMAT, 26.662 - word_size, 26.663 - capacity_until_GC(), 26.664 - MetaspaceAux::allocated_capacity_bytes()); 26.665 - } 26.666 - return false; 26.667 - } 26.668 + return true; 26.669 +} 26.670 + 26.671 +size_t MetaspaceGC::allowed_expansion() { 26.672 + size_t committed_bytes = MetaspaceAux::committed_bytes(); 26.673 + 26.674 + size_t left_until_max = MaxMetaspaceSize - committed_bytes; 26.675 + 26.676 + // Always grant expansion if we are initiating the JVM, 26.677 + // or if the GC_locker is preventing GCs. 
26.678 + if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) { 26.679 + return left_until_max / BytesPerWord; 26.680 } 26.681 + 26.682 + size_t capacity_until_gc = capacity_until_GC(); 26.683 + 26.684 + if (capacity_until_gc <= committed_bytes) { 26.685 + return 0; 26.686 + } 26.687 + 26.688 + size_t left_until_GC = capacity_until_gc - committed_bytes; 26.689 + size_t left_to_commit = MIN2(left_until_GC, left_until_max); 26.690 + 26.691 + return left_to_commit / BytesPerWord; 26.692 } 26.693 26.694 - 26.695 - 26.696 void MetaspaceGC::compute_new_size() { 26.697 assert(_shrink_factor <= 100, "invalid shrink factor"); 26.698 uint current_shrink_factor = _shrink_factor; 26.699 _shrink_factor = 0; 26.700 26.701 - // Until a faster way of calculating the "used" quantity is implemented, 26.702 - // use "capacity". 26.703 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); 26.704 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); 26.705 26.706 @@ -1377,9 +1463,10 @@ 26.707 // If we have less capacity below the metaspace HWM, then 26.708 // increment the HWM. 26.709 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 26.710 + expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); 26.711 // Don't expand unless it's significant 26.712 if (expand_bytes >= MinMetaspaceExpansion) { 26.713 - MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); 26.714 + MetaspaceGC::inc_capacity_until_GC(expand_bytes); 26.715 } 26.716 if (PrintGCDetails && Verbose) { 26.717 size_t new_capacity_until_GC = capacity_until_GC; 26.718 @@ -1436,6 +1523,9 @@ 26.719 // on the third call, and 100% by the fourth call. But if we recompute 26.720 // size without shrinking, it goes back to 0%. 
26.721 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 26.722 + 26.723 + shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 26.724 + 26.725 assert(shrink_bytes <= max_shrink_bytes, 26.726 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 26.727 shrink_bytes, max_shrink_bytes)); 26.728 @@ -1467,7 +1557,7 @@ 26.729 // Don't shrink unless it's significant 26.730 if (shrink_bytes >= MinMetaspaceExpansion && 26.731 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 26.732 - MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); 26.733 + MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 26.734 } 26.735 } 26.736 26.737 @@ -1700,7 +1790,6 @@ 26.738 assert(free_list != NULL, "Sanity check"); 26.739 26.740 chunk = free_list->head(); 26.741 - debug_only(Metachunk* debug_head = chunk;) 26.742 26.743 if (chunk == NULL) { 26.744 return NULL; 26.745 @@ -1709,9 +1798,6 @@ 26.746 // Remove the chunk as the head of the list. 26.747 free_list->remove_chunk(chunk); 26.748 26.749 - // Chunk is being removed from the chunks free list. 26.750 - dec_free_chunks_total(chunk->capacity_word_size()); 26.751 - 26.752 if (TraceMetadataChunkAllocation && Verbose) { 26.753 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list " 26.754 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 26.755 @@ -1722,21 +1808,22 @@ 26.756 word_size, 26.757 FreeBlockDictionary<Metachunk>::atLeast); 26.758 26.759 - if (chunk != NULL) { 26.760 - if (TraceMetadataHumongousAllocation) { 26.761 - size_t waste = chunk->word_size() - word_size; 26.762 - gclog_or_tty->print_cr("Free list allocate humongous chunk size " 26.763 - SIZE_FORMAT " for requested size " SIZE_FORMAT 26.764 - " waste " SIZE_FORMAT, 26.765 - chunk->word_size(), word_size, waste); 26.766 - } 26.767 - // Chunk is being removed from the chunks free list. 
26.768 - dec_free_chunks_total(chunk->capacity_word_size()); 26.769 - } else { 26.770 + if (chunk == NULL) { 26.771 return NULL; 26.772 } 26.773 + 26.774 + if (TraceMetadataHumongousAllocation) { 26.775 + size_t waste = chunk->word_size() - word_size; 26.776 + gclog_or_tty->print_cr("Free list allocate humongous chunk size " 26.777 + SIZE_FORMAT " for requested size " SIZE_FORMAT 26.778 + " waste " SIZE_FORMAT, 26.779 + chunk->word_size(), word_size, waste); 26.780 + } 26.781 } 26.782 26.783 + // Chunk is being removed from the chunks free list. 26.784 + dec_free_chunks_total(chunk->capacity_word_size()); 26.785 + 26.786 // Remove it from the links to this freelist 26.787 chunk->set_next(NULL); 26.788 chunk->set_prev(NULL); 26.789 @@ -1977,6 +2064,15 @@ 26.790 return chunk_word_size; 26.791 } 26.792 26.793 +void SpaceManager::track_metaspace_memory_usage() { 26.794 + if (is_init_completed()) { 26.795 + if (is_class()) { 26.796 + MemoryService::track_compressed_class_memory_usage(); 26.797 + } 26.798 + MemoryService::track_metaspace_memory_usage(); 26.799 + } 26.800 +} 26.801 + 26.802 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 26.803 assert(vs_list()->current_virtual_space() != NULL, 26.804 "Should have been set"); 26.805 @@ -2002,15 +2098,24 @@ 26.806 size_t grow_chunks_by_words = calc_chunk_size(word_size); 26.807 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 26.808 26.809 + if (next != NULL) { 26.810 + Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words); 26.811 + } 26.812 + 26.813 + MetaWord* mem = NULL; 26.814 + 26.815 // If a chunk was available, add it to the in-use chunk list 26.816 // and do an allocation from it. 26.817 if (next != NULL) { 26.818 - Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words); 26.819 // Add to this manager's list of chunks in use. 
26.820 add_chunk(next, false); 26.821 - return next->allocate(word_size); 26.822 + mem = next->allocate(word_size); 26.823 } 26.824 - return NULL; 26.825 + 26.826 + // Track metaspace memory usage statistic. 26.827 + track_metaspace_memory_usage(); 26.828 + 26.829 + return mem; 26.830 } 26.831 26.832 void SpaceManager::print_on(outputStream* st) const { 26.833 @@ -2366,6 +2471,7 @@ 26.834 inc_used_metrics(word_size); 26.835 return current_chunk()->allocate(word_size); // caller handles null result 26.836 } 26.837 + 26.838 if (current_chunk() != NULL) { 26.839 result = current_chunk()->allocate(word_size); 26.840 } 26.841 @@ -2373,7 +2479,8 @@ 26.842 if (result == NULL) { 26.843 result = grow_and_allocate(word_size); 26.844 } 26.845 - if (result != 0) { 26.846 + 26.847 + if (result != NULL) { 26.848 inc_used_metrics(word_size); 26.849 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 26.850 "Head of the list is being allocated"); 26.851 @@ -2639,24 +2746,26 @@ 26.852 void MetaspaceAux::print_on(outputStream* out) { 26.853 Metaspace::MetadataType nct = Metaspace::NonClassType; 26.854 26.855 - out->print_cr(" Metaspace total " 26.856 - SIZE_FORMAT "K, used " SIZE_FORMAT "K," 26.857 - " reserved " SIZE_FORMAT "K", 26.858 - allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K); 26.859 - 26.860 - out->print_cr(" data space " 26.861 - SIZE_FORMAT "K, used " SIZE_FORMAT "K," 26.862 - " reserved " SIZE_FORMAT "K", 26.863 - allocated_capacity_bytes(nct)/K, 26.864 - allocated_used_bytes(nct)/K, 26.865 - reserved_bytes(nct)/K); 26.866 + out->print_cr(" Metaspace " 26.867 + "used " SIZE_FORMAT "K, " 26.868 + "capacity " SIZE_FORMAT "K, " 26.869 + "committed " SIZE_FORMAT "K, " 26.870 + "reserved " SIZE_FORMAT "K", 26.871 + allocated_used_bytes()/K, 26.872 + allocated_capacity_bytes()/K, 26.873 + committed_bytes()/K, 26.874 + reserved_bytes()/K); 26.875 + 26.876 if (Metaspace::using_class_space()) { 26.877 Metaspace::MetadataType ct = 
Metaspace::ClassType; 26.878 out->print_cr(" class space " 26.879 - SIZE_FORMAT "K, used " SIZE_FORMAT "K," 26.880 - " reserved " SIZE_FORMAT "K", 26.881 + "used " SIZE_FORMAT "K, " 26.882 + "capacity " SIZE_FORMAT "K, " 26.883 + "committed " SIZE_FORMAT "K, " 26.884 + "reserved " SIZE_FORMAT "K", 26.885 + allocated_used_bytes(ct)/K, 26.886 allocated_capacity_bytes(ct)/K, 26.887 - allocated_used_bytes(ct)/K, 26.888 + committed_bytes(ct)/K, 26.889 reserved_bytes(ct)/K); 26.890 } 26.891 } 26.892 @@ -2808,6 +2917,9 @@ 26.893 size_t Metaspace::_first_chunk_word_size = 0; 26.894 size_t Metaspace::_first_class_chunk_word_size = 0; 26.895 26.896 +size_t Metaspace::_commit_alignment = 0; 26.897 +size_t Metaspace::_reserve_alignment = 0; 26.898 + 26.899 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 26.900 initialize(lock, type); 26.901 } 26.902 @@ -2869,21 +2981,30 @@ 26.903 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 26.904 assert(class_metaspace_size() < KlassEncodingMetaspaceMax, 26.905 "Metaspace size is too big"); 26.906 + assert_is_ptr_aligned(requested_addr, _reserve_alignment); 26.907 + assert_is_ptr_aligned(cds_base, _reserve_alignment); 26.908 + assert_is_size_aligned(class_metaspace_size(), _reserve_alignment); 26.909 + 26.910 + // Don't use large pages for the class space. 26.911 + bool large_pages = false; 26.912 26.913 ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), 26.914 - os::vm_allocation_granularity(), 26.915 - false, requested_addr, 0); 26.916 + _reserve_alignment, 26.917 + large_pages, 26.918 + requested_addr, 0); 26.919 if (!metaspace_rs.is_reserved()) { 26.920 if (UseSharedSpaces) { 26.921 + size_t increment = align_size_up(1*G, _reserve_alignment); 26.922 + 26.923 // Keep trying to allocate the metaspace, increasing the requested_addr 26.924 // by 1GB each time, until we reach an address that will no longer allow 26.925 // use of CDS with compressed klass pointers. 
26.926 char *addr = requested_addr; 26.927 - while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) && 26.928 - can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) { 26.929 - addr = addr + 1*G; 26.930 + while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 26.931 + can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 26.932 + addr = addr + increment; 26.933 metaspace_rs = ReservedSpace(class_metaspace_size(), 26.934 - os::vm_allocation_granularity(), false, addr, 0); 26.935 + _reserve_alignment, large_pages, addr, 0); 26.936 } 26.937 } 26.938 26.939 @@ -2894,7 +3015,7 @@ 26.940 // So, UseCompressedClassPointers cannot be turned off at this point. 26.941 if (!metaspace_rs.is_reserved()) { 26.942 metaspace_rs = ReservedSpace(class_metaspace_size(), 26.943 - os::vm_allocation_granularity(), false); 26.944 + _reserve_alignment, large_pages); 26.945 if (!metaspace_rs.is_reserved()) { 26.946 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 26.947 class_metaspace_size())); 26.948 @@ -2933,34 +3054,96 @@ 26.949 assert(using_class_space(), "Must be using class space"); 26.950 _class_space_list = new VirtualSpaceList(rs); 26.951 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 26.952 + 26.953 + if (!_class_space_list->initialization_succeeded()) { 26.954 + vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 26.955 + } 26.956 } 26.957 26.958 #endif 26.959 26.960 +// Align down. If the aligning result in 0, return 'alignment'. 26.961 +static size_t restricted_align_down(size_t size, size_t alignment) { 26.962 + return MAX2(alignment, align_size_down_(size, alignment)); 26.963 +} 26.964 + 26.965 +void Metaspace::ergo_initialize() { 26.966 + if (DumpSharedSpaces) { 26.967 + // Using large pages when dumping the shared archive is currently not implemented. 
26.968 + FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 26.969 + } 26.970 + 26.971 + size_t page_size = os::vm_page_size(); 26.972 + if (UseLargePages && UseLargePagesInMetaspace) { 26.973 + page_size = os::large_page_size(); 26.974 + } 26.975 + 26.976 + _commit_alignment = page_size; 26.977 + _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 26.978 + 26.979 + // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 26.980 + // override if MaxMetaspaceSize was set on the command line or not. 26.981 + // This information is needed later to conform to the specification of the 26.982 + // java.lang.management.MemoryUsage API. 26.983 + // 26.984 + // Ideally, we would be able to set the default value of MaxMetaspaceSize in 26.985 + // globals.hpp to the aligned value, but this is not possible, since the 26.986 + // alignment depends on other flags being parsed. 26.987 + MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment); 26.988 + 26.989 + if (MetaspaceSize > MaxMetaspaceSize) { 26.990 + MetaspaceSize = MaxMetaspaceSize; 26.991 + } 26.992 + 26.993 + MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment); 26.994 + 26.995 + assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 26.996 + 26.997 + if (MetaspaceSize < 256*K) { 26.998 + vm_exit_during_initialization("Too small initial Metaspace size"); 26.999 + } 26.1000 + 26.1001 + MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment); 26.1002 + MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment); 26.1003 + 26.1004 + CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment); 26.1005 + set_class_metaspace_size(CompressedClassSpaceSize); 26.1006 +} 26.1007 + 26.1008 void Metaspace::global_initialize() { 26.1009 // Initialize the alignment for shared spaces. 
26.1010 int max_alignment = os::vm_page_size(); 26.1011 size_t cds_total = 0; 26.1012 26.1013 - set_class_metaspace_size(align_size_up(CompressedClassSpaceSize, 26.1014 - os::vm_allocation_granularity())); 26.1015 - 26.1016 MetaspaceShared::set_max_alignment(max_alignment); 26.1017 26.1018 if (DumpSharedSpaces) { 26.1019 - SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 26.1020 + SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 26.1021 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 26.1022 - SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 26.1023 - SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 26.1024 + SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 26.1025 + SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 26.1026 26.1027 // Initialize with the sum of the shared space sizes. The read-only 26.1028 // and read write metaspace chunks will be allocated out of this and the 26.1029 // remainder is the misc code and data chunks. 
26.1030 cds_total = FileMapInfo::shared_spaces_size(); 26.1031 + cds_total = align_size_up(cds_total, _reserve_alignment); 26.1032 _space_list = new VirtualSpaceList(cds_total/wordSize); 26.1033 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 26.1034 26.1035 + if (!_space_list->initialization_succeeded()) { 26.1036 + vm_exit_during_initialization("Unable to dump shared archive.", NULL); 26.1037 + } 26.1038 + 26.1039 #ifdef _LP64 26.1040 + if (cds_total + class_metaspace_size() > (uint64_t)max_juint) { 26.1041 + vm_exit_during_initialization("Unable to dump shared archive.", 26.1042 + err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 26.1043 + SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 26.1044 + "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(), 26.1045 + cds_total + class_metaspace_size(), (size_t)max_juint)); 26.1046 + } 26.1047 + 26.1048 // Set the compressed klass pointer base so that decoding of these pointers works 26.1049 // properly when creating the shared archive. 26.1050 assert(UseCompressedOops && UseCompressedClassPointers, 26.1051 @@ -2971,9 +3154,6 @@ 26.1052 _space_list->current_virtual_space()->bottom()); 26.1053 } 26.1054 26.1055 - // Set the shift to zero. 
26.1056 - assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total, 26.1057 - "CDS region is too large"); 26.1058 Universe::set_narrow_klass_shift(0); 26.1059 #endif 26.1060 26.1061 @@ -2992,12 +3172,12 @@ 26.1062 // Map in spaces now also 26.1063 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 26.1064 FileMapInfo::set_current_info(mapinfo); 26.1065 + cds_total = FileMapInfo::shared_spaces_size(); 26.1066 + cds_address = (address)mapinfo->region_base(0); 26.1067 } else { 26.1068 assert(!mapinfo->is_open() && !UseSharedSpaces, 26.1069 "archive file not closed or shared spaces not disabled."); 26.1070 } 26.1071 - cds_total = FileMapInfo::shared_spaces_size(); 26.1072 - cds_address = (address)mapinfo->region_base(0); 26.1073 } 26.1074 26.1075 #ifdef _LP64 26.1076 @@ -3005,7 +3185,9 @@ 26.1077 // above the heap and above the CDS area (if it exists). 26.1078 if (using_class_space()) { 26.1079 if (UseSharedSpaces) { 26.1080 - allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address); 26.1081 + char* cds_end = (char*)(cds_address + cds_total); 26.1082 + cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); 26.1083 + allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); 26.1084 } else { 26.1085 allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); 26.1086 } 26.1087 @@ -3023,11 +3205,19 @@ 26.1088 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 26.1089 // Arbitrarily set the initial virtual space to a multiple 26.1090 // of the boot class loader size. 26.1091 - size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size(); 26.1092 + size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; 26.1093 + word_size = align_size_up(word_size, Metaspace::reserve_alignment_words()); 26.1094 + 26.1095 // Initialize the list of virtual spaces. 
26.1096 _space_list = new VirtualSpaceList(word_size); 26.1097 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 26.1098 + 26.1099 + if (!_space_list->initialization_succeeded()) { 26.1100 + vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 26.1101 + } 26.1102 } 26.1103 + 26.1104 + MetaspaceGC::initialize(); 26.1105 } 26.1106 26.1107 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 26.1108 @@ -3039,7 +3229,7 @@ 26.1109 return chunk; 26.1110 } 26.1111 26.1112 - return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch); 26.1113 + return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 26.1114 } 26.1115 26.1116 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 26.1117 @@ -3112,19 +3302,18 @@ 26.1118 } 26.1119 26.1120 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 26.1121 - MetaWord* result; 26.1122 - MetaspaceGC::set_expand_after_GC(true); 26.1123 - size_t before_inc = MetaspaceGC::capacity_until_GC(); 26.1124 - size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; 26.1125 - MetaspaceGC::inc_capacity_until_GC(delta_bytes); 26.1126 + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 26.1127 + assert(delta_bytes > 0, "Must be"); 26.1128 + 26.1129 + size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes); 26.1130 + size_t before_inc = after_inc - delta_bytes; 26.1131 + 26.1132 if (PrintGCDetails && Verbose) { 26.1133 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 26.1134 - " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); 26.1135 + " to " SIZE_FORMAT, before_inc, after_inc); 26.1136 } 26.1137 26.1138 - result = allocate(word_size, mdtype); 26.1139 - 26.1140 - return result; 26.1141 + return allocate(word_size, mdtype); 26.1142 } 26.1143 26.1144 // Space 
allocated in the Metaspace. This may 26.1145 @@ -3206,6 +3395,7 @@ 26.1146 } 26.1147 } 26.1148 26.1149 + 26.1150 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 26.1151 bool read_only, MetaspaceObj::Type type, TRAPS) { 26.1152 if (HAS_PENDING_EXCEPTION) { 26.1153 @@ -3213,20 +3403,16 @@ 26.1154 return NULL; // caller does a CHECK_NULL too 26.1155 } 26.1156 26.1157 - MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 26.1158 - 26.1159 - // SSS: Should we align the allocations and make sure the sizes are aligned. 26.1160 - MetaWord* result = NULL; 26.1161 - 26.1162 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 26.1163 "ClassLoaderData::the_null_class_loader_data() should have been used."); 26.1164 + 26.1165 // Allocate in metaspaces without taking out a lock, because it deadlocks 26.1166 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 26.1167 // to revisit this for application class data sharing. 26.1168 if (DumpSharedSpaces) { 26.1169 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 26.1170 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 26.1171 - result = space->allocate(word_size, NonClassType); 26.1172 + MetaWord* result = space->allocate(word_size, NonClassType); 26.1173 if (result == NULL) { 26.1174 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 26.1175 } else { 26.1176 @@ -3235,42 +3421,64 @@ 26.1177 return Metablock::initialize(result, word_size); 26.1178 } 26.1179 26.1180 - result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 26.1181 + MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 26.1182 + 26.1183 + // Try to allocate metadata. 
26.1184 + MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 26.1185 26.1186 if (result == NULL) { 26.1187 - // Try to clean out some memory and retry. 26.1188 - result = 26.1189 - Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 26.1190 - loader_data, word_size, mdtype); 26.1191 - 26.1192 - // If result is still null, we are out of memory. 26.1193 - if (result == NULL) { 26.1194 - if (Verbose && TraceMetadataChunkAllocation) { 26.1195 - gclog_or_tty->print_cr("Metaspace allocation failed for size " 26.1196 - SIZE_FORMAT, word_size); 26.1197 - if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty); 26.1198 - MetaspaceAux::dump(gclog_or_tty); 26.1199 - } 26.1200 - // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 26.1201 - const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" : 26.1202 - "Metadata space"; 26.1203 - report_java_out_of_memory(space_string); 26.1204 - 26.1205 - if (JvmtiExport::should_post_resource_exhausted()) { 26.1206 - JvmtiExport::post_resource_exhausted( 26.1207 - JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 26.1208 - space_string); 26.1209 - } 26.1210 - if (is_class_space_allocation(mdtype)) { 26.1211 - THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); 26.1212 - } else { 26.1213 - THROW_OOP_0(Universe::out_of_memory_error_metaspace()); 26.1214 - } 26.1215 + // Allocation failed. 26.1216 + if (is_init_completed()) { 26.1217 + // Only start a GC if the bootstrapping has completed. 26.1218 + 26.1219 + // Try to clean out some memory and retry. 26.1220 + result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 26.1221 + loader_data, word_size, mdtype); 26.1222 } 26.1223 } 26.1224 + 26.1225 + if (result == NULL) { 26.1226 + report_metadata_oome(loader_data, word_size, mdtype, THREAD); 26.1227 + // Will not reach here. 
26.1228 + return NULL; 26.1229 + } 26.1230 + 26.1231 return Metablock::initialize(result, word_size); 26.1232 } 26.1233 26.1234 +void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) { 26.1235 + // If result is still null, we are out of memory. 26.1236 + if (Verbose && TraceMetadataChunkAllocation) { 26.1237 + gclog_or_tty->print_cr("Metaspace allocation failed for size " 26.1238 + SIZE_FORMAT, word_size); 26.1239 + if (loader_data->metaspace_or_null() != NULL) { 26.1240 + loader_data->dump(gclog_or_tty); 26.1241 + } 26.1242 + MetaspaceAux::dump(gclog_or_tty); 26.1243 + } 26.1244 + 26.1245 + // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 26.1246 + const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" : 26.1247 + "Metadata space"; 26.1248 + report_java_out_of_memory(space_string); 26.1249 + 26.1250 + if (JvmtiExport::should_post_resource_exhausted()) { 26.1251 + JvmtiExport::post_resource_exhausted( 26.1252 + JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 26.1253 + space_string); 26.1254 + } 26.1255 + 26.1256 + if (!is_init_completed()) { 26.1257 + vm_exit_during_initialization("OutOfMemoryError", space_string); 26.1258 + } 26.1259 + 26.1260 + if (is_class_space_allocation(mdtype)) { 26.1261 + THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 26.1262 + } else { 26.1263 + THROW_OOP(Universe::out_of_memory_error_metaspace()); 26.1264 + } 26.1265 +} 26.1266 + 26.1267 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 26.1268 assert(DumpSharedSpaces, "sanity"); 26.1269
27.1 --- a/src/share/vm/memory/metaspace.hpp Fri Oct 11 17:08:22 2013 -0400 27.2 +++ b/src/share/vm/memory/metaspace.hpp Fri Oct 11 22:22:19 2013 -0400 27.3 @@ -87,9 +87,10 @@ 27.4 friend class MetaspaceAux; 27.5 27.6 public: 27.7 - enum MetadataType {ClassType = 0, 27.8 - NonClassType = ClassType + 1, 27.9 - MetadataTypeCount = ClassType + 2 27.10 + enum MetadataType { 27.11 + ClassType, 27.12 + NonClassType, 27.13 + MetadataTypeCount 27.14 }; 27.15 enum MetaspaceType { 27.16 StandardMetaspaceType, 27.17 @@ -103,6 +104,9 @@ 27.18 private: 27.19 void initialize(Mutex* lock, MetaspaceType type); 27.20 27.21 + // Get the first chunk for a Metaspace. Used for 27.22 + // special cases such as the boot class loader, reflection 27.23 + // class loader and anonymous class loader. 27.24 Metachunk* get_initialization_chunk(MetadataType mdtype, 27.25 size_t chunk_word_size, 27.26 size_t chunk_bunch); 27.27 @@ -123,6 +127,9 @@ 27.28 static size_t _first_chunk_word_size; 27.29 static size_t _first_class_chunk_word_size; 27.30 27.31 + static size_t _commit_alignment; 27.32 + static size_t _reserve_alignment; 27.33 + 27.34 SpaceManager* _vsm; 27.35 SpaceManager* vsm() const { return _vsm; } 27.36 27.37 @@ -191,12 +198,17 @@ 27.38 Metaspace(Mutex* lock, MetaspaceType type); 27.39 ~Metaspace(); 27.40 27.41 - // Initialize globals for Metaspace 27.42 + static void ergo_initialize(); 27.43 static void global_initialize(); 27.44 27.45 static size_t first_chunk_word_size() { return _first_chunk_word_size; } 27.46 static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } 27.47 27.48 + static size_t reserve_alignment() { return _reserve_alignment; } 27.49 + static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; } 27.50 + static size_t commit_alignment() { return _commit_alignment; } 27.51 + static size_t commit_alignment_words() { return _commit_alignment / BytesPerWord; } 27.52 + 27.53 char* bottom() const; 27.54 size_t 
used_words_slow(MetadataType mdtype) const; 27.55 size_t free_words_slow(MetadataType mdtype) const; 27.56 @@ -219,6 +231,9 @@ 27.57 static void purge(MetadataType mdtype); 27.58 static void purge(); 27.59 27.60 + static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, 27.61 + MetadataType mdtype, TRAPS); 27.62 + 27.63 void print_on(outputStream* st) const; 27.64 // Debugging support 27.65 void verify(); 27.66 @@ -352,17 +367,10 @@ 27.67 27.68 class MetaspaceGC : AllStatic { 27.69 27.70 - // The current high-water-mark for inducing a GC. When 27.71 - // the capacity of all space in the virtual lists reaches this value, 27.72 - // a GC is induced and the value is increased. This should be changed 27.73 - // to the space actually used for allocations to avoid affects of 27.74 - // fragmentation losses to partially used chunks. Size is in words. 27.75 - static size_t _capacity_until_GC; 27.76 - 27.77 - // After a GC is done any allocation that fails should try to expand 27.78 - // the capacity of the Metaspaces. This flag is set during attempts 27.79 - // to allocate in the VMGCOperation that does the GC. 27.80 - static bool _expand_after_GC; 27.81 + // The current high-water-mark for inducing a GC. 27.82 + // When committed memory of all metaspaces reaches this value, 27.83 + // a GC is induced and the value is increased. Size is in bytes. 27.84 + static volatile intptr_t _capacity_until_GC; 27.85 27.86 // For a CMS collection, signal that a concurrent collection should 27.87 // be started. 
27.88 @@ -370,20 +378,16 @@ 27.89 27.90 static uint _shrink_factor; 27.91 27.92 - static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; } 27.93 - 27.94 static size_t shrink_factor() { return _shrink_factor; } 27.95 void set_shrink_factor(uint v) { _shrink_factor = v; } 27.96 27.97 public: 27.98 27.99 - static size_t capacity_until_GC() { return _capacity_until_GC; } 27.100 - static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } 27.101 - static void dec_capacity_until_GC(size_t v) { 27.102 - _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0; 27.103 - } 27.104 - static bool expand_after_GC() { return _expand_after_GC; } 27.105 - static void set_expand_after_GC(bool v) { _expand_after_GC = v; } 27.106 + static void initialize() { _capacity_until_GC = MetaspaceSize; } 27.107 + 27.108 + static size_t capacity_until_GC(); 27.109 + static size_t inc_capacity_until_GC(size_t v); 27.110 + static size_t dec_capacity_until_GC(size_t v); 27.111 27.112 static bool should_concurrent_collect() { return _should_concurrent_collect; } 27.113 static void set_should_concurrent_collect(bool v) { 27.114 @@ -391,11 +395,14 @@ 27.115 } 27.116 27.117 // The amount to increase the high-water-mark (_capacity_until_GC) 27.118 - static size_t delta_capacity_until_GC(size_t word_size); 27.119 + static size_t delta_capacity_until_GC(size_t bytes); 27.120 27.121 - // It is expected that this will be called when the current capacity 27.122 - // has been used and a GC should be considered. 27.123 - static bool should_expand(VirtualSpaceList* vsl, size_t word_size); 27.124 + // Tells if we have can expand metaspace without hitting set limits. 27.125 + static bool can_expand(size_t words, bool is_class); 27.126 + 27.127 + // Returns amount that we can expand without hitting a GC, 27.128 + // measured in words. 27.129 + static size_t allowed_expansion(); 27.130 27.131 // Calculate the new high-water mark at which to induce 27.132 // a GC.
28.1 --- a/src/share/vm/opto/graphKit.cpp Fri Oct 11 17:08:22 2013 -0400 28.2 +++ b/src/share/vm/opto/graphKit.cpp Fri Oct 11 22:22:19 2013 -0400 28.3 @@ -3713,7 +3713,8 @@ 28.4 Node* no_base = __ top(); 28.5 float likely = PROB_LIKELY(0.999); 28.6 float unlikely = PROB_UNLIKELY(0.999); 28.7 - Node* zero = __ ConI(0); 28.8 + Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val()); 28.9 + Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val()); 28.10 Node* zeroX = __ ConX(0); 28.11 28.12 // Get the alias_index for raw card-mark memory 28.13 @@ -3769,8 +3770,16 @@ 28.14 // load the original value of the card 28.15 Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw); 28.16 28.17 - __ if_then(card_val, BoolTest::ne, zero); { 28.18 - g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf); 28.19 + __ if_then(card_val, BoolTest::ne, young_card); { 28.20 + sync_kit(ideal); 28.21 + // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier. 28.22 + insert_mem_bar(Op_MemBarVolatile, oop_store); 28.23 + __ sync_kit(this); 28.24 + 28.25 + Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw); 28.26 + __ if_then(card_val_reload, BoolTest::ne, dirty_card); { 28.27 + g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf); 28.28 + } __ end_if(); 28.29 } __ end_if(); 28.30 } __ end_if(); 28.31 } __ end_if();
29.1 --- a/src/share/vm/runtime/arguments.cpp Fri Oct 11 17:08:22 2013 -0400 29.2 +++ b/src/share/vm/runtime/arguments.cpp Fri Oct 11 22:22:19 2013 -0400 29.3 @@ -2657,16 +2657,16 @@ 29.4 FLAG_SET_CMDLINE(bool, BackgroundCompilation, false); 29.5 // -Xmn for compatibility with other JVM vendors 29.6 } else if (match_option(option, "-Xmn", &tail)) { 29.7 - julong long_initial_eden_size = 0; 29.8 - ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1); 29.9 + julong long_initial_young_size = 0; 29.10 + ArgsRange errcode = parse_memory_size(tail, &long_initial_young_size, 1); 29.11 if (errcode != arg_in_range) { 29.12 jio_fprintf(defaultStream::error_stream(), 29.13 - "Invalid initial eden size: %s\n", option->optionString); 29.14 + "Invalid initial young generation size: %s\n", option->optionString); 29.15 describe_range_error(errcode); 29.16 return JNI_EINVAL; 29.17 } 29.18 - FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size); 29.19 - FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size); 29.20 + FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size); 29.21 + FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size); 29.22 // -Xms 29.23 } else if (match_option(option, "-Xms", &tail)) { 29.24 julong long_initial_heap_size = 0; 29.25 @@ -3666,6 +3666,9 @@ 29.26 assert(verify_serial_gc_flags(), "SerialGC unset"); 29.27 #endif // INCLUDE_ALL_GCS 29.28 29.29 + // Initialize Metaspace flags and alignments. 29.30 + Metaspace::ergo_initialize(); 29.31 + 29.32 // Set bytecode rewriting flags 29.33 set_bytecode_flags(); 29.34
30.1 --- a/src/share/vm/runtime/globals.hpp Fri Oct 11 17:08:22 2013 -0400 30.2 +++ b/src/share/vm/runtime/globals.hpp Fri Oct 11 22:22:19 2013 -0400 30.3 @@ -481,21 +481,21 @@ 30.4 #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \ 30.5 \ 30.6 lp64_product(bool, UseCompressedOops, false, \ 30.7 - "Use 32-bit object references in 64-bit VM " \ 30.8 - "lp64_product means flag is always constant in 32 bit VM") \ 30.9 + "Use 32-bit object references in 64-bit VM. " \ 30.10 + "lp64_product means flag is always constant in 32 bit VM") \ 30.11 \ 30.12 lp64_product(bool, UseCompressedClassPointers, false, \ 30.13 - "Use 32-bit class pointers in 64-bit VM " \ 30.14 - "lp64_product means flag is always constant in 32 bit VM") \ 30.15 + "Use 32-bit class pointers in 64-bit VM. " \ 30.16 + "lp64_product means flag is always constant in 32 bit VM") \ 30.17 \ 30.18 notproduct(bool, CheckCompressedOops, true, \ 30.19 - "generate checks in encoding/decoding code in debug VM") \ 30.20 + "Generate checks in encoding/decoding code in debug VM") \ 30.21 \ 30.22 product_pd(uintx, HeapBaseMinAddress, \ 30.23 - "OS specific low limit for heap base address") \ 30.24 + "OS specific low limit for heap base address") \ 30.25 \ 30.26 diagnostic(bool, PrintCompressedOopsMode, false, \ 30.27 - "Print compressed oops base address and encoding mode") \ 30.28 + "Print compressed oops base address and encoding mode") \ 30.29 \ 30.30 lp64_product(intx, ObjectAlignmentInBytes, 8, \ 30.31 "Default object alignment in bytes, 8 is minimum") \ 30.32 @@ -517,7 +517,7 @@ 30.33 "Use lwsync instruction if true, else use slower sync") \ 30.34 \ 30.35 develop(bool, CleanChunkPoolAsync, falseInEmbedded, \ 30.36 - "Whether to clean the chunk pool asynchronously") \ 30.37 + "Clean the chunk pool asynchronously") \ 30.38 \ 30.39 /* Temporary: See 6948537 */ \ 30.40 experimental(bool, UseMemSetInBOT, true, \ 30.41 @@ -527,10 
+527,12 @@ 30.42 "Enable normal processing of flags relating to field diagnostics")\ 30.43 \ 30.44 experimental(bool, UnlockExperimentalVMOptions, false, \ 30.45 - "Enable normal processing of flags relating to experimental features")\ 30.46 + "Enable normal processing of flags relating to experimental " \ 30.47 + "features") \ 30.48 \ 30.49 product(bool, JavaMonitorsInStackTrace, true, \ 30.50 - "Print info. about Java monitor locks when the stacks are dumped")\ 30.51 + "Print information about Java monitor locks when the stacks are" \ 30.52 + "dumped") \ 30.53 \ 30.54 product_pd(bool, UseLargePages, \ 30.55 "Use large page memory") \ 30.56 @@ -541,8 +543,12 @@ 30.57 develop(bool, LargePagesIndividualAllocationInjectError, false, \ 30.58 "Fail large pages individual allocation") \ 30.59 \ 30.60 + product(bool, UseLargePagesInMetaspace, false, \ 30.61 + "Use large page memory in metaspace. " \ 30.62 + "Only used if UseLargePages is enabled.") \ 30.63 + \ 30.64 develop(bool, TracePageSizes, false, \ 30.65 - "Trace page size selection and usage.") \ 30.66 + "Trace page size selection and usage") \ 30.67 \ 30.68 product(bool, UseNUMA, false, \ 30.69 "Use NUMA if available") \ 30.70 @@ -557,12 +563,12 @@ 30.71 "Force NUMA optimizations on single-node/UMA systems") \ 30.72 \ 30.73 product(uintx, NUMAChunkResizeWeight, 20, \ 30.74 - "Percentage (0-100) used to weigh the current sample when " \ 30.75 + "Percentage (0-100) used to weigh the current sample when " \ 30.76 "computing exponentially decaying average for " \ 30.77 "AdaptiveNUMAChunkSizing") \ 30.78 \ 30.79 product(uintx, NUMASpaceResizeRate, 1*G, \ 30.80 - "Do not reallocate more that this amount per collection") \ 30.81 + "Do not reallocate more than this amount per collection") \ 30.82 \ 30.83 product(bool, UseAdaptiveNUMAChunkSizing, true, \ 30.84 "Enable adaptive chunk sizing for NUMA") \ 30.85 @@ -579,17 +585,17 @@ 30.86 product(intx, UseSSE, 99, \ 30.87 "Highest supported SSE instructions set on x86/x64") 
\ 30.88 \ 30.89 - product(bool, UseAES, false, \ 30.90 + product(bool, UseAES, false, \ 30.91 "Control whether AES instructions can be used on x86/x64") \ 30.92 \ 30.93 product(uintx, LargePageSizeInBytes, 0, \ 30.94 - "Large page size (0 to let VM choose the page size") \ 30.95 + "Large page size (0 to let VM choose the page size)") \ 30.96 \ 30.97 product(uintx, LargePageHeapSizeThreshold, 128*M, \ 30.98 - "Use large pages if max heap is at least this big") \ 30.99 + "Use large pages if maximum heap is at least this big") \ 30.100 \ 30.101 product(bool, ForceTimeHighResolution, false, \ 30.102 - "Using high time resolution(For Win32 only)") \ 30.103 + "Using high time resolution (for Win32 only)") \ 30.104 \ 30.105 develop(bool, TraceItables, false, \ 30.106 "Trace initialization and use of itables") \ 30.107 @@ -605,10 +611,10 @@ 30.108 \ 30.109 develop(bool, TraceLongCompiles, false, \ 30.110 "Print out every time compilation is longer than " \ 30.111 - "a given threashold") \ 30.112 + "a given threshold") \ 30.113 \ 30.114 develop(bool, SafepointALot, false, \ 30.115 - "Generates a lot of safepoints. Works with " \ 30.116 + "Generate a lot of safepoints. 
This works with " \ 30.117 "GuaranteedSafepointInterval") \ 30.118 \ 30.119 product_pd(bool, BackgroundCompilation, \ 30.120 @@ -616,13 +622,13 @@ 30.121 "compilation") \ 30.122 \ 30.123 product(bool, PrintVMQWaitTime, false, \ 30.124 - "Prints out the waiting time in VM operation queue") \ 30.125 + "Print out the waiting time in VM operation queue") \ 30.126 \ 30.127 develop(bool, NoYieldsInMicrolock, false, \ 30.128 "Disable yields in microlock") \ 30.129 \ 30.130 develop(bool, TraceOopMapGeneration, false, \ 30.131 - "Shows oopmap generation") \ 30.132 + "Show OopMapGeneration") \ 30.133 \ 30.134 product(bool, MethodFlushing, true, \ 30.135 "Reclamation of zombie and not-entrant methods") \ 30.136 @@ -631,10 +637,11 @@ 30.137 "Verify stack of each thread when it is entering a runtime call") \ 30.138 \ 30.139 diagnostic(bool, ForceUnreachable, false, \ 30.140 - "Make all non code cache addresses to be unreachable with forcing use of 64bit literal fixups") \ 30.141 + "Make all non code cache addresses to be unreachable by " \ 30.142 + "forcing use of 64bit literal fixups") \ 30.143 \ 30.144 notproduct(bool, StressDerivedPointers, false, \ 30.145 - "Force scavenge when a derived pointers is detected on stack " \ 30.146 + "Force scavenge when a derived pointer is detected on stack " \ 30.147 "after rtm call") \ 30.148 \ 30.149 develop(bool, TraceDerivedPointers, false, \ 30.150 @@ -653,86 +660,86 @@ 30.151 "Use Inline Caches for virtual calls ") \ 30.152 \ 30.153 develop(bool, InlineArrayCopy, true, \ 30.154 - "inline arraycopy native that is known to be part of " \ 30.155 + "Inline arraycopy native that is known to be part of " \ 30.156 "base library DLL") \ 30.157 \ 30.158 develop(bool, InlineObjectHash, true, \ 30.159 - "inline Object::hashCode() native that is known to be part " \ 30.160 + "Inline Object::hashCode() native that is known to be part " \ 30.161 "of base library DLL") \ 30.162 \ 30.163 develop(bool, InlineNatives, true, \ 30.164 - "inline natives 
that are known to be part of base library DLL") \ 30.165 + "Inline natives that are known to be part of base library DLL") \ 30.166 \ 30.167 develop(bool, InlineMathNatives, true, \ 30.168 - "inline SinD, CosD, etc.") \ 30.169 + "Inline SinD, CosD, etc.") \ 30.170 \ 30.171 develop(bool, InlineClassNatives, true, \ 30.172 - "inline Class.isInstance, etc") \ 30.173 + "Inline Class.isInstance, etc") \ 30.174 \ 30.175 develop(bool, InlineThreadNatives, true, \ 30.176 - "inline Thread.currentThread, etc") \ 30.177 + "Inline Thread.currentThread, etc") \ 30.178 \ 30.179 develop(bool, InlineUnsafeOps, true, \ 30.180 - "inline memory ops (native methods) from sun.misc.Unsafe") \ 30.181 + "Inline memory ops (native methods) from sun.misc.Unsafe") \ 30.182 \ 30.183 product(bool, CriticalJNINatives, true, \ 30.184 - "check for critical JNI entry points") \ 30.185 + "Check for critical JNI entry points") \ 30.186 \ 30.187 notproduct(bool, StressCriticalJNINatives, false, \ 30.188 - "Exercise register saving code in critical natives") \ 30.189 + "Exercise register saving code in critical natives") \ 30.190 \ 30.191 product(bool, UseSSE42Intrinsics, false, \ 30.192 "SSE4.2 versions of intrinsics") \ 30.193 \ 30.194 product(bool, UseAESIntrinsics, false, \ 30.195 - "use intrinsics for AES versions of crypto") \ 30.196 + "Use intrinsics for AES versions of crypto") \ 30.197 \ 30.198 product(bool, UseCRC32Intrinsics, false, \ 30.199 "use intrinsics for java.util.zip.CRC32") \ 30.200 \ 30.201 develop(bool, TraceCallFixup, false, \ 30.202 - "traces all call fixups") \ 30.203 + "Trace all call fixups") \ 30.204 \ 30.205 develop(bool, DeoptimizeALot, false, \ 30.206 - "deoptimize at every exit from the runtime system") \ 30.207 + "Deoptimize at every exit from the runtime system") \ 30.208 \ 30.209 notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ 30.210 - "a comma separated list of bcis to deoptimize at") \ 30.211 + "A comma separated list of bcis to deoptimize at") \ 30.212 \ 30.213 
product(bool, DeoptimizeRandom, false, \ 30.214 - "deoptimize random frames on random exit from the runtime system")\ 30.215 + "Deoptimize random frames on random exit from the runtime system")\ 30.216 \ 30.217 notproduct(bool, ZombieALot, false, \ 30.218 - "creates zombies (non-entrant) at exit from the runt. system") \ 30.219 + "Create zombies (non-entrant) at exit from the runtime system") \ 30.220 \ 30.221 product(bool, UnlinkSymbolsALot, false, \ 30.222 - "unlink unreferenced symbols from the symbol table at safepoints")\ 30.223 + "Unlink unreferenced symbols from the symbol table at safepoints")\ 30.224 \ 30.225 notproduct(bool, WalkStackALot, false, \ 30.226 - "trace stack (no print) at every exit from the runtime system") \ 30.227 + "Trace stack (no print) at every exit from the runtime system") \ 30.228 \ 30.229 product(bool, Debugging, false, \ 30.230 - "set when executing debug methods in debug.ccp " \ 30.231 + "Set when executing debug methods in debug.cpp " \ 30.232 "(to prevent triggering assertions)") \ 30.233 \ 30.234 notproduct(bool, StrictSafepointChecks, trueInDebug, \ 30.235 "Enable strict checks that safepoints cannot happen for threads " \ 30.236 - "that used No_Safepoint_Verifier") \ 30.237 + "that use No_Safepoint_Verifier") \ 30.238 \ 30.239 notproduct(bool, VerifyLastFrame, false, \ 30.240 "Verify oops on last frame on entry to VM") \ 30.241 \ 30.242 develop(bool, TraceHandleAllocation, false, \ 30.243 - "Prints out warnings when suspicious many handles are allocated") \ 30.244 + "Print out warnings when suspiciously many handles are allocated")\ 30.245 \ 30.246 product(bool, UseCompilerSafepoints, true, \ 30.247 "Stop at safepoints in compiled code") \ 30.248 \ 30.249 product(bool, FailOverToOldVerifier, true, \ 30.250 - "fail over to old verifier when split verifier fails") \ 30.251 + "Fail over to old verifier when split verifier fails") \ 30.252 \ 30.253 develop(bool, ShowSafepointMsgs, false, \ 30.254 - "Show msg. 
about safepoint synch.") \ 30.255 + "Show message about safepoint synchronization") \ 30.256 \ 30.257 product(bool, SafepointTimeout, false, \ 30.258 "Time out and warn or fail after SafepointTimeoutDelay " \ 30.259 @@ -756,19 +763,19 @@ 30.260 "Trace external suspend wait failures") \ 30.261 \ 30.262 product(bool, MaxFDLimit, true, \ 30.263 - "Bump the number of file descriptors to max in solaris.") \ 30.264 + "Bump the number of file descriptors to maximum in Solaris") \ 30.265 \ 30.266 diagnostic(bool, LogEvents, true, \ 30.267 - "Enable the various ring buffer event logs") \ 30.268 + "Enable the various ring buffer event logs") \ 30.269 \ 30.270 diagnostic(uintx, LogEventsBufferEntries, 10, \ 30.271 - "Enable the various ring buffer event logs") \ 30.272 + "Number of ring buffer event logs") \ 30.273 \ 30.274 product(bool, BytecodeVerificationRemote, true, \ 30.275 - "Enables the Java bytecode verifier for remote classes") \ 30.276 + "Enable the Java bytecode verifier for remote classes") \ 30.277 \ 30.278 product(bool, BytecodeVerificationLocal, false, \ 30.279 - "Enables the Java bytecode verifier for local classes") \ 30.280 + "Enable the Java bytecode verifier for local classes") \ 30.281 \ 30.282 develop(bool, ForceFloatExceptions, trueInDebug, \ 30.283 "Force exceptions on FP stack under/overflow") \ 30.284 @@ -780,7 +787,7 @@ 30.285 "Trace java language assertions") \ 30.286 \ 30.287 notproduct(bool, CheckAssertionStatusDirectives, false, \ 30.288 - "temporary - see javaClasses.cpp") \ 30.289 + "Temporary - see javaClasses.cpp") \ 30.290 \ 30.291 notproduct(bool, PrintMallocFree, false, \ 30.292 "Trace calls to C heap malloc/free allocation") \ 30.293 @@ -799,16 +806,16 @@ 30.294 "entering the VM") \ 30.295 \ 30.296 notproduct(bool, CheckOopishValues, false, \ 30.297 - "Warn if value contains oop ( requires ZapDeadLocals)") \ 30.298 + "Warn if value contains oop (requires ZapDeadLocals)") \ 30.299 \ 30.300 develop(bool, UseMallocOnly, false, \ 30.301 - 
"use only malloc/free for allocation (no resource area/arena)") \ 30.302 + "Use only malloc/free for allocation (no resource area/arena)") \ 30.303 \ 30.304 develop(bool, PrintMalloc, false, \ 30.305 - "print all malloc/free calls") \ 30.306 + "Print all malloc/free calls") \ 30.307 \ 30.308 develop(bool, PrintMallocStatistics, false, \ 30.309 - "print malloc/free statistics") \ 30.310 + "Print malloc/free statistics") \ 30.311 \ 30.312 develop(bool, ZapResourceArea, trueInDebug, \ 30.313 "Zap freed resource/arena space with 0xABABABAB") \ 30.314 @@ -820,7 +827,7 @@ 30.315 "Zap freed JNI handle space with 0xFEFEFEFE") \ 30.316 \ 30.317 notproduct(bool, ZapStackSegments, trueInDebug, \ 30.318 - "Zap allocated/freed Stack segments with 0xFADFADED") \ 30.319 + "Zap allocated/freed stack segments with 0xFADFADED") \ 30.320 \ 30.321 develop(bool, ZapUnusedHeapArea, trueInDebug, \ 30.322 "Zap unused heap space with 0xBAADBABE") \ 30.323 @@ -835,7 +842,7 @@ 30.324 "Zap filler objects with 0xDEAFBABE") \ 30.325 \ 30.326 develop(bool, PrintVMMessages, true, \ 30.327 - "Print vm messages on console") \ 30.328 + "Print VM messages on console") \ 30.329 \ 30.330 product(bool, PrintGCApplicationConcurrentTime, false, \ 30.331 "Print the time the application has been running") \ 30.332 @@ -844,21 +851,21 @@ 30.333 "Print the time the application has been stopped") \ 30.334 \ 30.335 diagnostic(bool, VerboseVerification, false, \ 30.336 - "Display detailed verification details") \ 30.337 + "Display detailed verification details") \ 30.338 \ 30.339 notproduct(uintx, ErrorHandlerTest, 0, \ 30.340 - "If > 0, provokes an error after VM initialization; the value" \ 30.341 - "determines which error to provoke. See test_error_handler()" \ 30.342 + "If > 0, provokes an error after VM initialization; the value " \ 30.343 + "determines which error to provoke. 
See test_error_handler() " \ 30.344 "in debug.cpp.") \ 30.345 \ 30.346 develop(bool, Verbose, false, \ 30.347 - "Prints additional debugging information from other modes") \ 30.348 + "Print additional debugging information from other modes") \ 30.349 \ 30.350 develop(bool, PrintMiscellaneous, false, \ 30.351 - "Prints uncategorized debugging information (requires +Verbose)") \ 30.352 + "Print uncategorized debugging information (requires +Verbose)") \ 30.353 \ 30.354 develop(bool, WizardMode, false, \ 30.355 - "Prints much more debugging information") \ 30.356 + "Print much more debugging information") \ 30.357 \ 30.358 product(bool, ShowMessageBoxOnError, false, \ 30.359 "Keep process alive on VM fatal error") \ 30.360 @@ -870,7 +877,7 @@ 30.361 "Let VM fatal error propagate to the OS (ie. WER on Windows)") \ 30.362 \ 30.363 product(bool, SuppressFatalErrorMessage, false, \ 30.364 - "Do NO Fatal Error report [Avoid deadlock]") \ 30.365 + "Report NO fatal error message (avoid deadlock)") \ 30.366 \ 30.367 product(ccstrlist, OnError, "", \ 30.368 "Run user-defined commands on fatal error; see VMError.cpp " \ 30.369 @@ -880,17 +887,17 @@ 30.370 "Run user-defined commands on first java.lang.OutOfMemoryError") \ 30.371 \ 30.372 manageable(bool, HeapDumpBeforeFullGC, false, \ 30.373 - "Dump heap to file before any major stop-world GC") \ 30.374 + "Dump heap to file before any major stop-the-world GC") \ 30.375 \ 30.376 manageable(bool, HeapDumpAfterFullGC, false, \ 30.377 - "Dump heap to file after any major stop-world GC") \ 30.378 + "Dump heap to file after any major stop-the-world GC") \ 30.379 \ 30.380 manageable(bool, HeapDumpOnOutOfMemoryError, false, \ 30.381 "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ 30.382 \ 30.383 manageable(ccstr, HeapDumpPath, NULL, \ 30.384 - "When HeapDumpOnOutOfMemoryError is on, the path (filename or" \ 30.385 - "directory) of the dump file (defaults to java_pid<pid>.hprof" \ 30.386 + "When 
HeapDumpOnOutOfMemoryError is on, the path (filename or " \ 30.387 + "directory) of the dump file (defaults to java_pid<pid>.hprof " \ 30.388 "in the working directory)") \ 30.389 \ 30.390 develop(uintx, SegmentedHeapDumpThreshold, 2*G, \ 30.391 @@ -904,10 +911,10 @@ 30.392 "Execute breakpoint upon encountering VM warning") \ 30.393 \ 30.394 develop(bool, TraceVMOperation, false, \ 30.395 - "Trace vm operations") \ 30.396 + "Trace VM operations") \ 30.397 \ 30.398 develop(bool, UseFakeTimers, false, \ 30.399 - "Tells whether the VM should use system time or a fake timer") \ 30.400 + "Tell whether the VM should use system time or a fake timer") \ 30.401 \ 30.402 product(ccstr, NativeMemoryTracking, "off", \ 30.403 "Native memory tracking options") \ 30.404 @@ -917,7 +924,7 @@ 30.405 \ 30.406 diagnostic(bool, AutoShutdownNMT, true, \ 30.407 "Automatically shutdown native memory tracking under stress " \ 30.408 - "situation. When set to false, native memory tracking tries to " \ 30.409 + "situations. 
When set to false, native memory tracking tries to " \ 30.410 "stay alive at the expense of JVM performance") \ 30.411 \ 30.412 diagnostic(bool, LogCompilation, false, \ 30.413 @@ -927,12 +934,12 @@ 30.414 "Print compilations") \ 30.415 \ 30.416 diagnostic(bool, TraceNMethodInstalls, false, \ 30.417 - "Trace nmethod intallation") \ 30.418 + "Trace nmethod installation") \ 30.419 \ 30.420 diagnostic(intx, ScavengeRootsInCode, 2, \ 30.421 - "0: do not allow scavengable oops in the code cache; " \ 30.422 - "1: allow scavenging from the code cache; " \ 30.423 - "2: emit as many constants as the compiler can see") \ 30.424 + "0: do not allow scavengable oops in the code cache; " \ 30.425 + "1: allow scavenging from the code cache; " \ 30.426 + "2: emit as many constants as the compiler can see") \ 30.427 \ 30.428 product(bool, AlwaysRestoreFPU, false, \ 30.429 "Restore the FPU control word after every JNI call (expensive)") \ 30.430 @@ -953,7 +960,7 @@ 30.431 "Print assembly code (using external disassembler.so)") \ 30.432 \ 30.433 diagnostic(ccstr, PrintAssemblyOptions, NULL, \ 30.434 - "Options string passed to disassembler.so") \ 30.435 + "Print options string passed to disassembler.so") \ 30.436 \ 30.437 diagnostic(bool, PrintNMethods, false, \ 30.438 "Print assembly code for nmethods when generated") \ 30.439 @@ -974,20 +981,21 @@ 30.440 "Print exception handler tables for all nmethods when generated") \ 30.441 \ 30.442 develop(bool, StressCompiledExceptionHandlers, false, \ 30.443 - "Exercise compiled exception handlers") \ 30.444 + "Exercise compiled exception handlers") \ 30.445 \ 30.446 develop(bool, InterceptOSException, false, \ 30.447 - "Starts debugger when an implicit OS (e.g., NULL) " \ 30.448 + "Start debugger when an implicit OS (e.g. 
NULL) " \ 30.449 "exception happens") \ 30.450 \ 30.451 product(bool, PrintCodeCache, false, \ 30.452 "Print the code cache memory usage when exiting") \ 30.453 \ 30.454 develop(bool, PrintCodeCache2, false, \ 30.455 - "Print detailed usage info on the code cache when exiting") \ 30.456 + "Print detailed usage information on the code cache when exiting")\ 30.457 \ 30.458 product(bool, PrintCodeCacheOnCompilation, false, \ 30.459 - "Print the code cache memory usage each time a method is compiled") \ 30.460 + "Print the code cache memory usage each time a method is " \ 30.461 + "compiled") \ 30.462 \ 30.463 diagnostic(bool, PrintStubCode, false, \ 30.464 "Print generated stub code") \ 30.465 @@ -999,40 +1007,40 @@ 30.466 "Omit backtraces for some 'hot' exceptions in optimized code") \ 30.467 \ 30.468 product(bool, ProfilerPrintByteCodeStatistics, false, \ 30.469 - "Prints byte code statictics when dumping profiler output") \ 30.470 + "Print bytecode statistics when dumping profiler output") \ 30.471 \ 30.472 product(bool, ProfilerRecordPC, false, \ 30.473 - "Collects tick for each 16 byte interval of compiled code") \ 30.474 + "Collect ticks for each 16 byte interval of compiled code") \ 30.475 \ 30.476 product(bool, ProfileVM, false, \ 30.477 - "Profiles ticks that fall within VM (either in the VM Thread " \ 30.478 + "Profile ticks that fall within VM (either in the VM Thread " \ 30.479 "or VM code called through stubs)") \ 30.480 \ 30.481 product(bool, ProfileIntervals, false, \ 30.482 - "Prints profiles for each interval (see ProfileIntervalsTicks)") \ 30.483 + "Print profiles for each interval (see ProfileIntervalsTicks)") \ 30.484 \ 30.485 notproduct(bool, ProfilerCheckIntervals, false, \ 30.486 - "Collect and print info on spacing of profiler ticks") \ 30.487 + "Collect and print information on spacing of profiler ticks") \ 30.488 \ 30.489 develop(bool, PrintJVMWarnings, false, \ 30.490 - "Prints warnings for unimplemented JVM functions") \ 30.491 + "Print 
warnings for unimplemented JVM functions") \ 30.492 \ 30.493 product(bool, PrintWarnings, true, \ 30.494 - "Prints JVM warnings to output stream") \ 30.495 + "Print JVM warnings to output stream") \ 30.496 \ 30.497 notproduct(uintx, WarnOnStalledSpinLock, 0, \ 30.498 - "Prints warnings for stalled SpinLocks") \ 30.499 + "Print warnings for stalled SpinLocks") \ 30.500 \ 30.501 product(bool, RegisterFinalizersAtInit, true, \ 30.502 "Register finalizable objects at end of Object.<init> or " \ 30.503 "after allocation") \ 30.504 \ 30.505 develop(bool, RegisterReferences, true, \ 30.506 - "Tells whether the VM should register soft/weak/final/phantom " \ 30.507 + "Tell whether the VM should register soft/weak/final/phantom " \ 30.508 "references") \ 30.509 \ 30.510 develop(bool, IgnoreRewrites, false, \ 30.511 - "Supress rewrites of bytecodes in the oopmap generator. " \ 30.512 + "Suppress rewrites of bytecodes in the oopmap generator. " \ 30.513 "This is unsafe!") \ 30.514 \ 30.515 develop(bool, PrintCodeCacheExtension, false, \ 30.516 @@ -1042,8 +1050,7 @@ 30.517 "Enable the security JVM functions") \ 30.518 \ 30.519 develop(bool, ProtectionDomainVerification, true, \ 30.520 - "Verifies protection domain before resolution in system " \ 30.521 - "dictionary") \ 30.522 + "Verify protection domain before resolution in system dictionary")\ 30.523 \ 30.524 product(bool, ClassUnloading, true, \ 30.525 "Do unloading of classes") \ 30.526 @@ -1056,14 +1063,14 @@ 30.527 "Write memory usage profiling to log file") \ 30.528 \ 30.529 notproduct(bool, PrintSystemDictionaryAtExit, false, \ 30.530 - "Prints the system dictionary at exit") \ 30.531 + "Print the system dictionary at exit") \ 30.532 \ 30.533 experimental(intx, PredictedLoadedClassCount, 0, \ 30.534 - "Experimental: Tune loaded class cache starting size.") \ 30.535 + "Experimental: Tune loaded class cache starting size") \ 30.536 \ 30.537 diagnostic(bool, UnsyncloadClass, false, \ 30.538 "Unstable: VM calls loadClass 
unsynchronized. Custom " \ 30.539 - "class loader must call VM synchronized for findClass " \ 30.540 + "class loader must call VM synchronized for findClass " \ 30.541 "and defineClass.") \ 30.542 \ 30.543 product(bool, AlwaysLockClassLoader, false, \ 30.544 @@ -1079,22 +1086,22 @@ 30.545 "Call loadClassInternal() rather than loadClass()") \ 30.546 \ 30.547 product_pd(bool, DontYieldALot, \ 30.548 - "Throw away obvious excess yield calls (for SOLARIS only)") \ 30.549 + "Throw away obvious excess yield calls (for Solaris only)") \ 30.550 \ 30.551 product_pd(bool, ConvertSleepToYield, \ 30.552 - "Converts sleep(0) to thread yield " \ 30.553 - "(may be off for SOLARIS to improve GUI)") \ 30.554 + "Convert sleep(0) to thread yield " \ 30.555 + "(may be off for Solaris to improve GUI)") \ 30.556 \ 30.557 product(bool, ConvertYieldToSleep, false, \ 30.558 - "Converts yield to a sleep of MinSleepInterval to simulate Win32 "\ 30.559 - "behavior (SOLARIS only)") \ 30.560 + "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \ 30.561 + "behavior (Solaris only)") \ 30.562 \ 30.563 product(bool, UseBoundThreads, true, \ 30.564 - "Bind user level threads to kernel threads (for SOLARIS only)") \ 30.565 + "Bind user level threads to kernel threads (for Solaris only)") \ 30.566 \ 30.567 develop(bool, UseDetachedThreads, true, \ 30.568 "Use detached threads that are recycled upon termination " \ 30.569 - "(for SOLARIS only)") \ 30.570 + "(for Solaris only)") \ 30.571 \ 30.572 product(bool, UseLWPSynchronization, true, \ 30.573 "Use LWP-based instead of libthread-based synchronization " \ 30.574 @@ -1104,41 +1111,43 @@ 30.575 "(Unstable) Various monitor synchronization tunables") \ 30.576 \ 30.577 product(intx, EmitSync, 0, \ 30.578 - "(Unsafe,Unstable) " \ 30.579 - " Controls emission of inline sync fast-path code") \ 30.580 + "(Unsafe, Unstable) " \ 30.581 + "Control emission of inline sync fast-path code") \ 30.582 \ 30.583 product(intx, MonitorBound, 0, "Bound 
Monitor population") \ 30.584 \ 30.585 product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \ 30.586 \ 30.587 - product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \ 30.588 - \ 30.589 - product(intx, SyncVerbose, 0, "(Unstable)" ) \ 30.590 - \ 30.591 - product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" ) \ 30.592 + product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \ 30.593 + \ 30.594 + product(intx, SyncVerbose, 0, "(Unstable)") \ 30.595 + \ 30.596 + product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \ 30.597 \ 30.598 product(intx, hashCode, 5, \ 30.599 - "(Unstable) select hashCode generation algorithm" ) \ 30.600 + "(Unstable) select hashCode generation algorithm") \ 30.601 \ 30.602 product(intx, WorkAroundNPTLTimedWaitHang, 1, \ 30.603 - "(Unstable, Linux-specific)" \ 30.604 - " avoid NPTL-FUTEX hang pthread_cond_timedwait" ) \ 30.605 + "(Unstable, Linux-specific) " \ 30.606 + "avoid NPTL-FUTEX hang pthread_cond_timedwait") \ 30.607 \ 30.608 product(bool, FilterSpuriousWakeups, true, \ 30.609 "Prevent spurious or premature wakeups from object.wait " \ 30.610 "(Solaris only)") \ 30.611 \ 30.612 - product(intx, NativeMonitorTimeout, -1, "(Unstable)" ) \ 30.613 - product(intx, NativeMonitorFlags, 0, "(Unstable)" ) \ 30.614 - product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" ) \ 30.615 + product(intx, NativeMonitorTimeout, -1, "(Unstable)") \ 30.616 + \ 30.617 + product(intx, NativeMonitorFlags, 0, "(Unstable)") \ 30.618 + \ 30.619 + product(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \ 30.620 \ 30.621 develop(bool, UsePthreads, false, \ 30.622 "Use pthread-based instead of libthread-based synchronization " \ 30.623 "(SPARC only)") \ 30.624 \ 30.625 product(bool, AdjustConcurrency, false, \ 30.626 - "call thr_setconcurrency at thread create time to avoid " \ 30.627 - "LWP starvation on MP systems (For Solaris Only)") \ 30.628 + "Call thr_setconcurrency at thread creation time to avoid " 
\ 30.629 + "LWP starvation on MP systems (for Solaris Only)") \ 30.630 \ 30.631 product(bool, ReduceSignalUsage, false, \ 30.632 "Reduce the use of OS signals in Java and/or the VM") \ 30.633 @@ -1147,13 +1156,14 @@ 30.634 "Share vtable stubs (smaller code but worse branch prediction") \ 30.635 \ 30.636 develop(bool, LoadLineNumberTables, true, \ 30.637 - "Tells whether the class file parser loads line number tables") \ 30.638 + "Tell whether the class file parser loads line number tables") \ 30.639 \ 30.640 develop(bool, LoadLocalVariableTables, true, \ 30.641 - "Tells whether the class file parser loads local variable tables")\ 30.642 + "Tell whether the class file parser loads local variable tables") \ 30.643 \ 30.644 develop(bool, LoadLocalVariableTypeTables, true, \ 30.645 - "Tells whether the class file parser loads local variable type tables")\ 30.646 + "Tell whether the class file parser loads local variable type" \ 30.647 + "tables") \ 30.648 \ 30.649 product(bool, AllowUserSignalHandlers, false, \ 30.650 "Do not complain if the application installs signal handlers " \ 30.651 @@ -1184,10 +1194,12 @@ 30.652 \ 30.653 product(bool, EagerXrunInit, false, \ 30.654 "Eagerly initialize -Xrun libraries; allows startup profiling, " \ 30.655 - " but not all -Xrun libraries may support the state of the VM at this time") \ 30.656 + "but not all -Xrun libraries may support the state of the VM " \ 30.657 + "at this time") \ 30.658 \ 30.659 product(bool, PreserveAllAnnotations, false, \ 30.660 - "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \ 30.661 + "Preserve RuntimeInvisibleAnnotations as well " \ 30.662 + "as RuntimeVisibleAnnotations") \ 30.663 \ 30.664 develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ 30.665 "Number of OutOfMemoryErrors preallocated with backtrace") \ 30.666 @@ -1262,7 +1274,7 @@ 30.667 "Trace level for JVMTI RedefineClasses") \ 30.668 \ 30.669 develop(bool, StressMethodComparator, false, \ 30.670 - "run the 
MethodComparator on all loaded methods") \ 30.671 + "Run the MethodComparator on all loaded methods") \ 30.672 \ 30.673 /* change to false by default sometime after Mustang */ \ 30.674 product(bool, VerifyMergedCPBytecodes, true, \ 30.675 @@ -1296,7 +1308,7 @@ 30.676 "Trace dependencies") \ 30.677 \ 30.678 develop(bool, VerifyDependencies, trueInDebug, \ 30.679 - "Exercise and verify the compilation dependency mechanism") \ 30.680 + "Exercise and verify the compilation dependency mechanism") \ 30.681 \ 30.682 develop(bool, TraceNewOopMapGeneration, false, \ 30.683 "Trace OopMapGeneration") \ 30.684 @@ -1314,7 +1326,7 @@ 30.685 "Trace monitor matching failures during OopMapGeneration") \ 30.686 \ 30.687 develop(bool, TraceOopMapRewrites, false, \ 30.688 - "Trace rewritting of method oops during oop map generation") \ 30.689 + "Trace rewriting of method oops during oop map generation") \ 30.690 \ 30.691 develop(bool, TraceSafepoint, false, \ 30.692 "Trace safepoint operations") \ 30.693 @@ -1332,10 +1344,10 @@ 30.694 "Trace setup time") \ 30.695 \ 30.696 develop(bool, TraceProtectionDomainVerification, false, \ 30.697 - "Trace protection domain verifcation") \ 30.698 + "Trace protection domain verification") \ 30.699 \ 30.700 develop(bool, TraceClearedExceptions, false, \ 30.701 - "Prints when an exception is forcibly cleared") \ 30.702 + "Print when an exception is forcibly cleared") \ 30.703 \ 30.704 product(bool, TraceClassResolution, false, \ 30.705 "Trace all constant pool resolutions (for debugging)") \ 30.706 @@ -1349,7 +1361,7 @@ 30.707 /* gc */ \ 30.708 \ 30.709 product(bool, UseSerialGC, false, \ 30.710 - "Use the serial garbage collector") \ 30.711 + "Use the Serial garbage collector") \ 30.712 \ 30.713 product(bool, UseG1GC, false, \ 30.714 "Use the Garbage-First garbage collector") \ 30.715 @@ -1368,16 +1380,16 @@ 30.716 "The collection count for the first maximum compaction") \ 30.717 \ 30.718 product(bool, UseMaximumCompactionOnSystemGC, true, \ 30.719 
- "In the Parallel Old garbage collector maximum compaction for " \ 30.720 - "a system GC") \ 30.721 + "Use maximum compaction in the Parallel Old garbage collector " \ 30.722 + "for a system GC") \ 30.723 \ 30.724 product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ 30.725 - "The mean used by the par compact dead wood" \ 30.726 - "limiter (a number between 0-100).") \ 30.727 + "The mean used by the parallel compact dead wood " \ 30.728 + "limiter (a number between 0-100)") \ 30.729 \ 30.730 product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ 30.731 - "The standard deviation used by the par compact dead wood" \ 30.732 - "limiter (a number between 0-100).") \ 30.733 + "The standard deviation used by the parallel compact dead wood " \ 30.734 + "limiter (a number between 0-100)") \ 30.735 \ 30.736 product(uintx, ParallelGCThreads, 0, \ 30.737 "Number of parallel threads parallel gc will use") \ 30.738 @@ -1387,7 +1399,7 @@ 30.739 "parallel gc will use") \ 30.740 \ 30.741 diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ 30.742 - "Force dynamic selection of the number of" \ 30.743 + "Force dynamic selection of the number of " \ 30.744 "parallel threads parallel gc will use to aid debugging") \ 30.745 \ 30.746 product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M), \ 30.747 @@ -1398,7 +1410,7 @@ 30.748 "Trace the dynamic GC thread usage") \ 30.749 \ 30.750 develop(bool, ParallelOldGCSplitALot, false, \ 30.751 - "Provoke splitting (copying data from a young gen space to" \ 30.752 + "Provoke splitting (copying data from a young gen space to " \ 30.753 "multiple destination spaces)") \ 30.754 \ 30.755 develop(uintx, ParallelOldGCSplitInterval, 3, \ 30.756 @@ -1408,19 +1420,19 @@ 30.757 "Number of threads concurrent gc will use") \ 30.758 \ 30.759 product(uintx, YoungPLABSize, 4096, \ 30.760 - "Size of young gen promotion labs (in HeapWords)") \ 30.761 + "Size of young gen promotion LAB's (in HeapWords)") \ 30.762 \ 30.763 product(uintx, OldPLABSize, 1024, \ 
30.764 - "Size of old gen promotion labs (in HeapWords)") \ 30.765 + "Size of old gen promotion LAB's (in HeapWords)") \ 30.766 \ 30.767 product(uintx, GCTaskTimeStampEntries, 200, \ 30.768 "Number of time stamp entries per gc worker thread") \ 30.769 \ 30.770 product(bool, AlwaysTenure, false, \ 30.771 - "Always tenure objects in eden. (ParallelGC only)") \ 30.772 + "Always tenure objects in eden (ParallelGC only)") \ 30.773 \ 30.774 product(bool, NeverTenure, false, \ 30.775 - "Never tenure objects in eden, May tenure on overflow " \ 30.776 + "Never tenure objects in eden, may tenure on overflow " \ 30.777 "(ParallelGC only)") \ 30.778 \ 30.779 product(bool, ScavengeBeforeFullGC, true, \ 30.780 @@ -1428,14 +1440,14 @@ 30.781 "used with UseParallelGC") \ 30.782 \ 30.783 develop(bool, ScavengeWithObjectsInToSpace, false, \ 30.784 - "Allow scavenges to occur when to_space contains objects.") \ 30.785 + "Allow scavenges to occur when to-space contains objects") \ 30.786 \ 30.787 product(bool, UseConcMarkSweepGC, false, \ 30.788 "Use Concurrent Mark-Sweep GC in the old generation") \ 30.789 \ 30.790 product(bool, ExplicitGCInvokesConcurrent, false, \ 30.791 - "A System.gc() request invokes a concurrent collection;" \ 30.792 - " (effective only when UseConcMarkSweepGC)") \ 30.793 + "A System.gc() request invokes a concurrent collection; " \ 30.794 + "(effective only when UseConcMarkSweepGC)") \ 30.795 \ 30.796 product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ 30.797 "A System.gc() request invokes a concurrent collection and " \ 30.798 @@ -1443,19 +1455,19 @@ 30.799 "(effective only when UseConcMarkSweepGC)") \ 30.800 \ 30.801 product(bool, GCLockerInvokesConcurrent, false, \ 30.802 - "The exit of a JNI CS necessitating a scavenge also" \ 30.803 - " kicks off a bkgrd concurrent collection") \ 30.804 + "The exit of a JNI critical section necessitating a scavenge, " \ 30.805 + "also kicks off a background concurrent collection") \ 30.806 \ 30.807 
product(uintx, GCLockerEdenExpansionPercent, 5, \ 30.808 - "How much the GC can expand the eden by while the GC locker " \ 30.809 + "How much the GC can expand the eden by while the GC locker " \ 30.810 "is active (as a percentage)") \ 30.811 \ 30.812 diagnostic(intx, GCLockerRetryAllocationCount, 2, \ 30.813 - "Number of times to retry allocations when" \ 30.814 - " blocked by the GC locker") \ 30.815 + "Number of times to retry allocations when " \ 30.816 + "blocked by the GC locker") \ 30.817 \ 30.818 develop(bool, UseCMSAdaptiveFreeLists, true, \ 30.819 - "Use Adaptive Free Lists in the CMS generation") \ 30.820 + "Use adaptive free lists in the CMS generation") \ 30.821 \ 30.822 develop(bool, UseAsyncConcMarkSweepGC, true, \ 30.823 "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\ 30.824 @@ -1470,44 +1482,46 @@ 30.825 "Use passing of collection from background to foreground") \ 30.826 \ 30.827 product(bool, UseParNewGC, false, \ 30.828 - "Use parallel threads in the new generation.") \ 30.829 + "Use parallel threads in the new generation") \ 30.830 \ 30.831 product(bool, ParallelGCVerbose, false, \ 30.832 - "Verbose output for parallel GC.") \ 30.833 + "Verbose output for parallel gc") \ 30.834 \ 30.835 product(uintx, ParallelGCBufferWastePct, 10, \ 30.836 - "Wasted fraction of parallel allocation buffer.") \ 30.837 + "Wasted fraction of parallel allocation buffer") \ 30.838 \ 30.839 diagnostic(bool, ParallelGCRetainPLAB, false, \ 30.840 - "Retain parallel allocation buffers across scavenges; " \ 30.841 - " -- disabled because this currently conflicts with " \ 30.842 - " parallel card scanning under certain conditions ") \ 30.843 + "Retain parallel allocation buffers across scavenges; " \ 30.844 + "it is disabled because this currently conflicts with " \ 30.845 + "parallel card scanning under certain conditions.") \ 30.846 \ 30.847 product(uintx, TargetPLABWastePct, 10, \ 30.848 "Target wasted space in last buffer as percent of overall " \ 
30.849 "allocation") \ 30.850 \ 30.851 product(uintx, PLABWeight, 75, \ 30.852 - "Percentage (0-100) used to weight the current sample when" \ 30.853 - "computing exponentially decaying average for ResizePLAB.") \ 30.854 + "Percentage (0-100) used to weigh the current sample when " \ 30.855 + "computing exponentially decaying average for ResizePLAB") \ 30.856 \ 30.857 product(bool, ResizePLAB, true, \ 30.858 - "Dynamically resize (survivor space) promotion labs") \ 30.859 + "Dynamically resize (survivor space) promotion LAB's") \ 30.860 \ 30.861 product(bool, PrintPLAB, false, \ 30.862 - "Print (survivor space) promotion labs sizing decisions") \ 30.863 + "Print (survivor space) promotion LAB's sizing decisions") \ 30.864 \ 30.865 product(intx, ParGCArrayScanChunk, 50, \ 30.866 - "Scan a subset and push remainder, if array is bigger than this") \ 30.867 + "Scan a subset of object array and push remainder, if array is " \ 30.868 + "bigger than this") \ 30.869 \ 30.870 product(bool, ParGCUseLocalOverflow, false, \ 30.871 "Instead of a global overflow list, use local overflow stacks") \ 30.872 \ 30.873 product(bool, ParGCTrimOverflow, true, \ 30.874 - "Eagerly trim the local overflow lists (when ParGCUseLocalOverflow") \ 30.875 + "Eagerly trim the local overflow lists " \ 30.876 + "(when ParGCUseLocalOverflow)") \ 30.877 \ 30.878 notproduct(bool, ParGCWorkQueueOverflowALot, false, \ 30.879 - "Whether we should simulate work queue overflow in ParNew") \ 30.880 + "Simulate work queue overflow in ParNew") \ 30.881 \ 30.882 notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ 30.883 "An `interval' counter that determines how frequently " \ 30.884 @@ -1525,43 +1539,46 @@ 30.885 "during card table scanning") \ 30.886 \ 30.887 product(uintx, CMSParPromoteBlocksToClaim, 16, \ 30.888 - "Number of blocks to attempt to claim when refilling CMS LAB for "\ 30.889 - "parallel GC.") \ 30.890 + "Number of blocks to attempt to claim when refilling CMS LAB's " \ 30.891 + "for 
parallel GC") \ 30.892 \ 30.893 product(uintx, OldPLABWeight, 50, \ 30.894 - "Percentage (0-100) used to weight the current sample when" \ 30.895 - "computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \ 30.896 + "Percentage (0-100) used to weight the current sample when " \ 30.897 + "computing exponentially decaying average for resizing " \ 30.898 + "CMSParPromoteBlocksToClaim") \ 30.899 \ 30.900 product(bool, ResizeOldPLAB, true, \ 30.901 - "Dynamically resize (old gen) promotion labs") \ 30.902 + "Dynamically resize (old gen) promotion LAB's") \ 30.903 \ 30.904 product(bool, PrintOldPLAB, false, \ 30.905 - "Print (old gen) promotion labs sizing decisions") \ 30.906 + "Print (old gen) promotion LAB's sizing decisions") \ 30.907 \ 30.908 product(uintx, CMSOldPLABMin, 16, \ 30.909 - "Min size of CMS gen promotion lab caches per worker per blksize")\ 30.910 + "Minimum size of CMS gen promotion LAB caches per worker " \ 30.911 + "per block size") \ 30.912 \ 30.913 product(uintx, CMSOldPLABMax, 1024, \ 30.914 - "Max size of CMS gen promotion lab caches per worker per blksize")\ 30.915 + "Maximum size of CMS gen promotion LAB caches per worker " \ 30.916 + "per block size") \ 30.917 \ 30.918 product(uintx, CMSOldPLABNumRefills, 4, \ 30.919 - "Nominal number of refills of CMS gen promotion lab cache" \ 30.920 - " per worker per block size") \ 30.921 + "Nominal number of refills of CMS gen promotion LAB cache " \ 30.922 + "per worker per block size") \ 30.923 \ 30.924 product(bool, CMSOldPLABResizeQuicker, false, \ 30.925 - "Whether to react on-the-fly during a scavenge to a sudden" \ 30.926 - " change in block demand rate") \ 30.927 + "React on-the-fly during a scavenge to a sudden " \ 30.928 + "change in block demand rate") \ 30.929 \ 30.930 product(uintx, CMSOldPLABToleranceFactor, 4, \ 30.931 - "The tolerance of the phase-change detector for on-the-fly" \ 30.932 - " PLAB resizing during a scavenge") \ 30.933 + "The tolerance of the 
phase-change detector for on-the-fly " \ 30.934 + "PLAB resizing during a scavenge") \ 30.935 \ 30.936 product(uintx, CMSOldPLABReactivityFactor, 2, \ 30.937 - "The gain in the feedback loop for on-the-fly PLAB resizing" \ 30.938 - " during a scavenge") \ 30.939 + "The gain in the feedback loop for on-the-fly PLAB resizing " \ 30.940 + "during a scavenge") \ 30.941 \ 30.942 product(bool, AlwaysPreTouch, false, \ 30.943 - "It forces all freshly committed pages to be pre-touched.") \ 30.944 + "Force all freshly committed pages to be pre-touched") \ 30.945 \ 30.946 product_pd(uintx, CMSYoungGenPerWorker, \ 30.947 "The maximum size of young gen chosen by default per GC worker " \ 30.948 @@ -1571,64 +1588,67 @@ 30.949 "Whether CMS GC should operate in \"incremental\" mode") \ 30.950 \ 30.951 product(uintx, CMSIncrementalDutyCycle, 10, \ 30.952 - "CMS incremental mode duty cycle (a percentage, 0-100). If" \ 30.953 - "CMSIncrementalPacing is enabled, then this is just the initial" \ 30.954 - "value") \ 30.955 + "Percentage (0-100) of CMS incremental mode duty cycle. 
If " \ 30.956 + "CMSIncrementalPacing is enabled, then this is just the initial " \ 30.957 + "value.") \ 30.958 \ 30.959 product(bool, CMSIncrementalPacing, true, \ 30.960 "Whether the CMS incremental mode duty cycle should be " \ 30.961 "automatically adjusted") \ 30.962 \ 30.963 product(uintx, CMSIncrementalDutyCycleMin, 0, \ 30.964 - "Lower bound on the duty cycle when CMSIncrementalPacing is " \ 30.965 - "enabled (a percentage, 0-100)") \ 30.966 + "Minimum percentage (0-100) of the CMS incremental duty cycle " \ 30.967 + "used when CMSIncrementalPacing is enabled") \ 30.968 \ 30.969 product(uintx, CMSIncrementalSafetyFactor, 10, \ 30.970 "Percentage (0-100) used to add conservatism when computing the " \ 30.971 "duty cycle") \ 30.972 \ 30.973 product(uintx, CMSIncrementalOffset, 0, \ 30.974 - "Percentage (0-100) by which the CMS incremental mode duty cycle" \ 30.975 - " is shifted to the right within the period between young GCs") \ 30.976 + "Percentage (0-100) by which the CMS incremental mode duty cycle "\ 30.977 + "is shifted to the right within the period between young GCs") \ 30.978 \ 30.979 product(uintx, CMSExpAvgFactor, 50, \ 30.980 - "Percentage (0-100) used to weight the current sample when" \ 30.981 - "computing exponential averages for CMS statistics.") \ 30.982 + "Percentage (0-100) used to weigh the current sample when " \ 30.983 + "computing exponential averages for CMS statistics") \ 30.984 \ 30.985 product(uintx, CMS_FLSWeight, 75, \ 30.986 - "Percentage (0-100) used to weight the current sample when" \ 30.987 - "computing exponentially decating averages for CMS FLS statistics.") \ 30.988 + "Percentage (0-100) used to weigh the current sample when " \ 30.989 + "computing exponentially decaying averages for CMS FLS " \ 30.990 + "statistics") \ 30.991 \ 30.992 product(uintx, CMS_FLSPadding, 1, \ 30.993 - "The multiple of deviation from mean to use for buffering" \ 30.994 - "against volatility in free list demand.") \ 30.995 + "The multiple of 
deviation from mean to use for buffering " \ 30.996 + "against volatility in free list demand") \ 30.997 \ 30.998 product(uintx, FLSCoalescePolicy, 2, \ 30.999 - "CMS: Aggression level for coalescing, increasing from 0 to 4") \ 30.1000 + "CMS: aggressiveness level for coalescing, increasing " \ 30.1001 + "from 0 to 4") \ 30.1002 \ 30.1003 product(bool, FLSAlwaysCoalesceLarge, false, \ 30.1004 - "CMS: Larger free blocks are always available for coalescing") \ 30.1005 + "CMS: larger free blocks are always available for coalescing") \ 30.1006 \ 30.1007 product(double, FLSLargestBlockCoalesceProximity, 0.99, \ 30.1008 - "CMS: the smaller the percentage the greater the coalition force")\ 30.1009 + "CMS: the smaller the percentage the greater the coalescing " \ 30.1010 + "force") \ 30.1011 \ 30.1012 product(double, CMSSmallCoalSurplusPercent, 1.05, \ 30.1013 - "CMS: the factor by which to inflate estimated demand of small" \ 30.1014 - " block sizes to prevent coalescing with an adjoining block") \ 30.1015 + "CMS: the factor by which to inflate estimated demand of small " \ 30.1016 + "block sizes to prevent coalescing with an adjoining block") \ 30.1017 \ 30.1018 product(double, CMSLargeCoalSurplusPercent, 0.95, \ 30.1019 - "CMS: the factor by which to inflate estimated demand of large" \ 30.1020 - " block sizes to prevent coalescing with an adjoining block") \ 30.1021 + "CMS: the factor by which to inflate estimated demand of large " \ 30.1022 + "block sizes to prevent coalescing with an adjoining block") \ 30.1023 \ 30.1024 product(double, CMSSmallSplitSurplusPercent, 1.10, \ 30.1025 - "CMS: the factor by which to inflate estimated demand of small" \ 30.1026 - " block sizes to prevent splitting to supply demand for smaller" \ 30.1027 - " blocks") \ 30.1028 + "CMS: the factor by which to inflate estimated demand of small " \ 30.1029 + "block sizes to prevent splitting to supply demand for smaller " \ 30.1030 + "blocks") \ 30.1031 \ 30.1032 product(double, 
CMSLargeSplitSurplusPercent, 1.00, \ 30.1033 - "CMS: the factor by which to inflate estimated demand of large" \ 30.1034 - " block sizes to prevent splitting to supply demand for smaller" \ 30.1035 - " blocks") \ 30.1036 + "CMS: the factor by which to inflate estimated demand of large " \ 30.1037 + "block sizes to prevent splitting to supply demand for smaller " \ 30.1038 + "blocks") \ 30.1039 \ 30.1040 product(bool, CMSExtrapolateSweep, false, \ 30.1041 "CMS: cushion for block demand during sweep") \ 30.1042 @@ -1640,11 +1660,11 @@ 30.1043 \ 30.1044 product(uintx, CMS_SweepPadding, 1, \ 30.1045 "The multiple of deviation from mean to use for buffering " \ 30.1046 - "against volatility in inter-sweep duration.") \ 30.1047 + "against volatility in inter-sweep duration") \ 30.1048 \ 30.1049 product(uintx, CMS_SweepTimerThresholdMillis, 10, \ 30.1050 "Skip block flux-rate sampling for an epoch unless inter-sweep " \ 30.1051 - "duration exceeds this threhold in milliseconds") \ 30.1052 + "duration exceeds this threshold in milliseconds") \ 30.1053 \ 30.1054 develop(bool, CMSTraceIncrementalMode, false, \ 30.1055 "Trace CMS incremental mode") \ 30.1056 @@ -1659,14 +1679,15 @@ 30.1057 "Whether class unloading enabled when using CMS GC") \ 30.1058 \ 30.1059 product(uintx, CMSClassUnloadingMaxInterval, 0, \ 30.1060 - "When CMS class unloading is enabled, the maximum CMS cycle count"\ 30.1061 - " for which classes may not be unloaded") \ 30.1062 + "When CMS class unloading is enabled, the maximum CMS cycle " \ 30.1063 + "count for which classes may not be unloaded") \ 30.1064 \ 30.1065 product(bool, CMSCompactWhenClearAllSoftRefs, true, \ 30.1066 - "Compact when asked to collect CMS gen with clear_all_soft_refs") \ 30.1067 + "Compact when asked to collect CMS gen with " \ 30.1068 + "clear_all_soft_refs()") \ 30.1069 \ 30.1070 product(bool, UseCMSCompactAtFullCollection, true, \ 30.1071 - "Use mark sweep compact at full collections") \ 30.1072 + "Use Mark-Sweep-Compact 
algorithm at full collections") \ 30.1073 \ 30.1074 product(uintx, CMSFullGCsBeforeCompaction, 0, \ 30.1075 "Number of CMS full collection done before compaction if > 0") \ 30.1076 @@ -1688,38 +1709,37 @@ 30.1077 "Warn in case of excessive CMS looping") \ 30.1078 \ 30.1079 develop(bool, CMSOverflowEarlyRestoration, false, \ 30.1080 - "Whether preserved marks should be restored early") \ 30.1081 + "Restore preserved marks early") \ 30.1082 \ 30.1083 product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ 30.1084 "Size of marking stack") \ 30.1085 \ 30.1086 product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ 30.1087 - "Max size of marking stack") \ 30.1088 + "Maximum size of marking stack") \ 30.1089 \ 30.1090 notproduct(bool, CMSMarkStackOverflowALot, false, \ 30.1091 - "Whether we should simulate frequent marking stack / work queue" \ 30.1092 - " overflow") \ 30.1093 + "Simulate frequent marking stack / work queue overflow") \ 30.1094 \ 30.1095 notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ 30.1096 - "An `interval' counter that determines how frequently" \ 30.1097 - " we simulate overflow; a smaller number increases frequency") \ 30.1098 + "An \"interval\" counter that determines how frequently " \ 30.1099 + "to simulate overflow; a smaller number increases frequency") \ 30.1100 \ 30.1101 product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ 30.1102 - "(Temporary, subject to experimentation)" \ 30.1103 + "(Temporary, subject to experimentation) " \ 30.1104 "Maximum number of abortable preclean iterations, if > 0") \ 30.1105 \ 30.1106 product(intx, CMSMaxAbortablePrecleanTime, 5000, \ 30.1107 - "(Temporary, subject to experimentation)" \ 30.1108 - "Maximum time in abortable preclean in ms") \ 30.1109 + "(Temporary, subject to experimentation) " \ 30.1110 + "Maximum time in abortable preclean (in milliseconds)") \ 30.1111 \ 30.1112 product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ 30.1113 - "(Temporary, subject to 
experimentation)" \ 30.1114 + "(Temporary, subject to experimentation) " \ 30.1115 "Nominal minimum work per abortable preclean iteration") \ 30.1116 \ 30.1117 manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ 30.1118 - "(Temporary, subject to experimentation)" \ 30.1119 - " Time that we sleep between iterations when not given" \ 30.1120 - " enough work per iteration") \ 30.1121 + "(Temporary, subject to experimentation) " \ 30.1122 + "Time that we sleep between iterations when not given " \ 30.1123 + "enough work per iteration") \ 30.1124 \ 30.1125 product(uintx, CMSRescanMultiple, 32, \ 30.1126 "Size (in cards) of CMS parallel rescan task") \ 30.1127 @@ -1737,23 +1757,24 @@ 30.1128 "Whether parallel remark enabled (only if ParNewGC)") \ 30.1129 \ 30.1130 product(bool, CMSParallelSurvivorRemarkEnabled, true, \ 30.1131 - "Whether parallel remark of survivor space" \ 30.1132 - " enabled (effective only if CMSParallelRemarkEnabled)") \ 30.1133 + "Whether parallel remark of survivor space " \ 30.1134 + "enabled (effective only if CMSParallelRemarkEnabled)") \ 30.1135 \ 30.1136 product(bool, CMSPLABRecordAlways, true, \ 30.1137 - "Whether to always record survivor space PLAB bdries" \ 30.1138 - " (effective only if CMSParallelSurvivorRemarkEnabled)") \ 30.1139 + "Always record survivor space PLAB boundaries (effective only " \ 30.1140 + "if CMSParallelSurvivorRemarkEnabled)") \ 30.1141 \ 30.1142 product(bool, CMSEdenChunksRecordAlways, true, \ 30.1143 - "Whether to always record eden chunks used for " \ 30.1144 - "the parallel initial mark or remark of eden" ) \ 30.1145 + "Always record eden chunks used for the parallel initial mark " \ 30.1146 + "or remark of eden") \ 30.1147 \ 30.1148 product(bool, CMSPrintEdenSurvivorChunks, false, \ 30.1149 "Print the eden and the survivor chunks used for the parallel " \ 30.1150 "initial mark or remark of the eden/survivor spaces") \ 30.1151 \ 30.1152 product(bool, CMSConcurrentMTEnabled, true, \ 30.1153 - "Whether 
multi-threaded concurrent work enabled (if ParNewGC)") \ 30.1154 + "Whether multi-threaded concurrent work enabled " \ 30.1155 + "(effective only if ParNewGC)") \ 30.1156 \ 30.1157 product(bool, CMSPrecleaningEnabled, true, \ 30.1158 "Whether concurrent precleaning enabled") \ 30.1159 @@ -1762,12 +1783,12 @@ 30.1160 "Maximum number of precleaning iteration passes") \ 30.1161 \ 30.1162 product(uintx, CMSPrecleanNumerator, 2, \ 30.1163 - "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ 30.1164 - " ratio") \ 30.1165 + "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 30.1166 + "ratio") \ 30.1167 \ 30.1168 product(uintx, CMSPrecleanDenominator, 3, \ 30.1169 - "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ 30.1170 - " ratio") \ 30.1171 + "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 30.1172 + "ratio") \ 30.1173 \ 30.1174 product(bool, CMSPrecleanRefLists1, true, \ 30.1175 "Preclean ref lists during (initial) preclean phase") \ 30.1176 @@ -1782,7 +1803,7 @@ 30.1177 "Preclean survivors during abortable preclean phase") \ 30.1178 \ 30.1179 product(uintx, CMSPrecleanThreshold, 1000, \ 30.1180 - "Don't re-iterate if #dirty cards less than this") \ 30.1181 + "Do not iterate again if number of dirty cards is less than this")\ 30.1182 \ 30.1183 product(bool, CMSCleanOnEnter, true, \ 30.1184 "Clean-on-enter optimization for reducing number of dirty cards") \ 30.1185 @@ -1791,14 +1812,16 @@ 30.1186 "Choose variant (1,2) of verification following remark") \ 30.1187 \ 30.1188 product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ 30.1189 - "If Eden used is below this value, don't try to schedule remark") \ 30.1190 + "If Eden size is below this, do not try to schedule remark") \ 30.1191 \ 30.1192 product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ 30.1193 - "The Eden occupancy % at which to try and schedule remark pause") \ 30.1194 + "The Eden occupancy percentage (0-100) at which " \ 30.1195 + "to 
try and schedule remark pause") \ 30.1196 \ 30.1197 product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ 30.1198 - "Start sampling Eden top at least before yg occupancy reaches" \ 30.1199 - " 1/<ratio> of the size at which we plan to schedule remark") \ 30.1200 + "Start sampling eden top at least before young gen " \ 30.1201 + "occupancy reaches 1/<ratio> of the size at which " \ 30.1202 + "we plan to schedule remark") \ 30.1203 \ 30.1204 product(uintx, CMSSamplingGrain, 16*K, \ 30.1205 "The minimum distance between eden samples for CMS (see above)") \ 30.1206 @@ -1820,27 +1843,27 @@ 30.1207 "should start a collection cycle") \ 30.1208 \ 30.1209 product(bool, CMSYield, true, \ 30.1210 - "Yield between steps of concurrent mark & sweep") \ 30.1211 + "Yield between steps of CMS") \ 30.1212 \ 30.1213 product(uintx, CMSBitMapYieldQuantum, 10*M, \ 30.1214 - "Bitmap operations should process at most this many bits" \ 30.1215 + "Bitmap operations should process at most this many bits " \ 30.1216 "between yields") \ 30.1217 \ 30.1218 product(bool, CMSDumpAtPromotionFailure, false, \ 30.1219 "Dump useful information about the state of the CMS old " \ 30.1220 - " generation upon a promotion failure.") \ 30.1221 + "generation upon a promotion failure") \ 30.1222 \ 30.1223 product(bool, CMSPrintChunksInDump, false, \ 30.1224 "In a dump enabled by CMSDumpAtPromotionFailure, include " \ 30.1225 - " more detailed information about the free chunks.") \ 30.1226 + "more detailed information about the free chunks") \ 30.1227 \ 30.1228 product(bool, CMSPrintObjectsInDump, false, \ 30.1229 "In a dump enabled by CMSDumpAtPromotionFailure, include " \ 30.1230 - " more detailed information about the allocated objects.") \ 30.1231 + "more detailed information about the allocated objects") \ 30.1232 \ 30.1233 diagnostic(bool, FLSVerifyAllHeapReferences, false, \ 30.1234 - "Verify that all refs across the FLS boundary " \ 30.1235 - " are to valid objects") \ 30.1236 + "Verify that all 
references across the FLS boundary " \ 30.1237 + "are to valid objects") \ 30.1238 \ 30.1239 diagnostic(bool, FLSVerifyLists, false, \ 30.1240 "Do lots of (expensive) FreeListSpace verification") \ 30.1241 @@ -1852,17 +1875,18 @@ 30.1242 "Do lots of (expensive) FLS dictionary verification") \ 30.1243 \ 30.1244 develop(bool, VerifyBlockOffsetArray, false, \ 30.1245 - "Do (expensive!) block offset array verification") \ 30.1246 + "Do (expensive) block offset array verification") \ 30.1247 \ 30.1248 diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ 30.1249 - "Maintain _unallocated_block in BlockOffsetArray" \ 30.1250 - " (currently applicable only to CMS collector)") \ 30.1251 + "Maintain _unallocated_block in BlockOffsetArray " \ 30.1252 + "(currently applicable only to CMS collector)") \ 30.1253 \ 30.1254 develop(bool, TraceCMSState, false, \ 30.1255 "Trace the state of the CMS collection") \ 30.1256 \ 30.1257 product(intx, RefDiscoveryPolicy, 0, \ 30.1258 - "Whether reference-based(0) or referent-based(1)") \ 30.1259 + "Select type of reference discovery policy: " \ 30.1260 + "reference-based(0) or referent-based(1)") \ 30.1261 \ 30.1262 product(bool, ParallelRefProcEnabled, false, \ 30.1263 "Enable parallel reference processing whenever possible") \ 30.1264 @@ -1890,7 +1914,7 @@ 30.1265 "denotes 'do constant GC cycles'.") \ 30.1266 \ 30.1267 product(bool, UseCMSInitiatingOccupancyOnly, false, \ 30.1268 - "Only use occupancy as a crierion for starting a CMS collection") \ 30.1269 + "Only use occupancy as a criterion for starting a CMS collection")\ 30.1270 \ 30.1271 product(uintx, CMSIsTooFullPercentage, 98, \ 30.1272 "An absolute ceiling above which CMS will always consider the " \ 30.1273 @@ -1902,7 +1926,7 @@ 30.1274 \ 30.1275 notproduct(bool, CMSVerifyReturnedBytes, false, \ 30.1276 "Check that all the garbage collected was returned to the " \ 30.1277 - "free lists.") \ 30.1278 + "free lists") \ 30.1279 \ 30.1280 notproduct(bool, ScavengeALot, 
false, \ 30.1281 "Force scavenge at every Nth exit from the runtime system " \ 30.1282 @@ -1917,16 +1941,16 @@ 30.1283 \ 30.1284 product(bool, PrintPromotionFailure, false, \ 30.1285 "Print additional diagnostic information following " \ 30.1286 - " promotion failure") \ 30.1287 + "promotion failure") \ 30.1288 \ 30.1289 notproduct(bool, PromotionFailureALot, false, \ 30.1290 "Use promotion failure handling on every youngest generation " \ 30.1291 "collection") \ 30.1292 \ 30.1293 develop(uintx, PromotionFailureALotCount, 1000, \ 30.1294 - "Number of promotion failures occurring at ParGCAllocBuffer" \ 30.1295 + "Number of promotion failures occurring at ParGCAllocBuffer " \ 30.1296 "refill attempts (ParNew) or promotion attempts " \ 30.1297 - "(other young collectors) ") \ 30.1298 + "(other young collectors)") \ 30.1299 \ 30.1300 develop(uintx, PromotionFailureALotInterval, 5, \ 30.1301 "Total collections between promotion failures alot") \ 30.1302 @@ -1945,7 +1969,7 @@ 30.1303 "Ratio of hard spins to calls to yield") \ 30.1304 \ 30.1305 develop(uintx, ObjArrayMarkingStride, 512, \ 30.1306 - "Number of ObjArray elements to push onto the marking stack" \ 30.1307 + "Number of object array elements to push onto the marking stack " \ 30.1308 "before pushing a continuation entry") \ 30.1309 \ 30.1310 develop(bool, MetadataAllocationFailALot, false, \ 30.1311 @@ -1953,7 +1977,7 @@ 30.1312 "MetadataAllocationFailALotInterval") \ 30.1313 \ 30.1314 develop(uintx, MetadataAllocationFailALotInterval, 1000, \ 30.1315 - "metadata allocation failure alot interval") \ 30.1316 + "Metadata allocation failure a lot interval") \ 30.1317 \ 30.1318 develop(bool, MetaDataDeallocateALot, false, \ 30.1319 "Deallocation bunches of metadata at intervals controlled by " \ 30.1320 @@ -1972,7 +1996,7 @@ 30.1321 "Trace virtual space metadata allocations") \ 30.1322 \ 30.1323 notproduct(bool, ExecuteInternalVMTests, false, \ 30.1324 - "Enable execution of internal VM tests.") \ 30.1325 + "Enable 
execution of internal VM tests") \ 30.1326 \ 30.1327 notproduct(bool, VerboseInternalVMTests, false, \ 30.1328 "Turn on logging for internal VM tests.") \ 30.1329 @@ -1980,7 +2004,7 @@ 30.1330 product_pd(bool, UseTLAB, "Use thread-local object allocation") \ 30.1331 \ 30.1332 product_pd(bool, ResizeTLAB, \ 30.1333 - "Dynamically resize tlab size for threads") \ 30.1334 + "Dynamically resize TLAB size for threads") \ 30.1335 \ 30.1336 product(bool, ZeroTLAB, false, \ 30.1337 "Zero out the newly created TLAB") \ 30.1338 @@ -1992,7 +2016,8 @@ 30.1339 "Print various TLAB related information") \ 30.1340 \ 30.1341 product(bool, TLABStats, true, \ 30.1342 - "Print various TLAB related information") \ 30.1343 + "Provide more detailed and expensive TLAB statistics " \ 30.1344 + "(with PrintTLAB)") \ 30.1345 \ 30.1346 EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \ 30.1347 "Enable LowMemoryProtection")) \ 30.1348 @@ -2026,14 +2051,14 @@ 30.1349 "Fraction (1/n) of real memory used for initial heap size") \ 30.1350 \ 30.1351 develop(uintx, MaxVirtMemFraction, 2, \ 30.1352 - "Maximum fraction (1/n) of virtual memory used for ergonomically" \ 30.1353 + "Maximum fraction (1/n) of virtual memory used for ergonomically "\ 30.1354 "determining maximum heap size") \ 30.1355 \ 30.1356 product(bool, UseAutoGCSelectPolicy, false, \ 30.1357 "Use automatic collection selection policy") \ 30.1358 \ 30.1359 product(uintx, AutoGCSelectPauseMillis, 5000, \ 30.1360 - "Automatic GC selection pause threshhold in ms") \ 30.1361 + "Automatic GC selection pause threshold in milliseconds") \ 30.1362 \ 30.1363 product(bool, UseAdaptiveSizePolicy, true, \ 30.1364 "Use adaptive generation sizing policies") \ 30.1365 @@ -2048,7 +2073,7 @@ 30.1366 "Use adaptive young-old sizing policies at major collections") \ 30.1367 \ 30.1368 product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ 30.1369 - "Use statistics from System.GC for adaptive size policy") \ 30.1370 + "Include statistics from 
System.gc() for adaptive size policy") \ 30.1371 \ 30.1372 product(bool, UseAdaptiveGCBoundary, false, \ 30.1373 "Allow young-old boundary to move") \ 30.1374 @@ -2060,16 +2085,16 @@ 30.1375 "Resize the virtual spaces of the young or old generations") \ 30.1376 \ 30.1377 product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ 30.1378 - "Policy for changeing generation size for throughput goals") \ 30.1379 + "Policy for changing generation size for throughput goals") \ 30.1380 \ 30.1381 product(uintx, AdaptiveSizePausePolicy, 0, \ 30.1382 "Policy for changing generation size for pause goals") \ 30.1383 \ 30.1384 develop(bool, PSAdjustTenuredGenForMinorPause, false, \ 30.1385 - "Adjust tenured generation to achive a minor pause goal") \ 30.1386 + "Adjust tenured generation to achieve a minor pause goal") \ 30.1387 \ 30.1388 develop(bool, PSAdjustYoungGenForMajorPause, false, \ 30.1389 - "Adjust young generation to achive a major pause goal") \ 30.1390 + "Adjust young generation to achieve a major pause goal") \ 30.1391 \ 30.1392 product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ 30.1393 "Number of steps where heuristics is used before data is used") \ 30.1394 @@ -2124,14 +2149,15 @@ 30.1395 "Decay factor to TenuredGenerationSizeIncrement") \ 30.1396 \ 30.1397 product(uintx, MaxGCPauseMillis, max_uintx, \ 30.1398 - "Adaptive size policy maximum GC pause time goal in msec, " \ 30.1399 - "or (G1 Only) the max. 
GC time per MMU time slice") \ 30.1400 + "Adaptive size policy maximum GC pause time goal in millisecond, "\ 30.1401 + "or (G1 Only) the maximum GC time per MMU time slice") \ 30.1402 \ 30.1403 product(uintx, GCPauseIntervalMillis, 0, \ 30.1404 "Time slice for MMU specification") \ 30.1405 \ 30.1406 product(uintx, MaxGCMinorPauseMillis, max_uintx, \ 30.1407 - "Adaptive size policy maximum GC minor pause time goal in msec") \ 30.1408 + "Adaptive size policy maximum GC minor pause time goal " \ 30.1409 + "in millisecond") \ 30.1410 \ 30.1411 product(uintx, GCTimeRatio, 99, \ 30.1412 "Adaptive size policy application time to GC time ratio") \ 30.1413 @@ -2159,8 +2185,8 @@ 30.1414 "before an OutOfMemory error is thrown") \ 30.1415 \ 30.1416 product(uintx, GCTimeLimit, 98, \ 30.1417 - "Limit of proportion of time spent in GC before an OutOfMemory" \ 30.1418 - "error is thrown (used with GCHeapFreeLimit)") \ 30.1419 + "Limit of the proportion of time spent in GC before " \ 30.1420 + "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \ 30.1421 \ 30.1422 product(uintx, GCHeapFreeLimit, 2, \ 30.1423 "Minimum percentage of free space after a full GC before an " \ 30.1424 @@ -2182,7 +2208,7 @@ 30.1425 "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ 30.1426 \ 30.1427 diagnostic(bool, VerifySilently, false, \ 30.1428 - "Don't print print the verification progress") \ 30.1429 + "Do not print the verification progress") \ 30.1430 \ 30.1431 diagnostic(bool, VerifyDuringStartup, false, \ 30.1432 "Verify memory system before executing any Java code " \ 30.1433 @@ -2205,7 +2231,7 @@ 30.1434 \ 30.1435 diagnostic(bool, DeferInitialCardMark, false, \ 30.1436 "When +ReduceInitialCardMarks, explicitly defer any that " \ 30.1437 - "may arise from new_pre_store_barrier") \ 30.1438 + "may arise from new_pre_store_barrier") \ 30.1439 \ 30.1440 diagnostic(bool, VerifyRememberedSets, false, \ 30.1441 "Verify GC remembered sets") \ 30.1442 @@ -2214,10 +2240,10 @@ 
30.1443 "Verify GC object start array if verify before/after") \ 30.1444 \ 30.1445 product(bool, DisableExplicitGC, false, \ 30.1446 - "Tells whether calling System.gc() does a full GC") \ 30.1447 + "Ignore calls to System.gc()") \ 30.1448 \ 30.1449 notproduct(bool, CheckMemoryInitialization, false, \ 30.1450 - "Checks memory initialization") \ 30.1451 + "Check memory initialization") \ 30.1452 \ 30.1453 product(bool, CollectGen0First, false, \ 30.1454 "Collect youngest generation before each full GC") \ 30.1455 @@ -2238,44 +2264,45 @@ 30.1456 "Stride through processors when distributing processes") \ 30.1457 \ 30.1458 product(uintx, CMSCoordinatorYieldSleepCount, 10, \ 30.1459 - "number of times the coordinator GC thread will sleep while " \ 30.1460 + "Number of times the coordinator GC thread will sleep while " \ 30.1461 "yielding before giving up and resuming GC") \ 30.1462 \ 30.1463 product(uintx, CMSYieldSleepCount, 0, \ 30.1464 - "number of times a GC thread (minus the coordinator) " \ 30.1465 + "Number of times a GC thread (minus the coordinator) " \ 30.1466 "will sleep while yielding before giving up and resuming GC") \ 30.1467 \ 30.1468 /* gc tracing */ \ 30.1469 manageable(bool, PrintGC, false, \ 30.1470 - "Print message at garbage collect") \ 30.1471 + "Print message at garbage collection") \ 30.1472 \ 30.1473 manageable(bool, PrintGCDetails, false, \ 30.1474 - "Print more details at garbage collect") \ 30.1475 + "Print more details at garbage collection") \ 30.1476 \ 30.1477 manageable(bool, PrintGCDateStamps, false, \ 30.1478 - "Print date stamps at garbage collect") \ 30.1479 + "Print date stamps at garbage collection") \ 30.1480 \ 30.1481 manageable(bool, PrintGCTimeStamps, false, \ 30.1482 - "Print timestamps at garbage collect") \ 30.1483 + "Print timestamps at garbage collection") \ 30.1484 \ 30.1485 product(bool, PrintGCTaskTimeStamps, false, \ 30.1486 "Print timestamps for individual gc worker thread tasks") \ 30.1487 \ 30.1488 develop(intx, 
ConcGCYieldTimeout, 0, \ 30.1489 - "If non-zero, assert that GC threads yield within this # of ms.") \ 30.1490 + "If non-zero, assert that GC threads yield within this " \ 30.1491 + "number of milliseconds") \ 30.1492 \ 30.1493 notproduct(bool, TraceMarkSweep, false, \ 30.1494 "Trace mark sweep") \ 30.1495 \ 30.1496 product(bool, PrintReferenceGC, false, \ 30.1497 "Print times spent handling reference objects during GC " \ 30.1498 - " (enabled only when PrintGCDetails)") \ 30.1499 + "(enabled only when PrintGCDetails)") \ 30.1500 \ 30.1501 develop(bool, TraceReferenceGC, false, \ 30.1502 "Trace handling of soft/weak/final/phantom references") \ 30.1503 \ 30.1504 develop(bool, TraceFinalizerRegistration, false, \ 30.1505 - "Trace registration of final references") \ 30.1506 + "Trace registration of final references") \ 30.1507 \ 30.1508 notproduct(bool, TraceScavenge, false, \ 30.1509 "Trace scavenge") \ 30.1510 @@ -2312,7 +2339,7 @@ 30.1511 "Print heap layout before and after each GC") \ 30.1512 \ 30.1513 product_rw(bool, PrintHeapAtGCExtended, false, \ 30.1514 - "Prints extended information about the layout of the heap " \ 30.1515 + "Print extended information about the layout of the heap " \ 30.1516 "when -XX:+PrintHeapAtGC is set") \ 30.1517 \ 30.1518 product(bool, PrintHeapAtSIGBREAK, true, \ 30.1519 @@ -2349,45 +2376,45 @@ 30.1520 "Trace actions of the GC task threads") \ 30.1521 \ 30.1522 product(bool, PrintParallelOldGCPhaseTimes, false, \ 30.1523 - "Print the time taken by each parallel old gc phase." 
\ 30.1524 - "PrintGCDetails must also be enabled.") \ 30.1525 + "Print the time taken by each phase in ParallelOldGC " \ 30.1526 + "(PrintGCDetails must also be enabled)") \ 30.1527 \ 30.1528 develop(bool, TraceParallelOldGCMarkingPhase, false, \ 30.1529 - "Trace parallel old gc marking phase") \ 30.1530 + "Trace marking phase in ParallelOldGC") \ 30.1531 \ 30.1532 develop(bool, TraceParallelOldGCSummaryPhase, false, \ 30.1533 - "Trace parallel old gc summary phase") \ 30.1534 + "Trace summary phase in ParallelOldGC") \ 30.1535 \ 30.1536 develop(bool, TraceParallelOldGCCompactionPhase, false, \ 30.1537 - "Trace parallel old gc compaction phase") \ 30.1538 + "Trace compaction phase in ParallelOldGC") \ 30.1539 \ 30.1540 develop(bool, TraceParallelOldGCDensePrefix, false, \ 30.1541 - "Trace parallel old gc dense prefix computation") \ 30.1542 + "Trace dense prefix computation for ParallelOldGC") \ 30.1543 \ 30.1544 develop(bool, IgnoreLibthreadGPFault, false, \ 30.1545 "Suppress workaround for libthread GP fault") \ 30.1546 \ 30.1547 product(bool, PrintJNIGCStalls, false, \ 30.1548 - "Print diagnostic message when GC is stalled" \ 30.1549 + "Print diagnostic message when GC is stalled " \ 30.1550 "by JNI critical section") \ 30.1551 \ 30.1552 experimental(double, ObjectCountCutOffPercent, 0.5, \ 30.1553 "The percentage of the used heap that the instances of a class " \ 30.1554 - "must occupy for the class to generate a trace event.") \ 30.1555 + "must occupy for the class to generate a trace event") \ 30.1556 \ 30.1557 /* GC log rotation setting */ \ 30.1558 \ 30.1559 product(bool, UseGCLogFileRotation, false, \ 30.1560 - "Prevent large gclog file for long running app. " \ 30.1561 - "Requires -Xloggc:<filename>") \ 30.1562 + "Rotate gclog files (for long running applications). 
It requires "\ 30.1563 + "-Xloggc:<filename>") \ 30.1564 \ 30.1565 product(uintx, NumberOfGCLogFiles, 0, \ 30.1566 - "Number of gclog files in rotation, " \ 30.1567 - "Default: 0, no rotation") \ 30.1568 + "Number of gclog files in rotation " \ 30.1569 + "(default: 0, no rotation)") \ 30.1570 \ 30.1571 product(uintx, GCLogFileSize, 0, \ 30.1572 - "GC log file size, Default: 0 bytes, no rotation " \ 30.1573 - "Only valid with UseGCLogFileRotation") \ 30.1574 + "GC log file size (default: 0 bytes, no rotation). " \ 30.1575 + "It requires UseGCLogFileRotation") \ 30.1576 \ 30.1577 /* JVMTI heap profiling */ \ 30.1578 \ 30.1579 @@ -2464,40 +2491,40 @@ 30.1580 "Generate range checks for array accesses") \ 30.1581 \ 30.1582 develop_pd(bool, ImplicitNullChecks, \ 30.1583 - "generate code for implicit null checks") \ 30.1584 + "Generate code for implicit null checks") \ 30.1585 \ 30.1586 product(bool, PrintSafepointStatistics, false, \ 30.1587 - "print statistics about safepoint synchronization") \ 30.1588 + "Print statistics about safepoint synchronization") \ 30.1589 \ 30.1590 product(intx, PrintSafepointStatisticsCount, 300, \ 30.1591 - "total number of safepoint statistics collected " \ 30.1592 + "Total number of safepoint statistics collected " \ 30.1593 "before printing them out") \ 30.1594 \ 30.1595 product(intx, PrintSafepointStatisticsTimeout, -1, \ 30.1596 - "print safepoint statistics only when safepoint takes" \ 30.1597 - " more than PrintSafepointSatisticsTimeout in millis") \ 30.1598 + "Print safepoint statistics only when safepoint takes " \ 30.1599 + "more than PrintSafepointSatisticsTimeout in millis") \ 30.1600 \ 30.1601 product(bool, TraceSafepointCleanupTime, false, \ 30.1602 - "print the break down of clean up tasks performed during" \ 30.1603 - " safepoint") \ 30.1604 + "Print the break down of clean up tasks performed during " \ 30.1605 + "safepoint") \ 30.1606 \ 30.1607 product(bool, Inline, true, \ 30.1608 - "enable inlining") \ 30.1609 + "Enable 
inlining") \ 30.1610 \ 30.1611 product(bool, ClipInlining, true, \ 30.1612 - "clip inlining if aggregate method exceeds DesiredMethodLimit") \ 30.1613 + "Clip inlining if aggregate method exceeds DesiredMethodLimit") \ 30.1614 \ 30.1615 develop(bool, UseCHA, true, \ 30.1616 - "enable CHA") \ 30.1617 + "Enable CHA") \ 30.1618 \ 30.1619 product(bool, UseTypeProfile, true, \ 30.1620 "Check interpreter profile for historically monomorphic calls") \ 30.1621 \ 30.1622 notproduct(bool, TimeCompiler, false, \ 30.1623 - "time the compiler") \ 30.1624 + "Time the compiler") \ 30.1625 \ 30.1626 diagnostic(bool, PrintInlining, false, \ 30.1627 - "prints inlining optimizations") \ 30.1628 + "Print inlining optimizations") \ 30.1629 \ 30.1630 product(bool, UsePopCountInstruction, false, \ 30.1631 "Use population count instruction") \ 30.1632 @@ -2509,57 +2536,59 @@ 30.1633 "Print when methods are replaced do to recompilation") \ 30.1634 \ 30.1635 develop(bool, PrintMethodFlushing, false, \ 30.1636 - "print the nmethods being flushed") \ 30.1637 + "Print the nmethods being flushed") \ 30.1638 \ 30.1639 develop(bool, UseRelocIndex, false, \ 30.1640 - "use an index to speed random access to relocations") \ 30.1641 + "Use an index to speed random access to relocations") \ 30.1642 \ 30.1643 develop(bool, StressCodeBuffers, false, \ 30.1644 - "Exercise code buffer expansion and other rare state changes") \ 30.1645 + "Exercise code buffer expansion and other rare state changes") \ 30.1646 \ 30.1647 diagnostic(bool, DebugNonSafepoints, trueInDebug, \ 30.1648 - "Generate extra debugging info for non-safepoints in nmethods") \ 30.1649 + "Generate extra debugging information for non-safepoints in " \ 30.1650 + "nmethods") \ 30.1651 \ 30.1652 product(bool, PrintVMOptions, false, \ 30.1653 - "Print flags that appeared on the command line") \ 30.1654 + "Print flags that appeared on the command line") \ 30.1655 \ 30.1656 product(bool, IgnoreUnrecognizedVMOptions, false, \ 30.1657 - "Ignore 
unrecognized VM options") \ 30.1658 + "Ignore unrecognized VM options") \ 30.1659 \ 30.1660 product(bool, PrintCommandLineFlags, false, \ 30.1661 - "Print flags specified on command line or set by ergonomics") \ 30.1662 + "Print flags specified on command line or set by ergonomics") \ 30.1663 \ 30.1664 product(bool, PrintFlagsInitial, false, \ 30.1665 - "Print all VM flags before argument processing and exit VM") \ 30.1666 + "Print all VM flags before argument processing and exit VM") \ 30.1667 \ 30.1668 product(bool, PrintFlagsFinal, false, \ 30.1669 - "Print all VM flags after argument and ergonomic processing") \ 30.1670 + "Print all VM flags after argument and ergonomic processing") \ 30.1671 \ 30.1672 notproduct(bool, PrintFlagsWithComments, false, \ 30.1673 - "Print all VM flags with default values and descriptions and exit")\ 30.1674 + "Print all VM flags with default values and descriptions and " \ 30.1675 + "exit") \ 30.1676 \ 30.1677 diagnostic(bool, SerializeVMOutput, true, \ 30.1678 - "Use a mutex to serialize output to tty and LogFile") \ 30.1679 + "Use a mutex to serialize output to tty and LogFile") \ 30.1680 \ 30.1681 diagnostic(bool, DisplayVMOutput, true, \ 30.1682 - "Display all VM output on the tty, independently of LogVMOutput") \ 30.1683 + "Display all VM output on the tty, independently of LogVMOutput") \ 30.1684 \ 30.1685 diagnostic(bool, LogVMOutput, false, \ 30.1686 - "Save VM output to LogFile") \ 30.1687 + "Save VM output to LogFile") \ 30.1688 \ 30.1689 diagnostic(ccstr, LogFile, NULL, \ 30.1690 - "If LogVMOutput or LogCompilation is on, save VM output to " \ 30.1691 - "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \ 30.1692 + "If LogVMOutput or LogCompilation is on, save VM output to " \ 30.1693 + "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ 30.1694 \ 30.1695 product(ccstr, ErrorFile, NULL, \ 30.1696 - "If an error occurs, save the error data to this file " \ 30.1697 - "[default: 
./hs_err_pid%p.log] (%p replaced with pid)") \ 30.1698 + "If an error occurs, save the error data to this file " \ 30.1699 + "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 30.1700 \ 30.1701 product(bool, DisplayVMOutputToStderr, false, \ 30.1702 - "If DisplayVMOutput is true, display all VM output to stderr") \ 30.1703 + "If DisplayVMOutput is true, display all VM output to stderr") \ 30.1704 \ 30.1705 product(bool, DisplayVMOutputToStdout, false, \ 30.1706 - "If DisplayVMOutput is true, display all VM output to stdout") \ 30.1707 + "If DisplayVMOutput is true, display all VM output to stdout") \ 30.1708 \ 30.1709 product(bool, UseHeavyMonitors, false, \ 30.1710 "use heavyweight instead of lightweight Java monitors") \ 30.1711 @@ -2583,7 +2612,7 @@ 30.1712 \ 30.1713 notproduct(ccstr, AbortVMOnExceptionMessage, NULL, \ 30.1714 "Call fatal if the exception pointed by AbortVMOnException " \ 30.1715 - "has this message.") \ 30.1716 + "has this message") \ 30.1717 \ 30.1718 develop(bool, DebugVtables, false, \ 30.1719 "add debugging code to vtable dispatch") \ 30.1720 @@ -2650,29 +2679,29 @@ 30.1721 \ 30.1722 /* statistics */ \ 30.1723 develop(bool, CountCompiledCalls, false, \ 30.1724 - "counts method invocations") \ 30.1725 + "Count method invocations") \ 30.1726 \ 30.1727 notproduct(bool, CountRuntimeCalls, false, \ 30.1728 - "counts VM runtime calls") \ 30.1729 + "Count VM runtime calls") \ 30.1730 \ 30.1731 develop(bool, CountJNICalls, false, \ 30.1732 - "counts jni method invocations") \ 30.1733 + "Count jni method invocations") \ 30.1734 \ 30.1735 notproduct(bool, CountJVMCalls, false, \ 30.1736 - "counts jvm method invocations") \ 30.1737 + "Count jvm method invocations") \ 30.1738 \ 30.1739 notproduct(bool, CountRemovableExceptions, false, \ 30.1740 - "count exceptions that could be replaced by branches due to " \ 30.1741 + "Count exceptions that could be replaced by branches due to " \ 30.1742 "inlining") \ 30.1743 \ 30.1744 notproduct(bool, 
ICMissHistogram, false, \ 30.1745 - "produce histogram of IC misses") \ 30.1746 + "Produce histogram of IC misses") \ 30.1747 \ 30.1748 notproduct(bool, PrintClassStatistics, false, \ 30.1749 - "prints class statistics at end of run") \ 30.1750 + "Print class statistics at end of run") \ 30.1751 \ 30.1752 notproduct(bool, PrintMethodStatistics, false, \ 30.1753 - "prints method statistics at end of run") \ 30.1754 + "Print method statistics at end of run") \ 30.1755 \ 30.1756 /* interpreter */ \ 30.1757 develop(bool, ClearInterpreterLocals, false, \ 30.1758 @@ -2686,7 +2715,7 @@ 30.1759 "Rewrite frequently used bytecode pairs into a single bytecode") \ 30.1760 \ 30.1761 diagnostic(bool, PrintInterpreter, false, \ 30.1762 - "Prints the generated interpreter code") \ 30.1763 + "Print the generated interpreter code") \ 30.1764 \ 30.1765 product(bool, UseInterpreter, true, \ 30.1766 "Use interpreter for non-compiled methods") \ 30.1767 @@ -2704,8 +2733,8 @@ 30.1768 "Use fast method entry code for accessor methods") \ 30.1769 \ 30.1770 product_pd(bool, UseOnStackReplacement, \ 30.1771 - "Use on stack replacement, calls runtime if invoc. counter " \ 30.1772 - "overflows in loop") \ 30.1773 + "Use on stack replacement, calls runtime if invoc. 
counter " \ 30.1774 + "overflows in loop") \ 30.1775 \ 30.1776 notproduct(bool, TraceOnStackReplacement, false, \ 30.1777 "Trace on stack replacement") \ 30.1778 @@ -2753,10 +2782,10 @@ 30.1779 "Trace frequency based inlining") \ 30.1780 \ 30.1781 develop_pd(bool, InlineIntrinsics, \ 30.1782 - "Inline intrinsics that can be statically resolved") \ 30.1783 + "Inline intrinsics that can be statically resolved") \ 30.1784 \ 30.1785 product_pd(bool, ProfileInterpreter, \ 30.1786 - "Profile at the bytecode level during interpretation") \ 30.1787 + "Profile at the bytecode level during interpretation") \ 30.1788 \ 30.1789 develop_pd(bool, ProfileTraps, \ 30.1790 "Profile deoptimization traps at the bytecode level") \ 30.1791 @@ -2766,7 +2795,7 @@ 30.1792 "CompileThreshold) before using the method's profile") \ 30.1793 \ 30.1794 develop(bool, PrintMethodData, false, \ 30.1795 - "Print the results of +ProfileInterpreter at end of run") \ 30.1796 + "Print the results of +ProfileInterpreter at end of run") \ 30.1797 \ 30.1798 develop(bool, VerifyDataPointer, trueInDebug, \ 30.1799 "Verify the method data pointer during interpreter profiling") \ 30.1800 @@ -2781,7 +2810,7 @@ 30.1801 \ 30.1802 /* compilation */ \ 30.1803 product(bool, UseCompiler, true, \ 30.1804 - "use compilation") \ 30.1805 + "Use Just-In-Time compilation") \ 30.1806 \ 30.1807 develop(bool, TraceCompilationPolicy, false, \ 30.1808 "Trace compilation policy") \ 30.1809 @@ -2790,20 +2819,21 @@ 30.1810 "Time the compilation policy") \ 30.1811 \ 30.1812 product(bool, UseCounterDecay, true, \ 30.1813 - "adjust recompilation counters") \ 30.1814 + "Adjust recompilation counters") \ 30.1815 \ 30.1816 develop(intx, CounterHalfLifeTime, 30, \ 30.1817 - "half-life time of invocation counters (in secs)") \ 30.1818 + "Half-life time of invocation counters (in seconds)") \ 30.1819 \ 30.1820 develop(intx, CounterDecayMinIntervalLength, 500, \ 30.1821 - "Min. ms. 
between invocation of CounterDecay") \ 30.1822 + "The minimum interval (in milliseconds) between invocation of " \ 30.1823 + "CounterDecay") \ 30.1824 \ 30.1825 product(bool, AlwaysCompileLoopMethods, false, \ 30.1826 - "when using recompilation, never interpret methods " \ 30.1827 + "When using recompilation, never interpret methods " \ 30.1828 "containing loops") \ 30.1829 \ 30.1830 product(bool, DontCompileHugeMethods, true, \ 30.1831 - "don't compile methods > HugeMethodLimit") \ 30.1832 + "Do not compile methods > HugeMethodLimit") \ 30.1833 \ 30.1834 /* Bytecode escape analysis estimation. */ \ 30.1835 product(bool, EstimateArgEscape, true, \ 30.1836 @@ -2813,10 +2843,10 @@ 30.1837 "How much tracing to do of bytecode escape analysis estimates") \ 30.1838 \ 30.1839 product(intx, MaxBCEAEstimateLevel, 5, \ 30.1840 - "Maximum number of nested calls that are analyzed by BC EA.") \ 30.1841 + "Maximum number of nested calls that are analyzed by BC EA") \ 30.1842 \ 30.1843 product(intx, MaxBCEAEstimateSize, 150, \ 30.1844 - "Maximum bytecode size of a method to be analyzed by BC EA.") \ 30.1845 + "Maximum bytecode size of a method to be analyzed by BC EA") \ 30.1846 \ 30.1847 product(intx, AllocatePrefetchStyle, 1, \ 30.1848 "0 = no prefetch, " \ 30.1849 @@ -2831,7 +2861,8 @@ 30.1850 "Number of lines to prefetch ahead of array allocation pointer") \ 30.1851 \ 30.1852 product(intx, AllocateInstancePrefetchLines, 1, \ 30.1853 - "Number of lines to prefetch ahead of instance allocation pointer") \ 30.1854 + "Number of lines to prefetch ahead of instance allocation " \ 30.1855 + "pointer") \ 30.1856 \ 30.1857 product(intx, AllocatePrefetchStepSize, 16, \ 30.1858 "Step size in bytes of sequential prefetch instructions") \ 30.1859 @@ -2851,8 +2882,8 @@ 30.1860 "(0 means off)") \ 30.1861 \ 30.1862 product(intx, MaxJavaStackTraceDepth, 1024, \ 30.1863 - "Max. no. 
of lines in the stack trace for Java exceptions " \ 30.1864 - "(0 means all)") \ 30.1865 + "The maximum number of lines in the stack trace for Java " \ 30.1866 + "exceptions (0 means all)") \ 30.1867 \ 30.1868 NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \ 30.1869 "Guarantee a safepoint (at least) every so many milliseconds " \ 30.1870 @@ -2876,10 +2907,10 @@ 30.1871 "result in more aggressive sweeping") \ 30.1872 \ 30.1873 notproduct(bool, LogSweeper, false, \ 30.1874 - "Keep a ring buffer of sweeper activity") \ 30.1875 + "Keep a ring buffer of sweeper activity") \ 30.1876 \ 30.1877 notproduct(intx, SweeperLogEntries, 1024, \ 30.1878 - "Number of records in the ring buffer of sweeper activity") \ 30.1879 + "Number of records in the ring buffer of sweeper activity") \ 30.1880 \ 30.1881 notproduct(intx, MemProfilingInterval, 500, \ 30.1882 "Time between each invocation of the MemProfiler") \ 30.1883 @@ -2922,34 +2953,35 @@ 30.1884 "less than this") \ 30.1885 \ 30.1886 product(intx, MaxInlineSize, 35, \ 30.1887 - "maximum bytecode size of a method to be inlined") \ 30.1888 + "The maximum bytecode size of a method to be inlined") \ 30.1889 \ 30.1890 product_pd(intx, FreqInlineSize, \ 30.1891 - "maximum bytecode size of a frequent method to be inlined") \ 30.1892 + "The maximum bytecode size of a frequent method to be inlined") \ 30.1893 \ 30.1894 product(intx, MaxTrivialSize, 6, \ 30.1895 - "maximum bytecode size of a trivial method to be inlined") \ 30.1896 + "The maximum bytecode size of a trivial method to be inlined") \ 30.1897 \ 30.1898 product(intx, MinInliningThreshold, 250, \ 30.1899 - "min. invocation count a method needs to have to be inlined") \ 30.1900 + "The minimum invocation count a method needs to have to be " \ 30.1901 + "inlined") \ 30.1902 \ 30.1903 develop(intx, MethodHistogramCutoff, 100, \ 30.1904 - "cutoff value for method invoc. 
histogram (+CountCalls)") \ 30.1905 + "The cutoff value for method invocation histogram (+CountCalls)") \ 30.1906 \ 30.1907 develop(intx, ProfilerNumberOfInterpretedMethods, 25, \ 30.1908 - "# of interpreted methods to show in profile") \ 30.1909 + "Number of interpreted methods to show in profile") \ 30.1910 \ 30.1911 develop(intx, ProfilerNumberOfCompiledMethods, 25, \ 30.1912 - "# of compiled methods to show in profile") \ 30.1913 + "Number of compiled methods to show in profile") \ 30.1914 \ 30.1915 develop(intx, ProfilerNumberOfStubMethods, 25, \ 30.1916 - "# of stub methods to show in profile") \ 30.1917 + "Number of stub methods to show in profile") \ 30.1918 \ 30.1919 develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ 30.1920 - "# of runtime stub nodes to show in profile") \ 30.1921 + "Number of runtime stub nodes to show in profile") \ 30.1922 \ 30.1923 product(intx, ProfileIntervalsTicks, 100, \ 30.1924 - "# of ticks between printing of interval profile " \ 30.1925 + "Number of ticks between printing of interval profile " \ 30.1926 "(+ProfileIntervals)") \ 30.1927 \ 30.1928 notproduct(intx, ScavengeALotInterval, 1, \ 30.1929 @@ -2970,7 +3002,7 @@ 30.1930 \ 30.1931 develop(intx, MinSleepInterval, 1, \ 30.1932 "Minimum sleep() interval (milliseconds) when " \ 30.1933 - "ConvertSleepToYield is off (used for SOLARIS)") \ 30.1934 + "ConvertSleepToYield is off (used for Solaris)") \ 30.1935 \ 30.1936 develop(intx, ProfilerPCTickThreshold, 15, \ 30.1937 "Number of ticks in a PC buckets to be a hotspot") \ 30.1938 @@ -2985,22 +3017,22 @@ 30.1939 "Mark nmethods non-entrant at registration") \ 30.1940 \ 30.1941 diagnostic(intx, MallocVerifyInterval, 0, \ 30.1942 - "if non-zero, verify C heap after every N calls to " \ 30.1943 + "If non-zero, verify C heap after every N calls to " \ 30.1944 "malloc/realloc/free") \ 30.1945 \ 30.1946 diagnostic(intx, MallocVerifyStart, 0, \ 30.1947 - "if non-zero, start verifying C heap after Nth call to " \ 30.1948 + "If 
non-zero, start verifying C heap after Nth call to " \ 30.1949 "malloc/realloc/free") \ 30.1950 \ 30.1951 diagnostic(uintx, MallocMaxTestWords, 0, \ 30.1952 - "if non-zero, max # of Words that malloc/realloc can allocate " \ 30.1953 - "(for testing only)") \ 30.1954 + "If non-zero, maximum number of words that malloc/realloc can " \ 30.1955 + "allocate (for testing only)") \ 30.1956 \ 30.1957 product(intx, TypeProfileWidth, 2, \ 30.1958 - "number of receiver types to record in call/cast profile") \ 30.1959 + "Number of receiver types to record in call/cast profile") \ 30.1960 \ 30.1961 develop(intx, BciProfileWidth, 2, \ 30.1962 - "number of return bci's to record in ret profile") \ 30.1963 + "Number of return bci's to record in ret profile") \ 30.1964 \ 30.1965 product(intx, PerMethodRecompilationCutoff, 400, \ 30.1966 "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ 30.1967 @@ -3067,7 +3099,7 @@ 30.1968 "Percentage of Eden that can be wasted") \ 30.1969 \ 30.1970 product(uintx, TLABRefillWasteFraction, 64, \ 30.1971 - "Max TLAB waste at a refill (internal fragmentation)") \ 30.1972 + "Maximum TLAB waste at a refill (internal fragmentation)") \ 30.1973 \ 30.1974 product(uintx, TLABWasteIncrement, 4, \ 30.1975 "Increment allowed waste at slow allocation") \ 30.1976 @@ -3076,7 +3108,7 @@ 30.1977 "Ratio of eden/survivor space size") \ 30.1978 \ 30.1979 product(uintx, NewRatio, 2, \ 30.1980 - "Ratio of new/old generation sizes") \ 30.1981 + "Ratio of old/new generation sizes") \ 30.1982 \ 30.1983 product_pd(uintx, NewSizeThreadIncrease, \ 30.1984 "Additional size added to desired new generation size per " \ 30.1985 @@ -3093,28 +3125,30 @@ 30.1986 "class pointers are used") \ 30.1987 \ 30.1988 product(uintx, MinHeapFreeRatio, 40, \ 30.1989 - "Min percentage of heap free after GC to avoid expansion") \ 30.1990 + "The minimum percentage of heap free after GC to avoid expansion")\ 30.1991 \ 30.1992 product(uintx, MaxHeapFreeRatio, 70, \ 30.1993 - "Max 
percentage of heap free after GC to avoid shrinking") \ 30.1994 + "The maximum percentage of heap free after GC to avoid shrinking")\ 30.1995 \ 30.1996 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ 30.1997 "Number of milliseconds per MB of free space in the heap") \ 30.1998 \ 30.1999 product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ 30.2000 - "Min change in heap space due to GC (in bytes)") \ 30.2001 + "The minimum change in heap space due to GC (in bytes)") \ 30.2002 \ 30.2003 product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ 30.2004 - "Min expansion of Metaspace (in bytes)") \ 30.2005 + "The minimum expansion of Metaspace (in bytes)") \ 30.2006 \ 30.2007 product(uintx, MinMetaspaceFreeRatio, 40, \ 30.2008 - "Min percentage of Metaspace free after GC to avoid expansion") \ 30.2009 + "The minimum percentage of Metaspace free after GC to avoid " \ 30.2010 + "expansion") \ 30.2011 \ 30.2012 product(uintx, MaxMetaspaceFreeRatio, 70, \ 30.2013 - "Max percentage of Metaspace free after GC to avoid shrinking") \ 30.2014 + "The maximum percentage of Metaspace free after GC to avoid " \ 30.2015 + "shrinking") \ 30.2016 \ 30.2017 product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ 30.2018 - "Max expansion of Metaspace without full GC (in bytes)") \ 30.2019 + "The maximum expansion of Metaspace without full GC (in bytes)") \ 30.2020 \ 30.2021 product(uintx, QueuedAllocationWarningCount, 0, \ 30.2022 "Number of times an allocation that queues behind a GC " \ 30.2023 @@ -3136,13 +3170,14 @@ 30.2024 "Desired percentage of survivor space used after scavenge") \ 30.2025 \ 30.2026 product(uintx, MarkSweepDeadRatio, 5, \ 30.2027 - "Percentage (0-100) of the old gen allowed as dead wood." \ 30.2028 - "Serial mark sweep treats this as both the min and max value." \ 30.2029 - "CMS uses this value only if it falls back to mark sweep." 
\ 30.2030 - "Par compact uses a variable scale based on the density of the" \ 30.2031 - "generation and treats this as the max value when the heap is" \ 30.2032 - "either completely full or completely empty. Par compact also" \ 30.2033 - "has a smaller default value; see arguments.cpp.") \ 30.2034 + "Percentage (0-100) of the old gen allowed as dead wood. " \ 30.2035 + "Serial mark sweep treats this as both the minimum and maximum " \ 30.2036 + "value. " \ 30.2037 + "CMS uses this value only if it falls back to mark sweep. " \ 30.2038 + "Par compact uses a variable scale based on the density of the " \ 30.2039 + "generation and treats this as the maximum value when the heap " \ 30.2040 + "is either completely full or completely empty. Par compact " \ 30.2041 + "also has a smaller default value; see arguments.cpp.") \ 30.2042 \ 30.2043 product(uintx, MarkSweepAlwaysCompactCount, 4, \ 30.2044 "How often should we fully compact the heap (ignoring the dead " \ 30.2045 @@ -3161,27 +3196,27 @@ 30.2046 "Census for CMS' FreeListSpace") \ 30.2047 \ 30.2048 develop(uintx, GCExpandToAllocateDelayMillis, 0, \ 30.2049 - "Delay in ms between expansion and allocation") \ 30.2050 + "Delay between expansion and allocation (in milliseconds)") \ 30.2051 \ 30.2052 develop(uintx, GCWorkerDelayMillis, 0, \ 30.2053 - "Delay in ms in scheduling GC workers") \ 30.2054 + "Delay in scheduling GC workers (in milliseconds)") \ 30.2055 \ 30.2056 product(intx, DeferThrSuspendLoopCount, 4000, \ 30.2057 "(Unstable) Number of times to iterate in safepoint loop " \ 30.2058 - " before blocking VM threads ") \ 30.2059 + "before blocking VM threads ") \ 30.2060 \ 30.2061 product(intx, DeferPollingPageLoopCount, -1, \ 30.2062 "(Unsafe,Unstable) Number of iterations in safepoint loop " \ 30.2063 "before changing safepoint polling page to RO ") \ 30.2064 \ 30.2065 - product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 30.2066 + product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 30.2067 
\ 30.2068 product(bool, PSChunkLargeArrays, true, \ 30.2069 - "true: process large arrays in chunks") \ 30.2070 + "Process large arrays in chunks") \ 30.2071 \ 30.2072 product(uintx, GCDrainStackTargetSize, 64, \ 30.2073 - "how many entries we'll try to leave on the stack during " \ 30.2074 - "parallel GC") \ 30.2075 + "Number of entries we will try to leave on the stack " \ 30.2076 + "during parallel gc") \ 30.2077 \ 30.2078 /* stack parameters */ \ 30.2079 product_pd(intx, StackYellowPages, \ 30.2080 @@ -3191,8 +3226,8 @@ 30.2081 "Number of red zone (unrecoverable overflows) pages") \ 30.2082 \ 30.2083 product_pd(intx, StackShadowPages, \ 30.2084 - "Number of shadow zone (for overflow checking) pages" \ 30.2085 - " this should exceed the depth of the VM and native call stack") \ 30.2086 + "Number of shadow zone (for overflow checking) pages " \ 30.2087 + "this should exceed the depth of the VM and native call stack") \ 30.2088 \ 30.2089 product_pd(intx, ThreadStackSize, \ 30.2090 "Thread Stack Size (in Kbytes)") \ 30.2091 @@ -3232,16 +3267,16 @@ 30.2092 "Reserved code cache size (in bytes) - maximum code cache size") \ 30.2093 \ 30.2094 product(uintx, CodeCacheMinimumFreeSpace, 500*K, \ 30.2095 - "When less than X space left, we stop compiling.") \ 30.2096 + "When less than X space left, we stop compiling") \ 30.2097 \ 30.2098 product_pd(uintx, CodeCacheExpansionSize, \ 30.2099 "Code cache expansion size (in bytes)") \ 30.2100 \ 30.2101 develop_pd(uintx, CodeCacheMinBlockLength, \ 30.2102 - "Minimum number of segments in a code cache block.") \ 30.2103 + "Minimum number of segments in a code cache block") \ 30.2104 \ 30.2105 notproduct(bool, ExitOnFullCodeCache, false, \ 30.2106 - "Exit the VM if we fill the code cache.") \ 30.2107 + "Exit the VM if we fill the code cache") \ 30.2108 \ 30.2109 product(bool, UseCodeCacheFlushing, true, \ 30.2110 "Attempt to clean the code cache before shutting off compiler") \ 30.2111 @@ -3252,31 +3287,31 @@ 30.2112 "switch") \ 
30.2113 \ 30.2114 develop(intx, StopInterpreterAt, 0, \ 30.2115 - "Stops interpreter execution at specified bytecode number") \ 30.2116 + "Stop interpreter execution at specified bytecode number") \ 30.2117 \ 30.2118 develop(intx, TraceBytecodesAt, 0, \ 30.2119 - "Traces bytecodes starting with specified bytecode number") \ 30.2120 + "Trace bytecodes starting with specified bytecode number") \ 30.2121 \ 30.2122 /* compiler interface */ \ 30.2123 develop(intx, CIStart, 0, \ 30.2124 - "the id of the first compilation to permit") \ 30.2125 + "The id of the first compilation to permit") \ 30.2126 \ 30.2127 develop(intx, CIStop, -1, \ 30.2128 - "the id of the last compilation to permit") \ 30.2129 + "The id of the last compilation to permit") \ 30.2130 \ 30.2131 develop(intx, CIStartOSR, 0, \ 30.2132 - "the id of the first osr compilation to permit " \ 30.2133 + "The id of the first osr compilation to permit " \ 30.2134 "(CICountOSR must be on)") \ 30.2135 \ 30.2136 develop(intx, CIStopOSR, -1, \ 30.2137 - "the id of the last osr compilation to permit " \ 30.2138 + "The id of the last osr compilation to permit " \ 30.2139 "(CICountOSR must be on)") \ 30.2140 \ 30.2141 develop(intx, CIBreakAtOSR, -1, \ 30.2142 - "id of osr compilation to break at") \ 30.2143 + "The id of osr compilation to break at") \ 30.2144 \ 30.2145 develop(intx, CIBreakAt, -1, \ 30.2146 - "id of compilation to break at") \ 30.2147 + "The id of compilation to break at") \ 30.2148 \ 30.2149 product(ccstrlist, CompileOnly, "", \ 30.2150 "List of methods (pkg/class.name) to restrict compilation to") \ 30.2151 @@ -3295,11 +3330,11 @@ 30.2152 "[default: ./replay_pid%p.log] (%p replaced with pid)") \ 30.2153 \ 30.2154 develop(intx, ReplaySuppressInitializers, 2, \ 30.2155 - "Controls handling of class initialization during replay" \ 30.2156 - "0 - don't do anything special" \ 30.2157 - "1 - treat all class initializers as empty" \ 30.2158 - "2 - treat class initializers for application classes as empty" \ 
30.2159 - "3 - allow all class initializers to run during bootstrap but" \ 30.2160 + "Control handling of class initialization during replay: " \ 30.2161 + "0 - don't do anything special; " \ 30.2162 + "1 - treat all class initializers as empty; " \ 30.2163 + "2 - treat class initializers for application classes as empty; " \ 30.2164 + "3 - allow all class initializers to run during bootstrap but " \ 30.2165 " pretend they are empty after starting replay") \ 30.2166 \ 30.2167 develop(bool, ReplayIgnoreInitErrors, false, \ 30.2168 @@ -3328,14 +3363,15 @@ 30.2169 "0 : Normal. "\ 30.2170 " VM chooses priorities that are appropriate for normal "\ 30.2171 " applications. On Solaris NORM_PRIORITY and above are mapped "\ 30.2172 - " to normal native priority. Java priorities below NORM_PRIORITY"\ 30.2173 - " map to lower native priority values. On Windows applications"\ 30.2174 - " are allowed to use higher native priorities. However, with "\ 30.2175 - " ThreadPriorityPolicy=0, VM will not use the highest possible"\ 30.2176 - " native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may "\ 30.2177 - " interfere with system threads. On Linux thread priorities "\ 30.2178 - " are ignored because the OS does not support static priority "\ 30.2179 - " in SCHED_OTHER scheduling class which is the only choice for"\ 30.2180 + " to normal native priority. Java priorities below " \ 30.2181 + " NORM_PRIORITY map to lower native priority values. On "\ 30.2182 + " Windows applications are allowed to use higher native "\ 30.2183 + " priorities. However, with ThreadPriorityPolicy=0, VM will "\ 30.2184 + " not use the highest possible native priority, "\ 30.2185 + " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\ 30.2186 + " system threads. On Linux thread priorities are ignored "\ 30.2187 + " because the OS does not support static priority in "\ 30.2188 + " SCHED_OTHER scheduling class which is the only choice for "\ 30.2189 " non-root, non-realtime applications. 
"\ 30.2190 "1 : Aggressive. "\ 30.2191 " Java thread priorities map over to the entire range of "\ 30.2192 @@ -3366,16 +3402,35 @@ 30.2193 product(bool, VMThreadHintNoPreempt, false, \ 30.2194 "(Solaris only) Give VM thread an extra quanta") \ 30.2195 \ 30.2196 - product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2197 - product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2198 - product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2199 - product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2200 - product(intx, JavaPriority5_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2201 - product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2202 - product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2203 - product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2204 - product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 30.2205 - product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \ 30.2206 + product(intx, JavaPriority1_To_OSPriority, -1, \ 30.2207 + "Map Java priorities to OS priorities") \ 30.2208 + \ 30.2209 + product(intx, JavaPriority2_To_OSPriority, -1, \ 30.2210 + "Map Java priorities to OS priorities") \ 30.2211 + \ 30.2212 + product(intx, JavaPriority3_To_OSPriority, -1, \ 30.2213 + "Map Java priorities to OS priorities") \ 30.2214 + \ 30.2215 + product(intx, JavaPriority4_To_OSPriority, -1, \ 30.2216 + "Map Java priorities to OS priorities") \ 30.2217 + \ 30.2218 + product(intx, JavaPriority5_To_OSPriority, -1, \ 30.2219 + "Map Java priorities to OS priorities") \ 30.2220 + \ 30.2221 + product(intx, JavaPriority6_To_OSPriority, -1, \ 30.2222 + "Map Java priorities to OS priorities") \ 30.2223 + \ 30.2224 + 
product(intx, JavaPriority7_To_OSPriority, -1, \ 30.2225 + "Map Java priorities to OS priorities") \ 30.2226 + \ 30.2227 + product(intx, JavaPriority8_To_OSPriority, -1, \ 30.2228 + "Map Java priorities to OS priorities") \ 30.2229 + \ 30.2230 + product(intx, JavaPriority9_To_OSPriority, -1, \ 30.2231 + "Map Java priorities to OS priorities") \ 30.2232 + \ 30.2233 + product(intx, JavaPriority10_To_OSPriority,-1, \ 30.2234 + "Map Java priorities to OS priorities") \ 30.2235 \ 30.2236 experimental(bool, UseCriticalJavaThreadPriority, false, \ 30.2237 "Java thread priority 10 maps to critical scheduling priority") \ 30.2238 @@ -3406,37 +3461,38 @@ 30.2239 "Used with +TraceLongCompiles") \ 30.2240 \ 30.2241 product(intx, StarvationMonitorInterval, 200, \ 30.2242 - "Pause between each check in ms") \ 30.2243 + "Pause between each check (in milliseconds)") \ 30.2244 \ 30.2245 /* recompilation */ \ 30.2246 product_pd(intx, CompileThreshold, \ 30.2247 "number of interpreted method invocations before (re-)compiling") \ 30.2248 \ 30.2249 product_pd(intx, BackEdgeThreshold, \ 30.2250 - "Interpreter Back edge threshold at which an OSR compilation is invoked")\ 30.2251 + "Interpreter Back edge threshold at which an OSR compilation is " \ 30.2252 + "invoked") \ 30.2253 \ 30.2254 product(intx, Tier0InvokeNotifyFreqLog, 7, \ 30.2255 - "Interpreter (tier 0) invocation notification frequency.") \ 30.2256 + "Interpreter (tier 0) invocation notification frequency") \ 30.2257 \ 30.2258 product(intx, Tier2InvokeNotifyFreqLog, 11, \ 30.2259 - "C1 without MDO (tier 2) invocation notification frequency.") \ 30.2260 + "C1 without MDO (tier 2) invocation notification frequency") \ 30.2261 \ 30.2262 product(intx, Tier3InvokeNotifyFreqLog, 10, \ 30.2263 "C1 with MDO profiling (tier 3) invocation notification " \ 30.2264 - "frequency.") \ 30.2265 + "frequency") \ 30.2266 \ 30.2267 product(intx, Tier23InlineeNotifyFreqLog, 20, \ 30.2268 "Inlinee invocation (tiers 2 and 3) notification 
frequency") \ 30.2269 \ 30.2270 product(intx, Tier0BackedgeNotifyFreqLog, 10, \ 30.2271 - "Interpreter (tier 0) invocation notification frequency.") \ 30.2272 + "Interpreter (tier 0) invocation notification frequency") \ 30.2273 \ 30.2274 product(intx, Tier2BackedgeNotifyFreqLog, 14, \ 30.2275 - "C1 without MDO (tier 2) invocation notification frequency.") \ 30.2276 + "C1 without MDO (tier 2) invocation notification frequency") \ 30.2277 \ 30.2278 product(intx, Tier3BackedgeNotifyFreqLog, 13, \ 30.2279 "C1 with MDO profiling (tier 3) invocation notification " \ 30.2280 - "frequency.") \ 30.2281 + "frequency") \ 30.2282 \ 30.2283 product(intx, Tier2CompileThreshold, 0, \ 30.2284 "threshold at which tier 2 compilation is invoked") \ 30.2285 @@ -3453,7 +3509,7 @@ 30.2286 \ 30.2287 product(intx, Tier3CompileThreshold, 2000, \ 30.2288 "Threshold at which tier 3 compilation is invoked (invocation " \ 30.2289 - "minimum must be satisfied.") \ 30.2290 + "minimum must be satisfied") \ 30.2291 \ 30.2292 product(intx, Tier3BackEdgeThreshold, 60000, \ 30.2293 "Back edge threshold at which tier 3 OSR compilation is invoked") \ 30.2294 @@ -3467,7 +3523,7 @@ 30.2295 \ 30.2296 product(intx, Tier4CompileThreshold, 15000, \ 30.2297 "Threshold at which tier 4 compilation is invoked (invocation " \ 30.2298 - "minimum must be satisfied.") \ 30.2299 + "minimum must be satisfied") \ 30.2300 \ 30.2301 product(intx, Tier4BackEdgeThreshold, 40000, \ 30.2302 "Back edge threshold at which tier 4 OSR compilation is invoked") \ 30.2303 @@ -3496,12 +3552,12 @@ 30.2304 "Stop at given compilation level") \ 30.2305 \ 30.2306 product(intx, Tier0ProfilingStartPercentage, 200, \ 30.2307 - "Start profiling in interpreter if the counters exceed tier 3" \ 30.2308 + "Start profiling in interpreter if the counters exceed tier 3 " \ 30.2309 "thresholds by the specified percentage") \ 30.2310 \ 30.2311 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ 30.2312 - "Increase the compile threshold for C1 
compilation if the code" \ 30.2313 - "cache is filled by the specified percentage.") \ 30.2314 + "Increase the compile threshold for C1 compilation if the code " \ 30.2315 + "cache is filled by the specified percentage") \ 30.2316 \ 30.2317 product(intx, TieredRateUpdateMinTime, 1, \ 30.2318 "Minimum rate sampling interval (in milliseconds)") \ 30.2319 @@ -3516,24 +3572,26 @@ 30.2320 "Print tiered events notifications") \ 30.2321 \ 30.2322 product_pd(intx, OnStackReplacePercentage, \ 30.2323 - "NON_TIERED number of method invocations/branches (expressed as %"\ 30.2324 - "of CompileThreshold) before (re-)compiling OSR code") \ 30.2325 + "NON_TIERED number of method invocations/branches (expressed as " \ 30.2326 + "% of CompileThreshold) before (re-)compiling OSR code") \ 30.2327 \ 30.2328 product(intx, InterpreterProfilePercentage, 33, \ 30.2329 - "NON_TIERED number of method invocations/branches (expressed as %"\ 30.2330 - "of CompileThreshold) before profiling in the interpreter") \ 30.2331 + "NON_TIERED number of method invocations/branches (expressed as " \ 30.2332 + "% of CompileThreshold) before profiling in the interpreter") \ 30.2333 \ 30.2334 develop(intx, MaxRecompilationSearchLength, 10, \ 30.2335 - "max. # frames to inspect searching for recompilee") \ 30.2336 + "The maximum number of frames to inspect when searching for " \ 30.2337 + "recompilee") \ 30.2338 \ 30.2339 develop(intx, MaxInterpretedSearchLength, 3, \ 30.2340 - "max. # interp. frames to skip when searching for recompilee") \ 30.2341 + "The maximum number of interpreted frames to skip when searching "\ 30.2342 + "for recompilee") \ 30.2343 \ 30.2344 develop(intx, DesiredMethodLimit, 8000, \ 30.2345 - "desired max. 
method size (in bytecodes) after inlining") \ 30.2346 + "The desired maximum method size (in bytecodes) after inlining") \ 30.2347 \ 30.2348 develop(intx, HugeMethodLimit, 8000, \ 30.2349 - "don't compile methods larger than this if " \ 30.2350 + "Don't compile methods larger than this if " \ 30.2351 "+DontCompileHugeMethods") \ 30.2352 \ 30.2353 /* New JDK 1.4 reflection implementation */ \ 30.2354 @@ -3555,7 +3613,7 @@ 30.2355 "in InvocationTargetException. See 6531596") \ 30.2356 \ 30.2357 develop(bool, VerifyLambdaBytecodes, false, \ 30.2358 - "Force verification of jdk 8 lambda metafactory bytecodes.") \ 30.2359 + "Force verification of jdk 8 lambda metafactory bytecodes") \ 30.2360 \ 30.2361 develop(intx, FastSuperclassLimit, 8, \ 30.2362 "Depth of hardwired instanceof accelerator array") \ 30.2363 @@ -3579,18 +3637,19 @@ 30.2364 /* flags for performance data collection */ \ 30.2365 \ 30.2366 product(bool, UsePerfData, falseInEmbedded, \ 30.2367 - "Flag to disable jvmstat instrumentation for performance testing" \ 30.2368 - "and problem isolation purposes.") \ 30.2369 + "Flag to disable jvmstat instrumentation for performance testing "\ 30.2370 + "and problem isolation purposes") \ 30.2371 \ 30.2372 product(bool, PerfDataSaveToFile, false, \ 30.2373 "Save PerfData memory to hsperfdata_<pid> file on exit") \ 30.2374 \ 30.2375 product(ccstr, PerfDataSaveFile, NULL, \ 30.2376 - "Save PerfData memory to the specified absolute pathname," \ 30.2377 - "%p in the file name if present will be replaced by pid") \ 30.2378 - \ 30.2379 - product(intx, PerfDataSamplingInterval, 50 /*ms*/, \ 30.2380 - "Data sampling interval in milliseconds") \ 30.2381 + "Save PerfData memory to the specified absolute pathname. 
" \ 30.2382 + "The string %p in the file name (if present) " \ 30.2383 + "will be replaced by pid") \ 30.2384 + \ 30.2385 + product(intx, PerfDataSamplingInterval, 50, \ 30.2386 + "Data sampling interval (in milliseconds)") \ 30.2387 \ 30.2388 develop(bool, PerfTraceDataCreation, false, \ 30.2389 "Trace creation of Performance Data Entries") \ 30.2390 @@ -3615,7 +3674,7 @@ 30.2391 "Bypass Win32 file system criteria checks (Windows Only)") \ 30.2392 \ 30.2393 product(intx, UnguardOnExecutionViolation, 0, \ 30.2394 - "Unguard page and retry on no-execute fault (Win32 only)" \ 30.2395 + "Unguard page and retry on no-execute fault (Win32 only) " \ 30.2396 "0=off, 1=conservative, 2=aggressive") \ 30.2397 \ 30.2398 /* Serviceability Support */ \ 30.2399 @@ -3624,7 +3683,7 @@ 30.2400 "Create JMX Management Server") \ 30.2401 \ 30.2402 product(bool, DisableAttachMechanism, false, \ 30.2403 - "Disable mechanism that allows tools to attach to this VM") \ 30.2404 + "Disable mechanism that allows tools to attach to this VM") \ 30.2405 \ 30.2406 product(bool, StartAttachListener, false, \ 30.2407 "Always start Attach Listener at VM startup") \ 30.2408 @@ -3647,9 +3706,9 @@ 30.2409 "Require shared spaces for metadata") \ 30.2410 \ 30.2411 product(bool, DumpSharedSpaces, false, \ 30.2412 - "Special mode: JVM reads a class list, loads classes, builds " \ 30.2413 - "shared spaces, and dumps the shared spaces to a file to be " \ 30.2414 - "used in future JVM runs.") \ 30.2415 + "Special mode: JVM reads a class list, loads classes, builds " \ 30.2416 + "shared spaces, and dumps the shared spaces to a file to be " \ 30.2417 + "used in future JVM runs") \ 30.2418 \ 30.2419 product(bool, PrintSharedSpaces, false, \ 30.2420 "Print usage of shared spaces") \ 30.2421 @@ -3722,7 +3781,7 @@ 30.2422 "Relax the access control checks in the verifier") \ 30.2423 \ 30.2424 diagnostic(bool, PrintDTraceDOF, false, \ 30.2425 - "Print the DTrace DOF passed to the system for JSDT probes") \ 30.2426 + 
"Print the DTrace DOF passed to the system for JSDT probes") \ 30.2427 \ 30.2428 product(uintx, StringTableSize, defaultStringTableSize, \ 30.2429 "Number of buckets in the interned String table") \ 30.2430 @@ -3738,8 +3797,8 @@ 30.2431 \ 30.2432 product(bool, UseVMInterruptibleIO, false, \ 30.2433 "(Unstable, Solaris-specific) Thread interrupt before or with " \ 30.2434 - "EINTR for I/O operations results in OS_INTRPT. The default value"\ 30.2435 - " of this flag is true for JDK 6 and earlier") \ 30.2436 + "EINTR for I/O operations results in OS_INTRPT. The default " \ 30.2437 + "value of this flag is true for JDK 6 and earlier") \ 30.2438 \ 30.2439 diagnostic(bool, WhiteBoxAPI, false, \ 30.2440 "Enable internal testing APIs") \ 30.2441 @@ -3760,6 +3819,7 @@ 30.2442 \ 30.2443 product(bool, EnableTracing, false, \ 30.2444 "Enable event-based tracing") \ 30.2445 + \ 30.2446 product(bool, UseLockedTracing, false, \ 30.2447 "Use locked-tracing when doing event-based tracing") 30.2448
31.1 --- a/src/share/vm/runtime/virtualspace.cpp Fri Oct 11 17:08:22 2013 -0400 31.2 +++ b/src/share/vm/runtime/virtualspace.cpp Fri Oct 11 22:22:19 2013 -0400 31.3 @@ -368,8 +368,15 @@ 31.4 31.5 31.6 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { 31.7 + const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1); 31.8 + return initialize_with_granularity(rs, committed_size, max_commit_granularity); 31.9 +} 31.10 + 31.11 +bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) { 31.12 if(!rs.is_reserved()) return false; // allocation failed. 31.13 assert(_low_boundary == NULL, "VirtualSpace already initialized"); 31.14 + assert(max_commit_granularity > 0, "Granularity must be non-zero."); 31.15 + 31.16 _low_boundary = rs.base(); 31.17 _high_boundary = low_boundary() + rs.size(); 31.18 31.19 @@ -390,7 +397,7 @@ 31.20 // No attempt is made to force large page alignment at the very top and 31.21 // bottom of the space if they are not aligned so already. 
31.22 _lower_alignment = os::vm_page_size(); 31.23 - _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1); 31.24 + _middle_alignment = max_commit_granularity; 31.25 _upper_alignment = os::vm_page_size(); 31.26 31.27 // End of each region 31.28 @@ -966,17 +973,52 @@ 31.29 31.30 31.31 class TestVirtualSpace : AllStatic { 31.32 + enum TestLargePages { 31.33 + Default, 31.34 + Disable, 31.35 + Reserve, 31.36 + Commit 31.37 + }; 31.38 + 31.39 + static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) { 31.40 + switch(mode) { 31.41 + default: 31.42 + case Default: 31.43 + case Reserve: 31.44 + return ReservedSpace(reserve_size_aligned); 31.45 + case Disable: 31.46 + case Commit: 31.47 + return ReservedSpace(reserve_size_aligned, 31.48 + os::vm_allocation_granularity(), 31.49 + /* large */ false, /* exec */ false); 31.50 + } 31.51 + } 31.52 + 31.53 + static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) { 31.54 + switch(mode) { 31.55 + default: 31.56 + case Default: 31.57 + case Reserve: 31.58 + return vs.initialize(rs, 0); 31.59 + case Disable: 31.60 + return vs.initialize_with_granularity(rs, 0, os::vm_page_size()); 31.61 + case Commit: 31.62 + return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1)); 31.63 + } 31.64 + } 31.65 + 31.66 public: 31.67 - static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) { 31.68 + static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size, 31.69 + TestLargePages mode = Default) { 31.70 size_t granularity = os::vm_allocation_granularity(); 31.71 size_t reserve_size_aligned = align_size_up(reserve_size, granularity); 31.72 31.73 - ReservedSpace reserved(reserve_size_aligned); 31.74 + ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode); 31.75 31.76 assert(reserved.is_reserved(), "Must be"); 31.77 31.78 VirtualSpace vs; 31.79 - 
bool initialized = vs.initialize(reserved, 0); 31.80 + bool initialized = initialize_virtual_space(vs, reserved, mode); 31.81 assert(initialized, "Failed to initialize VirtualSpace"); 31.82 31.83 vs.expand_by(commit_size, false); 31.84 @@ -986,7 +1028,10 @@ 31.85 } else { 31.86 assert_ge(vs.actual_committed_size(), commit_size); 31.87 // Approximate the commit granularity. 31.88 - size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size(); 31.89 + // Make sure that we don't commit using large pages 31.90 + // if large pages has been disabled for this VirtualSpace. 31.91 + size_t commit_granularity = (mode == Disable || !UseLargePages) ? 31.92 + os::vm_page_size() : os::large_page_size(); 31.93 assert_lt(vs.actual_committed_size(), commit_size + commit_granularity); 31.94 } 31.95 31.96 @@ -1042,9 +1087,40 @@ 31.97 test_virtual_space_actual_committed_space(10 * M, 10 * M); 31.98 } 31.99 31.100 + static void test_virtual_space_disable_large_pages() { 31.101 + if (!UseLargePages) { 31.102 + return; 31.103 + } 31.104 + // These test cases verify that if we force VirtualSpace to disable large pages 31.105 + test_virtual_space_actual_committed_space(10 * M, 0, Disable); 31.106 + test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable); 31.107 + test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable); 31.108 + test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable); 31.109 + test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable); 31.110 + test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable); 31.111 + test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable); 31.112 + 31.113 + test_virtual_space_actual_committed_space(10 * M, 0, Reserve); 31.114 + test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve); 31.115 + test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve); 31.116 + test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve); 31.117 + 
test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve); 31.118 + test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve); 31.119 + test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve); 31.120 + 31.121 + test_virtual_space_actual_committed_space(10 * M, 0, Commit); 31.122 + test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit); 31.123 + test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit); 31.124 + test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit); 31.125 + test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit); 31.126 + test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit); 31.127 + test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit); 31.128 + } 31.129 + 31.130 static void test_virtual_space() { 31.131 test_virtual_space_actual_committed_space(); 31.132 test_virtual_space_actual_committed_space_one_large_page(); 31.133 + test_virtual_space_disable_large_pages(); 31.134 } 31.135 }; 31.136
32.1 --- a/src/share/vm/runtime/virtualspace.hpp Fri Oct 11 17:08:22 2013 -0400 32.2 +++ b/src/share/vm/runtime/virtualspace.hpp Fri Oct 11 22:22:19 2013 -0400 32.3 @@ -178,6 +178,7 @@ 32.4 public: 32.5 // Initialization 32.6 VirtualSpace(); 32.7 + bool initialize_with_granularity(ReservedSpace rs, size_t committed_byte_size, size_t max_commit_granularity); 32.8 bool initialize(ReservedSpace rs, size_t committed_byte_size); 32.9 32.10 // Destruction
33.1 --- a/src/share/vm/runtime/vmStructs.cpp Fri Oct 11 17:08:22 2013 -0400 33.2 +++ b/src/share/vm/runtime/vmStructs.cpp Fri Oct 11 22:22:19 2013 -0400 33.3 @@ -716,11 +716,17 @@ 33.4 nonstatic_field(PlaceholderEntry, _loader_data, ClassLoaderData*) \ 33.5 \ 33.6 /**************************/ \ 33.7 - /* ProctectionDomainEntry */ \ 33.8 + /* ProtectionDomainEntry */ \ 33.9 /**************************/ \ 33.10 \ 33.11 nonstatic_field(ProtectionDomainEntry, _next, ProtectionDomainEntry*) \ 33.12 - nonstatic_field(ProtectionDomainEntry, _protection_domain, oop) \ 33.13 + nonstatic_field(ProtectionDomainEntry, _pd_cache, ProtectionDomainCacheEntry*) \ 33.14 + \ 33.15 + /*******************************/ \ 33.16 + /* ProtectionDomainCacheEntry */ \ 33.17 + /*******************************/ \ 33.18 + \ 33.19 + nonstatic_field(ProtectionDomainCacheEntry, _literal, oop) \ 33.20 \ 33.21 /*************************/ \ 33.22 /* LoaderConstraintEntry */ \ 33.23 @@ -1563,6 +1569,7 @@ 33.24 declare_toplevel_type(SystemDictionary) \ 33.25 declare_toplevel_type(vmSymbols) \ 33.26 declare_toplevel_type(ProtectionDomainEntry) \ 33.27 + declare_toplevel_type(ProtectionDomainCacheEntry) \ 33.28 \ 33.29 declare_toplevel_type(GenericGrowableArray) \ 33.30 declare_toplevel_type(GrowableArray<int>) \
34.1 --- a/src/share/vm/services/memoryService.hpp Fri Oct 11 17:08:22 2013 -0400 34.2 +++ b/src/share/vm/services/memoryService.hpp Fri Oct 11 22:22:19 2013 -0400 34.3 @@ -148,6 +148,12 @@ 34.4 static void track_code_cache_memory_usage() { 34.5 track_memory_pool_usage(_code_heap_pool); 34.6 } 34.7 + static void track_metaspace_memory_usage() { 34.8 + track_memory_pool_usage(_metaspace_pool); 34.9 + } 34.10 + static void track_compressed_class_memory_usage() { 34.11 + track_memory_pool_usage(_compressed_class_pool); 34.12 + } 34.13 static void track_memory_pool_usage(MemoryPool* pool); 34.14 34.15 static void gc_begin(bool fullGC, bool recordGCBeginTime,
35.1 --- a/src/share/vm/utilities/globalDefinitions.hpp Fri Oct 11 17:08:22 2013 -0400 35.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp Fri Oct 11 22:22:19 2013 -0400 35.3 @@ -326,12 +326,15 @@ 35.4 35.5 const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (p.134) 35.6 35.7 +// Default ProtectionDomainCacheSize values 35.8 + 35.9 +const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017); 35.10 35.11 //---------------------------------------------------------------------------------------------------- 35.12 // Default and minimum StringTableSize values 35.13 35.14 const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013); 35.15 -const int minimumStringTableSize=1009; 35.16 +const int minimumStringTableSize = 1009; 35.17 35.18 const int defaultSymbolTableSize = 20011; 35.19 const int minimumSymbolTableSize = 1009;
36.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 36.2 +++ b/test/runtime/memory/LargePages/TestLargePagesFlags.java Fri Oct 11 22:22:19 2013 -0400 36.3 @@ -0,0 +1,389 @@ 36.4 +/* 36.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 36.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 36.7 + * 36.8 + * This code is free software; you can redistribute it and/or modify it 36.9 + * under the terms of the GNU General Public License version 2 only, as 36.10 + * published by the Free Software Foundation. 36.11 + * 36.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 36.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 36.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 36.15 + * version 2 for more details (a copy is included in the LICENSE file that 36.16 + * accompanied this code). 36.17 + * 36.18 + * You should have received a copy of the GNU General Public License version 36.19 + * 2 along with this work; if not, write to the Free Software Foundation, 36.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 36.21 + * 36.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 36.23 + * or visit www.oracle.com if you need additional information or have any 36.24 + * questions. 36.25 + */ 36.26 + 36.27 +/* @test TestLargePagesFlags 36.28 + * @summary Tests how large pages are chosen depending on the given large pages flag combinations. 
36.29 + * @library /testlibrary 36.30 + * @run main TestLargePagesFlags 36.31 + */ 36.32 + 36.33 +import com.oracle.java.testlibrary.OutputAnalyzer; 36.34 +import com.oracle.java.testlibrary.Platform; 36.35 +import com.oracle.java.testlibrary.ProcessTools; 36.36 +import java.util.ArrayList; 36.37 + 36.38 +public class TestLargePagesFlags { 36.39 + 36.40 + public static void main(String [] args) throws Exception { 36.41 + if (!Platform.isLinux()) { 36.42 + System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux."); 36.43 + return; 36.44 + } 36.45 + 36.46 + testUseTransparentHugePages(); 36.47 + testUseHugeTLBFS(); 36.48 + testUseSHM(); 36.49 + testCombinations(); 36.50 + } 36.51 + 36.52 + public static void testUseTransparentHugePages() throws Exception { 36.53 + if (!canUse(UseTransparentHugePages(true))) { 36.54 + System.out.println("Skipping testUseTransparentHugePages"); 36.55 + return; 36.56 + } 36.57 + 36.58 + // -XX:-UseLargePages overrides all other flags. 36.59 + new FlagTester() 36.60 + .use(UseLargePages(false), 36.61 + UseTransparentHugePages(true)) 36.62 + .expect( 36.63 + UseLargePages(false), 36.64 + UseTransparentHugePages(false), 36.65 + UseHugeTLBFS(false), 36.66 + UseSHM(false)); 36.67 + 36.68 + // Explicitly turn on UseTransparentHugePages. 36.69 + new FlagTester() 36.70 + .use(UseTransparentHugePages(true)) 36.71 + .expect( 36.72 + UseLargePages(true), 36.73 + UseTransparentHugePages(true), 36.74 + UseHugeTLBFS(false), 36.75 + UseSHM(false)); 36.76 + 36.77 + new FlagTester() 36.78 + .use(UseLargePages(true), 36.79 + UseTransparentHugePages(true)) 36.80 + .expect( 36.81 + UseLargePages(true), 36.82 + UseTransparentHugePages(true), 36.83 + UseHugeTLBFS(false), 36.84 + UseSHM(false)); 36.85 + 36.86 + // Setting a specific large pages flag will turn 36.87 + // off heuristics to choose large pages type. 
36.88 + new FlagTester() 36.89 + .use(UseLargePages(true), 36.90 + UseTransparentHugePages(false)) 36.91 + .expect( 36.92 + UseLargePages(false), 36.93 + UseTransparentHugePages(false), 36.94 + UseHugeTLBFS(false), 36.95 + UseSHM(false)); 36.96 + 36.97 + // Don't turn on UseTransparentHugePages 36.98 + // unless the user explicitly asks for them. 36.99 + new FlagTester() 36.100 + .use(UseLargePages(true)) 36.101 + .expect( 36.102 + UseTransparentHugePages(false)); 36.103 + } 36.104 + 36.105 + public static void testUseHugeTLBFS() throws Exception { 36.106 + if (!canUse(UseHugeTLBFS(true))) { 36.107 + System.out.println("Skipping testUseHugeTLBFS"); 36.108 + return; 36.109 + } 36.110 + 36.111 + // -XX:-UseLargePages overrides all other flags. 36.112 + new FlagTester() 36.113 + .use(UseLargePages(false), 36.114 + UseHugeTLBFS(true)) 36.115 + .expect( 36.116 + UseLargePages(false), 36.117 + UseTransparentHugePages(false), 36.118 + UseHugeTLBFS(false), 36.119 + UseSHM(false)); 36.120 + 36.121 + // Explicitly turn on UseHugeTLBFS. 36.122 + new FlagTester() 36.123 + .use(UseHugeTLBFS(true)) 36.124 + .expect( 36.125 + UseLargePages(true), 36.126 + UseTransparentHugePages(false), 36.127 + UseHugeTLBFS(true), 36.128 + UseSHM(false)); 36.129 + 36.130 + new FlagTester() 36.131 + .use(UseLargePages(true), 36.132 + UseHugeTLBFS(true)) 36.133 + .expect( 36.134 + UseLargePages(true), 36.135 + UseTransparentHugePages(false), 36.136 + UseHugeTLBFS(true), 36.137 + UseSHM(false)); 36.138 + 36.139 + // Setting a specific large pages flag will turn 36.140 + // off heuristics to choose large pages type. 36.141 + new FlagTester() 36.142 + .use(UseLargePages(true), 36.143 + UseHugeTLBFS(false)) 36.144 + .expect( 36.145 + UseLargePages(false), 36.146 + UseTransparentHugePages(false), 36.147 + UseHugeTLBFS(false), 36.148 + UseSHM(false)); 36.149 + 36.150 + // Using UseLargePages will default to UseHugeTLBFS large pages. 
36.151 + new FlagTester() 36.152 + .use(UseLargePages(true)) 36.153 + .expect( 36.154 + UseLargePages(true), 36.155 + UseTransparentHugePages(false), 36.156 + UseHugeTLBFS(true), 36.157 + UseSHM(false)); 36.158 + } 36.159 + 36.160 + public static void testUseSHM() throws Exception { 36.161 + if (!canUse(UseSHM(true))) { 36.162 + System.out.println("Skipping testUseSHM"); 36.163 + return; 36.164 + } 36.165 + 36.166 + // -XX:-UseLargePages overrides all other flags. 36.167 + new FlagTester() 36.168 + .use(UseLargePages(false), 36.169 + UseSHM(true)) 36.170 + .expect( 36.171 + UseLargePages(false), 36.172 + UseTransparentHugePages(false), 36.173 + UseHugeTLBFS(false), 36.174 + UseSHM(false)); 36.175 + 36.176 + // Explicitly turn on UseSHM. 36.177 + new FlagTester() 36.178 + .use(UseSHM(true)) 36.179 + .expect( 36.180 + UseLargePages(true), 36.181 + UseTransparentHugePages(false), 36.182 + UseHugeTLBFS(false), 36.183 + UseSHM(true)) ; 36.184 + 36.185 + new FlagTester() 36.186 + .use(UseLargePages(true), 36.187 + UseSHM(true)) 36.188 + .expect( 36.189 + UseLargePages(true), 36.190 + UseTransparentHugePages(false), 36.191 + UseHugeTLBFS(false), 36.192 + UseSHM(true)) ; 36.193 + 36.194 + // Setting a specific large pages flag will turn 36.195 + // off heuristics to choose large pages type. 36.196 + new FlagTester() 36.197 + .use(UseLargePages(true), 36.198 + UseSHM(false)) 36.199 + .expect( 36.200 + UseLargePages(false), 36.201 + UseTransparentHugePages(false), 36.202 + UseHugeTLBFS(false), 36.203 + UseSHM(false)); 36.204 + 36.205 + // Setting UseLargePages can allow the system to choose 36.206 + // UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages. 
36.207 + new FlagTester() 36.208 + .use(UseLargePages(true)) 36.209 + .expect( 36.210 + UseLargePages(true), 36.211 + UseTransparentHugePages(false)); 36.212 + } 36.213 + 36.214 + public static void testCombinations() throws Exception { 36.215 + if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) { 36.216 + System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination"); 36.217 + return; 36.218 + } 36.219 + 36.220 + // UseHugeTLBFS takes precedence over SHM. 36.221 + 36.222 + new FlagTester() 36.223 + .use(UseLargePages(true), 36.224 + UseHugeTLBFS(true), 36.225 + UseSHM(true)) 36.226 + .expect( 36.227 + UseLargePages(true), 36.228 + UseTransparentHugePages(false), 36.229 + UseHugeTLBFS(true), 36.230 + UseSHM(false)); 36.231 + 36.232 + new FlagTester() 36.233 + .use(UseLargePages(true), 36.234 + UseHugeTLBFS(false), 36.235 + UseSHM(true)) 36.236 + .expect( 36.237 + UseLargePages(true), 36.238 + UseTransparentHugePages(false), 36.239 + UseHugeTLBFS(false), 36.240 + UseSHM(true)); 36.241 + 36.242 + new FlagTester() 36.243 + .use(UseLargePages(true), 36.244 + UseHugeTLBFS(true), 36.245 + UseSHM(false)) 36.246 + .expect( 36.247 + UseLargePages(true), 36.248 + UseTransparentHugePages(false), 36.249 + UseHugeTLBFS(true), 36.250 + UseSHM(false)); 36.251 + 36.252 + new FlagTester() 36.253 + .use(UseLargePages(true), 36.254 + UseHugeTLBFS(false), 36.255 + UseSHM(false)) 36.256 + .expect( 36.257 + UseLargePages(false), 36.258 + UseTransparentHugePages(false), 36.259 + UseHugeTLBFS(false), 36.260 + UseSHM(false)); 36.261 + 36.262 + 36.263 + if (!canUse(UseTransparentHugePages(true))) { 36.264 + return; 36.265 + } 36.266 + 36.267 + // UseTransparentHugePages takes precedence. 
36.268 + 36.269 + new FlagTester() 36.270 + .use(UseLargePages(true), 36.271 + UseTransparentHugePages(true), 36.272 + UseHugeTLBFS(true), 36.273 + UseSHM(true)) 36.274 + .expect( 36.275 + UseLargePages(true), 36.276 + UseTransparentHugePages(true), 36.277 + UseHugeTLBFS(false), 36.278 + UseSHM(false)); 36.279 + 36.280 + new FlagTester() 36.281 + .use(UseTransparentHugePages(true), 36.282 + UseHugeTLBFS(true), 36.283 + UseSHM(true)) 36.284 + .expect( 36.285 + UseLargePages(true), 36.286 + UseTransparentHugePages(true), 36.287 + UseHugeTLBFS(false), 36.288 + UseSHM(false)); 36.289 + } 36.290 + 36.291 + private static class FlagTester { 36.292 + private Flag [] useFlags; 36.293 + 36.294 + public FlagTester use(Flag... useFlags) { 36.295 + this.useFlags = useFlags; 36.296 + return this; 36.297 + } 36.298 + 36.299 + public void expect(Flag... expectedFlags) throws Exception { 36.300 + if (useFlags == null) { 36.301 + throw new IllegalStateException("Must run use() before expect()"); 36.302 + } 36.303 + 36.304 + OutputAnalyzer output = executeNewJVM(useFlags); 36.305 + 36.306 + for (Flag flag : expectedFlags) { 36.307 + System.out.println("Looking for: " + flag.flagString()); 36.308 + String strValue = output.firstMatch(".* " + flag.name() + " .* :?= (\\S+).*", 1); 36.309 + 36.310 + if (strValue == null) { 36.311 + throw new RuntimeException("Flag " + flag.name() + " couldn't be found"); 36.312 + } 36.313 + 36.314 + if (!flag.value().equals(strValue)) { 36.315 + throw new RuntimeException("Wrong value for: " + flag.name() 36.316 + + " expected: " + flag.value() 36.317 + + " got: " + strValue); 36.318 + } 36.319 + } 36.320 + 36.321 + output.shouldHaveExitValue(0); 36.322 + } 36.323 + } 36.324 + 36.325 + private static OutputAnalyzer executeNewJVM(Flag... 
flags) throws Exception { 36.326 + ArrayList<String> args = new ArrayList<>(); 36.327 + for (Flag flag : flags) { 36.328 + args.add(flag.flagString()); 36.329 + } 36.330 + args.add("-XX:+PrintFlagsFinal"); 36.331 + args.add("-version"); 36.332 + 36.333 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()])); 36.334 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 36.335 + 36.336 + return output; 36.337 + } 36.338 + 36.339 + private static boolean canUse(Flag flag) { 36.340 + try { 36.341 + new FlagTester().use(flag).expect(flag); 36.342 + } catch (Exception e) { 36.343 + return false; 36.344 + } 36.345 + 36.346 + return true; 36.347 + } 36.348 + 36.349 + private static Flag UseLargePages(boolean value) { 36.350 + return new BooleanFlag("UseLargePages", value); 36.351 + } 36.352 + 36.353 + private static Flag UseTransparentHugePages(boolean value) { 36.354 + return new BooleanFlag("UseTransparentHugePages", value); 36.355 + } 36.356 + 36.357 + private static Flag UseHugeTLBFS(boolean value) { 36.358 + return new BooleanFlag("UseHugeTLBFS", value); 36.359 + } 36.360 + 36.361 + private static Flag UseSHM(boolean value) { 36.362 + return new BooleanFlag("UseSHM", value); 36.363 + } 36.364 + 36.365 + private static class BooleanFlag implements Flag { 36.366 + private String name; 36.367 + private boolean value; 36.368 + 36.369 + BooleanFlag(String name, boolean value) { 36.370 + this.name = name; 36.371 + this.value = value; 36.372 + } 36.373 + 36.374 + public String flagString() { 36.375 + return "-XX:" + (value ? "+" : "-") + name; 36.376 + } 36.377 + 36.378 + public String name() { 36.379 + return name; 36.380 + } 36.381 + 36.382 + public String value() { 36.383 + return Boolean.toString(value); 36.384 + } 36.385 + } 36.386 + 36.387 + private static interface Flag { 36.388 + public String flagString(); 36.389 + public String name(); 36.390 + public String value(); 36.391 + } 36.392 +}