date:    Fri, 11 Oct 2013 08:27:21 -0700
summary: Merge
1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainCacheEntry.java Fri Oct 11 08:27:21 2013 -0700 1.3 @@ -0,0 +1,56 @@ 1.4 +/* 1.5 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 1.25 + * 1.26 + */ 1.27 + 1.28 +package sun.jvm.hotspot.memory; 1.29 + 1.30 +import java.util.*; 1.31 +import sun.jvm.hotspot.debugger.*; 1.32 +import sun.jvm.hotspot.oops.*; 1.33 +import sun.jvm.hotspot.runtime.*; 1.34 +import sun.jvm.hotspot.types.*; 1.35 + 1.36 +public class ProtectionDomainCacheEntry extends VMObject { 1.37 + private static sun.jvm.hotspot.types.OopField protectionDomainField; 1.38 + 1.39 + static { 1.40 + VM.registerVMInitializedObserver(new Observer() { 1.41 + public void update(Observable o, Object data) { 1.42 + initialize(VM.getVM().getTypeDataBase()); 1.43 + } 1.44 + }); 1.45 + } 1.46 + 1.47 + private static synchronized void initialize(TypeDataBase db) { 1.48 + Type type = db.lookupType("ProtectionDomainCacheEntry"); 1.49 + protectionDomainField = type.getOopField("_literal"); 1.50 + } 1.51 + 1.52 + public ProtectionDomainCacheEntry(Address addr) { 1.53 + super(addr); 1.54 + } 1.55 + 1.56 + public Oop protectionDomain() { 1.57 + return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr)); 1.58 + } 1.59 +}
2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java Thu Oct 10 13:25:51 2013 -0700 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java Fri Oct 11 08:27:21 2013 -0700 2.3 @@ -1,5 +1,5 @@ 2.4 /* 2.5 - * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved. 2.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 2.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.8 * 2.9 * This code is free software; you can redistribute it and/or modify it 2.10 @@ -32,7 +32,7 @@ 2.11 2.12 public class ProtectionDomainEntry extends VMObject { 2.13 private static AddressField nextField; 2.14 - private static sun.jvm.hotspot.types.OopField protectionDomainField; 2.15 + private static AddressField pdCacheField; 2.16 2.17 static { 2.18 VM.registerVMInitializedObserver(new Observer() { 2.19 @@ -46,7 +46,7 @@ 2.20 Type type = db.lookupType("ProtectionDomainEntry"); 2.21 2.22 nextField = type.getAddressField("_next"); 2.23 - protectionDomainField = type.getOopField("_protection_domain"); 2.24 + pdCacheField = type.getAddressField("_pd_cache"); 2.25 } 2.26 2.27 public ProtectionDomainEntry(Address addr) { 2.28 @@ -54,10 +54,12 @@ 2.29 } 2.30 2.31 public ProtectionDomainEntry next() { 2.32 - return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr); 2.33 + return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr)); 2.34 } 2.35 2.36 public Oop protectionDomain() { 2.37 - return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr)); 2.38 + ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry) 2.39 + VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr)); 2.40 + return pd_cache.protectionDomain(); 2.41 } 2.42 }
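The two serviceability-agent classes above mirror a layout change inside the VM itself: a dictionary entry's protection-domain list no longer stores the oops directly, each list node now points at a shared cache entry that owns the oop (the "_literal" field the SA reads). A minimal C++ sketch of that indirection, using a placeholder oop type rather than the real HotSpot declarations:

typedef void* oop;  // placeholder for HotSpot's oop type

// Shared cache entry: owns the protection domain oop; it is stored as the
// hashtable literal, which is why the SA reads the "_literal" field.
struct ProtectionDomainCacheEntrySketch {
  oop _literal;
  oop protection_domain() const { return _literal; }
};

// Per-class list node: holds only a link to the shared cache entry plus
// the next node, instead of the oop itself.
struct ProtectionDomainEntrySketch {
  ProtectionDomainEntrySketch*      _next;
  ProtectionDomainCacheEntrySketch* _pd_cache;
  oop protection_domain() const { return _pd_cache->protection_domain(); }
};

Note that the SA diff also fixes a separate bug in ProtectionDomainEntry.next(), which previously re-wrapped the entry's own address instead of following the _next field.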
3.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Thu Oct 10 13:25:51 2013 -0700 3.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Oct 11 08:27:21 2013 -0700 3.3 @@ -37,6 +37,9 @@ 3.4 #include "runtime/vframeArray.hpp" 3.5 #include "utilities/macros.hpp" 3.6 #include "vmreg_sparc.inline.hpp" 3.7 +#if INCLUDE_ALL_GCS 3.8 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 3.9 +#endif 3.10 3.11 // Implementation of StubAssembler 3.12 3.13 @@ -912,7 +915,7 @@ 3.14 Register tmp2 = G3_scratch; 3.15 jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base; 3.16 3.17 - Label not_already_dirty, restart, refill; 3.18 + Label not_already_dirty, restart, refill, young_card; 3.19 3.20 #ifdef _LP64 3.21 __ srlx(addr, CardTableModRefBS::card_shift, addr); 3.22 @@ -924,9 +927,15 @@ 3.23 __ set(rs, cardtable); // cardtable := <card table base> 3.24 __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] 3.25 3.26 + __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); 3.27 + 3.28 + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3.29 + __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] 3.30 + 3.31 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); 3.32 __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 3.33 3.34 + __ bind(young_card); 3.35 // We didn't take the branch, so we're already dirty: return. 3.36 // Use return-from-leaf 3.37 __ retl();
4.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Oct 10 13:25:51 2013 -0700 4.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Fri Oct 11 08:27:21 2013 -0700 4.3 @@ -3752,7 +3752,7 @@ 4.4 #define __ masm. 4.5 address start = __ pc(); 4.6 4.7 - Label not_already_dirty, restart, refill; 4.8 + Label not_already_dirty, restart, refill, young_card; 4.9 4.10 #ifdef _LP64 4.11 __ srlx(O0, CardTableModRefBS::card_shift, O0); 4.12 @@ -3763,9 +3763,15 @@ 4.13 __ set(addrlit, O1); // O1 := <card table base> 4.14 __ ldub(O0, O1, O2); // O2 := [O0 + O1] 4.15 4.16 + __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card); 4.17 + 4.18 + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 4.19 + __ ldub(O0, O1, O2); // O2 := [O0 + O1] 4.20 + 4.21 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code"); 4.22 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty); 4.23 4.24 + __ bind(young_card); 4.25 // We didn't take the branch, so we're already dirty: return. 4.26 // Use return-from-leaf 4.27 __ retl();
5.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Oct 10 13:25:51 2013 -0700 5.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Oct 11 08:27:21 2013 -0700 5.3 @@ -38,6 +38,9 @@ 5.4 #include "runtime/vframeArray.hpp" 5.5 #include "utilities/macros.hpp" 5.6 #include "vmreg_x86.inline.hpp" 5.7 +#if INCLUDE_ALL_GCS 5.8 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 5.9 +#endif 5.10 5.11 5.12 // Implementation of StubAssembler 5.13 @@ -1753,13 +1756,17 @@ 5.14 __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index))); 5.15 #endif 5.16 5.17 - __ cmpb(Address(card_addr, 0), 0); 5.18 + __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); 5.19 + __ jcc(Assembler::equal, done); 5.20 + 5.21 + __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 5.22 + __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 5.23 __ jcc(Assembler::equal, done); 5.24 5.25 // storing region crossing non-NULL, card is clean. 5.26 // dirty card and log. 5.27 5.28 - __ movb(Address(card_addr, 0), 0); 5.29 + __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 5.30 5.31 __ cmpl(queue_index, 0); 5.32 __ jcc(Assembler::equal, runtime);
6.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Oct 10 13:25:51 2013 -0700 6.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Oct 11 08:27:21 2013 -0700 6.3 @@ -3389,13 +3389,18 @@ 6.4 const Register card_addr = tmp; 6.5 lea(card_addr, as_Address(ArrayAddress(cardtable, index))); 6.6 #endif 6.7 - cmpb(Address(card_addr, 0), 0); 6.8 + cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); 6.9 jcc(Assembler::equal, done); 6.10 6.11 + membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 6.12 + cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 6.13 + jcc(Assembler::equal, done); 6.14 + 6.15 + 6.16 // storing a region crossing, non-NULL oop, card is clean. 6.17 // dirty card and log. 6.18 6.19 - movb(Address(card_addr, 0), 0); 6.20 + movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); 6.21 6.22 cmpl(queue_index, 0); 6.23 jcc(Assembler::equal, runtime);
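The four stub changes above (the C1 post-barrier stub and the shared write-barrier stub, on SPARC and x86) add the same filtering sequence to G1's dirty-card path. The following is a C++-level sketch of that sequence, not the generated code: the card values and the two helpers are illustrative stand-ins, and only the fact that dirty_card_val() is zero is taken from the asserts in the diff itself.

#include <atomic>

typedef signed char jbyte;              // card table entries are single bytes

const jbyte kDirtyCard = 0;             // CardTableModRefBS::dirty_card_val(); the asserts above rely on 0
const jbyte kYoungCard = 32;            // illustrative stand-in for G1SATBCardTableModRefBS::g1_young_card_val()

// Hypothetical stand-ins for the membar and the dirty-card/queueing code the stubs emit.
static void storeload() { std::atomic_thread_fence(std::memory_order_seq_cst); }
static void dirty_and_enqueue(volatile jbyte* card) { *card = kDirtyCard; /* ...log for refinement... */ }

void g1_post_barrier_filter(volatile jbyte* card) {
  if (*card == kYoungCard) return;      // store into a young region: no remembered-set work needed
  storeload();                          // order the oop store before re-reading the card
  if (*card == kDirtyCard) return;      // another store already dirtied this card
  dirty_and_enqueue(card);              // dirty the card and log it for refinement
}

The young-card check is what allows stores into young regions to skip card marking entirely, and the StoreLoad fence preserves the ordering that concurrent refinement depends on when the card does have to be dirtied.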
7.1 --- a/src/os/linux/vm/globals_linux.hpp Thu Oct 10 13:25:51 2013 -0700 7.2 +++ b/src/os/linux/vm/globals_linux.hpp Fri Oct 11 08:27:21 2013 -0700 7.3 @@ -53,7 +53,7 @@ 7.4 // Defines Linux-specific default values. The flags are available on all 7.5 // platforms, but they may have different default values on other platforms. 7.6 // 7.7 -define_pd_global(bool, UseLargePages, true); 7.8 +define_pd_global(bool, UseLargePages, false); 7.9 define_pd_global(bool, UseLargePagesIndividualAllocation, false); 7.10 define_pd_global(bool, UseOSErrorReporting, false); 7.11 define_pd_global(bool, UseThreadPriorities, true) ;
8.1 --- a/src/os/linux/vm/os_linux.cpp Thu Oct 10 13:25:51 2013 -0700 8.2 +++ b/src/os/linux/vm/os_linux.cpp Fri Oct 11 08:27:21 2013 -0700 8.3 @@ -3361,13 +3361,15 @@ 8.4 if (FLAG_IS_DEFAULT(UseHugeTLBFS) && 8.5 FLAG_IS_DEFAULT(UseSHM) && 8.6 FLAG_IS_DEFAULT(UseTransparentHugePages)) { 8.7 - // If UseLargePages is specified on the command line try all methods, 8.8 - // if it's default, then try only UseTransparentHugePages. 8.9 - if (FLAG_IS_DEFAULT(UseLargePages)) { 8.10 - UseTransparentHugePages = true; 8.11 - } else { 8.12 - UseHugeTLBFS = UseTransparentHugePages = UseSHM = true; 8.13 - } 8.14 + 8.15 + // The type of large pages has not been specified by the user. 8.16 + 8.17 + // Try UseHugeTLBFS and then UseSHM. 8.18 + UseHugeTLBFS = UseSHM = true; 8.19 + 8.20 + // Don't try UseTransparentHugePages since there are known 8.21 + // performance issues with it turned on. This might change in the future. 8.22 + UseTransparentHugePages = false; 8.23 } 8.24 8.25 if (UseTransparentHugePages) { 8.26 @@ -3393,9 +3395,19 @@ 8.27 } 8.28 8.29 void os::large_page_init() { 8.30 - if (!UseLargePages) { 8.31 + if (!UseLargePages && 8.32 + !UseTransparentHugePages && 8.33 + !UseHugeTLBFS && 8.34 + !UseSHM) { 8.35 + // Not using large pages. 8.36 + return; 8.37 + } 8.38 + 8.39 + if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) { 8.40 + // The user explicitly turned off large pages. 8.41 + // Ignore the rest of the large pages flags. 8.42 + UseTransparentHugePages = false; 8.43 UseHugeTLBFS = false; 8.44 - UseTransparentHugePages = false; 8.45 UseSHM = false; 8.46 return; 8.47 }
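Taken together with the globals_linux.hpp change, the effect is that a plain -XX:+UseLargePages on Linux no longer implies Transparent Huge Pages: HugeTLBFS and SysV shared memory are tried instead, and THP is only used when asked for explicitly. A condensed restatement of that selection logic, with a made-up struct standing in for the real globals:

struct LargePageConfig {
  // true when the corresponding -XX flag was left at its default value
  bool hugetlbfs_is_default, shm_is_default, thp_is_default;
  // resulting backend choices
  bool use_hugetlbfs, use_shm, use_thp;
};

void choose_linux_large_page_backend(LargePageConfig& c) {
  if (c.hugetlbfs_is_default && c.shm_is_default && c.thp_is_default) {
    // No specific backend requested: try HugeTLBFS, then SHM, and keep THP
    // off because of the performance issues noted in the comment above.
    c.use_hugetlbfs = true;
    c.use_shm       = true;
    c.use_thp       = false;
  }
  // Explicitly set flags (e.g. -XX:+UseTransparentHugePages) pass through unchanged.
}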
9.1 --- a/src/share/vm/classfile/dictionary.cpp Thu Oct 10 13:25:51 2013 -0700 9.2 +++ b/src/share/vm/classfile/dictionary.cpp Fri Oct 11 08:27:21 2013 -0700 9.3 @@ -1,5 +1,5 @@ 9.4 /* 9.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 9.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 9.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 9.8 * 9.9 * This code is free software; you can redistribute it and/or modify it 9.10 @@ -25,6 +25,7 @@ 9.11 #include "precompiled.hpp" 9.12 #include "classfile/dictionary.hpp" 9.13 #include "classfile/systemDictionary.hpp" 9.14 +#include "memory/iterator.hpp" 9.15 #include "oops/oop.inline.hpp" 9.16 #include "prims/jvmtiRedefineClassesTrace.hpp" 9.17 #include "utilities/hashtable.inline.hpp" 9.18 @@ -38,17 +39,21 @@ 9.19 : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) { 9.20 _current_class_index = 0; 9.21 _current_class_entry = NULL; 9.22 + _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize); 9.23 }; 9.24 9.25 9.26 - 9.27 Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t, 9.28 int number_of_entries) 9.29 : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) { 9.30 _current_class_index = 0; 9.31 _current_class_entry = NULL; 9.32 + _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize); 9.33 }; 9.34 9.35 +ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) { 9.36 + return _pd_cache_table->get(protection_domain); 9.37 +} 9.38 9.39 DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass, 9.40 ClassLoaderData* loader_data) { 9.41 @@ -105,11 +110,12 @@ 9.42 } 9.43 9.44 9.45 -void DictionaryEntry::add_protection_domain(oop protection_domain) { 9.46 +void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) { 9.47 assert_locked_or_safepoint(SystemDictionary_lock); 9.48 if (!contains_protection_domain(protection_domain)) { 9.49 + ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain); 9.50 ProtectionDomainEntry* new_head = 9.51 - new ProtectionDomainEntry(protection_domain, _pd_set); 9.52 + new ProtectionDomainEntry(entry, _pd_set); 9.53 // Warning: Preserve store ordering. The SystemDictionary is read 9.54 // without locks. The new ProtectionDomainEntry must be 9.55 // complete before other threads can be allowed to see it 9.56 @@ -193,7 +199,10 @@ 9.57 9.58 9.59 void Dictionary::always_strong_oops_do(OopClosure* blk) { 9.60 - // Follow all system classes and temporary placeholders in dictionary 9.61 + // Follow all system classes and temporary placeholders in dictionary; only 9.62 + // protection domain oops contain references into the heap. In a first 9.63 + // pass over the system dictionary determine which need to be treated as 9.64 + // strongly reachable and mark them as such. 9.65 for (int index = 0; index < table_size(); index++) { 9.66 for (DictionaryEntry *probe = bucket(index); 9.67 probe != NULL; 9.68 @@ -201,10 +210,13 @@ 9.69 Klass* e = probe->klass(); 9.70 ClassLoaderData* loader_data = probe->loader_data(); 9.71 if (is_strongly_reachable(loader_data, e)) { 9.72 - probe->protection_domain_set_oops_do(blk); 9.73 + probe->set_strongly_reachable(); 9.74 } 9.75 } 9.76 } 9.77 + // Then iterate over the protection domain cache to apply the closure on the 9.78 + // previously marked ones. 
9.79 + _pd_cache_table->always_strong_oops_do(blk); 9.80 } 9.81 9.82 9.83 @@ -266,18 +278,12 @@ 9.84 } 9.85 } 9.86 9.87 - 9.88 void Dictionary::oops_do(OopClosure* f) { 9.89 - for (int index = 0; index < table_size(); index++) { 9.90 - for (DictionaryEntry* probe = bucket(index); 9.91 - probe != NULL; 9.92 - probe = probe->next()) { 9.93 - probe->protection_domain_set_oops_do(f); 9.94 - } 9.95 - } 9.96 + // Only the protection domain oops contain references into the heap. Iterate 9.97 + // over all of them. 9.98 + _pd_cache_table->oops_do(f); 9.99 } 9.100 9.101 - 9.102 void Dictionary::methods_do(void f(Method*)) { 9.103 for (int index = 0; index < table_size(); index++) { 9.104 for (DictionaryEntry* probe = bucket(index); 9.105 @@ -292,6 +298,11 @@ 9.106 } 9.107 } 9.108 9.109 +void Dictionary::unlink(BoolObjectClosure* is_alive) { 9.110 + // Only the protection domain cache table may contain references to the heap 9.111 + // that need to be unlinked. 9.112 + _pd_cache_table->unlink(is_alive); 9.113 +} 9.114 9.115 Klass* Dictionary::try_get_next_class() { 9.116 while (true) { 9.117 @@ -306,7 +317,6 @@ 9.118 // never reached 9.119 } 9.120 9.121 - 9.122 // Add a loaded class to the system dictionary. 9.123 // Readers of the SystemDictionary aren't always locked, so _buckets 9.124 // is volatile. The store of the next field in the constructor is 9.125 @@ -396,7 +406,7 @@ 9.126 assert(protection_domain() != NULL, 9.127 "real protection domain should be present"); 9.128 9.129 - entry->add_protection_domain(protection_domain()); 9.130 + entry->add_protection_domain(this, protection_domain()); 9.131 9.132 assert(entry->contains_protection_domain(protection_domain()), 9.133 "now protection domain should be present"); 9.134 @@ -446,6 +456,146 @@ 9.135 } 9.136 } 9.137 9.138 +ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size) 9.139 + : Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry)) 9.140 +{ 9.141 +} 9.142 + 9.143 +void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) { 9.144 + assert(SafepointSynchronize::is_at_safepoint(), "must be"); 9.145 + for (int i = 0; i < table_size(); ++i) { 9.146 + ProtectionDomainCacheEntry** p = bucket_addr(i); 9.147 + ProtectionDomainCacheEntry* entry = bucket(i); 9.148 + while (entry != NULL) { 9.149 + if (is_alive->do_object_b(entry->literal())) { 9.150 + p = entry->next_addr(); 9.151 + } else { 9.152 + *p = entry->next(); 9.153 + free_entry(entry); 9.154 + } 9.155 + entry = *p; 9.156 + } 9.157 + } 9.158 +} 9.159 + 9.160 +void ProtectionDomainCacheTable::oops_do(OopClosure* f) { 9.161 + for (int index = 0; index < table_size(); index++) { 9.162 + for (ProtectionDomainCacheEntry* probe = bucket(index); 9.163 + probe != NULL; 9.164 + probe = probe->next()) { 9.165 + probe->oops_do(f); 9.166 + } 9.167 + } 9.168 +} 9.169 + 9.170 +uint ProtectionDomainCacheTable::bucket_size() { 9.171 + return sizeof(ProtectionDomainCacheEntry); 9.172 +} 9.173 + 9.174 +#ifndef PRODUCT 9.175 +void ProtectionDomainCacheTable::print() { 9.176 + tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)", 9.177 + table_size(), number_of_entries()); 9.178 + for (int index = 0; index < table_size(); index++) { 9.179 + for (ProtectionDomainCacheEntry* probe = bucket(index); 9.180 + probe != NULL; 9.181 + probe = probe->next()) { 9.182 + probe->print(); 9.183 + } 9.184 + } 9.185 +} 9.186 + 9.187 +void ProtectionDomainCacheEntry::print() { 9.188 + tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next 
"PTR_FORMAT, 9.189 + this, (void*)literal(), _strongly_reachable, next()); 9.190 +} 9.191 +#endif 9.192 + 9.193 +void ProtectionDomainCacheTable::verify() { 9.194 + int element_count = 0; 9.195 + for (int index = 0; index < table_size(); index++) { 9.196 + for (ProtectionDomainCacheEntry* probe = bucket(index); 9.197 + probe != NULL; 9.198 + probe = probe->next()) { 9.199 + probe->verify(); 9.200 + element_count++; 9.201 + } 9.202 + } 9.203 + guarantee(number_of_entries() == element_count, 9.204 + "Verify of protection domain cache table failed"); 9.205 + debug_only(verify_lookup_length((double)number_of_entries() / table_size())); 9.206 +} 9.207 + 9.208 +void ProtectionDomainCacheEntry::verify() { 9.209 + guarantee(literal()->is_oop(), "must be an oop"); 9.210 +} 9.211 + 9.212 +void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) { 9.213 + // the caller marked the protection domain cache entries that we need to apply 9.214 + // the closure on. Only process them. 9.215 + for (int index = 0; index < table_size(); index++) { 9.216 + for (ProtectionDomainCacheEntry* probe = bucket(index); 9.217 + probe != NULL; 9.218 + probe = probe->next()) { 9.219 + if (probe->is_strongly_reachable()) { 9.220 + probe->reset_strongly_reachable(); 9.221 + probe->oops_do(f); 9.222 + } 9.223 + } 9.224 + } 9.225 +} 9.226 + 9.227 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) { 9.228 + unsigned int hash = compute_hash(protection_domain); 9.229 + int index = hash_to_index(hash); 9.230 + 9.231 + ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain); 9.232 + if (entry == NULL) { 9.233 + entry = add_entry(index, hash, protection_domain); 9.234 + } 9.235 + return entry; 9.236 +} 9.237 + 9.238 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) { 9.239 + for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) { 9.240 + if (e->protection_domain() == protection_domain) { 9.241 + return e; 9.242 + } 9.243 + } 9.244 + 9.245 + return NULL; 9.246 +} 9.247 + 9.248 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) { 9.249 + assert_locked_or_safepoint(SystemDictionary_lock); 9.250 + assert(index == index_for(protection_domain), "incorrect index?"); 9.251 + assert(find_entry(index, protection_domain) == NULL, "no double entry"); 9.252 + 9.253 + ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain); 9.254 + Hashtable<oop, mtClass>::add_entry(index, p); 9.255 + return p; 9.256 +} 9.257 + 9.258 +void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) { 9.259 + unsigned int hash = compute_hash(to_delete->protection_domain()); 9.260 + int index = hash_to_index(hash); 9.261 + 9.262 + ProtectionDomainCacheEntry** p = bucket_addr(index); 9.263 + ProtectionDomainCacheEntry* entry = bucket(index); 9.264 + while (true) { 9.265 + assert(entry != NULL, "sanity"); 9.266 + 9.267 + if (entry == to_delete) { 9.268 + *p = entry->next(); 9.269 + Hashtable<oop, mtClass>::free_entry(entry); 9.270 + break; 9.271 + } else { 9.272 + p = entry->next_addr(); 9.273 + entry = *p; 9.274 + } 9.275 + } 9.276 +} 9.277 + 9.278 SymbolPropertyTable::SymbolPropertyTable(int table_size) 9.279 : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry)) 9.280 { 9.281 @@ -532,11 +682,13 @@ 9.282 tty->cr(); 9.283 } 9.284 } 9.285 + tty->cr(); 9.286 + _pd_cache_table->print(); 9.287 + tty->cr(); 9.288 } 9.289 9.290 
#endif 9.291 9.292 - 9.293 void Dictionary::verify() { 9.294 guarantee(number_of_entries() >= 0, "Verify of system dictionary failed"); 9.295 9.296 @@ -563,5 +715,7 @@ 9.297 guarantee(number_of_entries() == element_count, 9.298 "Verify of system dictionary failed"); 9.299 debug_only(verify_lookup_length((double)number_of_entries() / table_size())); 9.300 + 9.301 + _pd_cache_table->verify(); 9.302 } 9.303
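The dictionary.cpp changes above split oop iteration into two passes: walking the (large) system dictionary only flips a flag on the (much smaller) set of shared cache entries, and the closure is then applied once per distinct protection domain rather than once per (class, protection domain) pair. A simplified sketch of that scheme, with standard containers standing in for the VM hashtables:

#include <vector>

typedef void* oop;                         // placeholder for HotSpot's oop type
typedef void (*OopClosureFn)(oop*);        // stands in for OopClosure::do_oop

struct CacheEntrySketch {
  oop  _literal;                           // the protection domain oop
  bool _strongly_reachable;                // set in pass one, consumed in pass two
};

// Pass 1 (per strongly reachable dictionary entry): mark its cache entries.
void mark_strongly_reachable(const std::vector<CacheEntrySketch*>& pd_set) {
  for (CacheEntrySketch* e : pd_set) e->_strongly_reachable = true;
}

// Pass 2 (over the whole cache): apply the closure to marked entries only,
// clearing the mark again, as always_strong_oops_do does above.
void always_strong_oops_do(std::vector<CacheEntrySketch>& cache, OopClosureFn f) {
  for (CacheEntrySketch& e : cache) {
    if (e._strongly_reachable) {
      e._strongly_reachable = false;
      f(&e._literal);
    }
  }
}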
10.1 --- a/src/share/vm/classfile/dictionary.hpp Thu Oct 10 13:25:51 2013 -0700 10.2 +++ b/src/share/vm/classfile/dictionary.hpp Fri Oct 11 08:27:21 2013 -0700 10.3 @@ -27,11 +27,14 @@ 10.4 10.5 #include "classfile/systemDictionary.hpp" 10.6 #include "oops/instanceKlass.hpp" 10.7 -#include "oops/oop.hpp" 10.8 +#include "oops/oop.inline.hpp" 10.9 #include "utilities/hashtable.hpp" 10.10 10.11 class DictionaryEntry; 10.12 class PSPromotionManager; 10.13 +class ProtectionDomainCacheTable; 10.14 +class ProtectionDomainCacheEntry; 10.15 +class BoolObjectClosure; 10.16 10.17 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 10.18 // The data structure for the system dictionary (and the shared system 10.19 @@ -45,6 +48,8 @@ 10.20 // pointer to the current hash table entry. 10.21 static DictionaryEntry* _current_class_entry; 10.22 10.23 + ProtectionDomainCacheTable* _pd_cache_table; 10.24 + 10.25 DictionaryEntry* get_entry(int index, unsigned int hash, 10.26 Symbol* name, ClassLoaderData* loader_data); 10.27 10.28 @@ -93,6 +98,7 @@ 10.29 10.30 void methods_do(void f(Method*)); 10.31 10.32 + void unlink(BoolObjectClosure* is_alive); 10.33 10.34 // Classes loaded by the bootstrap loader are always strongly reachable. 10.35 // If we're not doing class unloading, all classes are strongly reachable. 10.36 @@ -118,6 +124,7 @@ 10.37 // Sharing support 10.38 void reorder_dictionary(); 10.39 10.40 + ProtectionDomainCacheEntry* cache_get(oop protection_domain); 10.41 10.42 #ifndef PRODUCT 10.43 void print(); 10.44 @@ -126,21 +133,112 @@ 10.45 }; 10.46 10.47 // The following classes can be in dictionary.cpp, but we need these 10.48 -// to be in header file so that SA's vmStructs can access. 10.49 +// to be in header file so that SA's vmStructs can access them. 10.50 +class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> { 10.51 + friend class VMStructs; 10.52 + private: 10.53 + // Flag indicating whether this protection domain entry is strongly reachable. 10.54 + // Used during iterating over the system dictionary to remember oops that need 10.55 + // to be updated. 10.56 + bool _strongly_reachable; 10.57 + public: 10.58 + oop protection_domain() { return literal(); } 10.59 + 10.60 + void init() { 10.61 + _strongly_reachable = false; 10.62 + } 10.63 + 10.64 + ProtectionDomainCacheEntry* next() { 10.65 + return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next(); 10.66 + } 10.67 + 10.68 + ProtectionDomainCacheEntry** next_addr() { 10.69 + return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr(); 10.70 + } 10.71 + 10.72 + void oops_do(OopClosure* f) { 10.73 + f->do_oop(literal_addr()); 10.74 + } 10.75 + 10.76 + void set_strongly_reachable() { _strongly_reachable = true; } 10.77 + bool is_strongly_reachable() { return _strongly_reachable; } 10.78 + void reset_strongly_reachable() { _strongly_reachable = false; } 10.79 + 10.80 + void print() PRODUCT_RETURN; 10.81 + void verify(); 10.82 +}; 10.83 + 10.84 +// The ProtectionDomainCacheTable contains all protection domain oops. The system 10.85 +// dictionary entries reference its entries instead of having references to oops 10.86 +// directly. 10.87 +// This is used to speed up system dictionary iteration: the oops in the 10.88 +// protection domain are the only ones referring the Java heap. So when there is 10.89 +// need to update these, instead of going over every entry of the system dictionary, 10.90 +// we only need to iterate over this set. 
10.91 +// The amount of different protection domains used is typically magnitudes smaller 10.92 +// than the number of system dictionary entries (loaded classes). 10.93 +class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> { 10.94 + friend class VMStructs; 10.95 +private: 10.96 + ProtectionDomainCacheEntry* bucket(int i) { 10.97 + return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i); 10.98 + } 10.99 + 10.100 + // The following method is not MT-safe and must be done under lock. 10.101 + ProtectionDomainCacheEntry** bucket_addr(int i) { 10.102 + return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i); 10.103 + } 10.104 + 10.105 + ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) { 10.106 + ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain); 10.107 + entry->init(); 10.108 + return entry; 10.109 + } 10.110 + 10.111 + static unsigned int compute_hash(oop protection_domain) { 10.112 + return (unsigned int)(protection_domain->identity_hash()); 10.113 + } 10.114 + 10.115 + int index_for(oop protection_domain) { 10.116 + return hash_to_index(compute_hash(protection_domain)); 10.117 + } 10.118 + 10.119 + ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain); 10.120 + ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain); 10.121 + 10.122 +public: 10.123 + 10.124 + ProtectionDomainCacheTable(int table_size); 10.125 + 10.126 + ProtectionDomainCacheEntry* get(oop protection_domain); 10.127 + void free(ProtectionDomainCacheEntry* entry); 10.128 + 10.129 + void unlink(BoolObjectClosure* cl); 10.130 + 10.131 + // GC support 10.132 + void oops_do(OopClosure* f); 10.133 + void always_strong_oops_do(OopClosure* f); 10.134 + 10.135 + static uint bucket_size(); 10.136 + 10.137 + void print() PRODUCT_RETURN; 10.138 + void verify(); 10.139 +}; 10.140 + 10.141 10.142 class ProtectionDomainEntry :public CHeapObj<mtClass> { 10.143 friend class VMStructs; 10.144 public: 10.145 ProtectionDomainEntry* _next; 10.146 - oop _protection_domain; 10.147 + ProtectionDomainCacheEntry* _pd_cache; 10.148 10.149 - ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) { 10.150 - _protection_domain = protection_domain; 10.151 - _next = next; 10.152 + ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) { 10.153 + _pd_cache = pd_cache; 10.154 + _next = next; 10.155 } 10.156 10.157 ProtectionDomainEntry* next() { return _next; } 10.158 - oop protection_domain() { return _protection_domain; } 10.159 + oop protection_domain() { return _pd_cache->protection_domain(); } 10.160 }; 10.161 10.162 // An entry in the system dictionary, this describes a class as 10.163 @@ -151,6 +249,24 @@ 10.164 private: 10.165 // Contains the set of approved protection domains that can access 10.166 // this system dictionary entry. 
10.167 + // 10.168 + // This protection domain set is a set of tuples: 10.169 + // 10.170 + // (InstanceKlass C, initiating class loader ICL, Protection Domain PD) 10.171 + // 10.172 + // [Note that C.protection_domain(), which is stored in the java.lang.Class 10.173 + // mirror of C, is NOT the same as PD] 10.174 + // 10.175 + // If such an entry (C, ICL, PD) exists in the table, it means that 10.176 + // it is okay for a class Foo to reference C, where 10.177 + // 10.178 + // Foo.protection_domain() == PD, and 10.179 + // Foo's defining class loader == ICL 10.180 + // 10.181 + // The usage of the PD set can be seen in SystemDictionary::validate_protection_domain() 10.182 + // It is essentially a cache to avoid repeated Java up-calls to 10.183 + // ClassLoader.checkPackageAccess(). 10.184 + // 10.185 ProtectionDomainEntry* _pd_set; 10.186 ClassLoaderData* _loader_data; 10.187 10.188 @@ -158,7 +274,7 @@ 10.189 // Tells whether a protection is in the approved set. 10.190 bool contains_protection_domain(oop protection_domain) const; 10.191 // Adds a protection domain to the approved set. 10.192 - void add_protection_domain(oop protection_domain); 10.193 + void add_protection_domain(Dictionary* dict, oop protection_domain); 10.194 10.195 Klass* klass() const { return (Klass*)literal(); } 10.196 Klass** klass_addr() { return (Klass**)literal_addr(); } 10.197 @@ -189,12 +305,11 @@ 10.198 : contains_protection_domain(protection_domain()); 10.199 } 10.200 10.201 - 10.202 - void protection_domain_set_oops_do(OopClosure* f) { 10.203 + void set_strongly_reachable() { 10.204 for (ProtectionDomainEntry* current = _pd_set; 10.205 current != NULL; 10.206 current = current->_next) { 10.207 - f->do_oop(&(current->_protection_domain)); 10.208 + current->_pd_cache->set_strongly_reachable(); 10.209 } 10.210 } 10.211 10.212 @@ -202,7 +317,7 @@ 10.213 for (ProtectionDomainEntry* current = _pd_set; 10.214 current != NULL; 10.215 current = current->_next) { 10.216 - current->_protection_domain->verify(); 10.217 + current->_pd_cache->protection_domain()->verify(); 10.218 } 10.219 } 10.220
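The cache table deduplicates protection domains by object identity: compute_hash() uses the oop's identity hash and find_entry() compares the oop itself, so each distinct ProtectionDomain object gets exactly one entry no matter how many classes reference it. A minimal get-or-insert sketch with an ordinary hash map in place of the VM hashtable (the real code keys by identity hash precisely because oops can move during GC; raw pointers are used here only to keep the example short):

#include <unordered_map>

typedef void* oop;   // placeholder; identity is plain pointer identity in this sketch

struct PDCacheEntrySketch {
  oop  literal;
  bool strongly_reachable;
};

std::unordered_map<oop, PDCacheEntrySketch> pd_cache;

PDCacheEntrySketch* get_or_add(oop protection_domain) {
  auto it = pd_cache.find(protection_domain);
  if (it == pd_cache.end()) {
    // Mirrors ProtectionDomainCacheTable::get(): add an entry on first use.
    it = pd_cache.emplace(protection_domain,
                          PDCacheEntrySketch{protection_domain, false}).first;
  }
  return &it->second;
}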
11.1 --- a/src/share/vm/classfile/systemDictionary.cpp Thu Oct 10 13:25:51 2013 -0700 11.2 +++ b/src/share/vm/classfile/systemDictionary.cpp Fri Oct 11 08:27:21 2013 -0700 11.3 @@ -1697,6 +1697,24 @@ 11.4 return newsize; 11.5 } 11.6 11.7 +#ifdef ASSERT 11.8 +class VerifySDReachableAndLiveClosure : public OopClosure { 11.9 +private: 11.10 + BoolObjectClosure* _is_alive; 11.11 + 11.12 + template <class T> void do_oop_work(T* p) { 11.13 + oop obj = oopDesc::load_decode_heap_oop(p); 11.14 + guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live"); 11.15 + } 11.16 + 11.17 +public: 11.18 + VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { } 11.19 + 11.20 + virtual void do_oop(oop* p) { do_oop_work(p); } 11.21 + virtual void do_oop(narrowOop* p) { do_oop_work(p); } 11.22 +}; 11.23 +#endif 11.24 + 11.25 // Assumes classes in the SystemDictionary are only unloaded at a safepoint 11.26 // Note: anonymous classes are not in the SD. 11.27 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) { 11.28 @@ -1707,7 +1725,15 @@ 11.29 unloading_occurred = dictionary()->do_unloading(); 11.30 constraints()->purge_loader_constraints(); 11.31 resolution_errors()->purge_resolution_errors(); 11.32 -} 11.33 + } 11.34 + // Oops referenced by the system dictionary may get unreachable independently 11.35 + // of the class loader (eg. cached protection domain oops). So we need to 11.36 + // explicitly unlink them here instead of in Dictionary::do_unloading. 11.37 + dictionary()->unlink(is_alive); 11.38 +#ifdef ASSERT 11.39 + VerifySDReachableAndLiveClosure cl(is_alive); 11.40 + dictionary()->oops_do(&cl); 11.41 +#endif 11.42 return unloading_occurred; 11.43 } 11.44
12.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Oct 10 13:25:51 2013 -0700 12.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Oct 11 08:27:21 2013 -0700 12.3 @@ -6035,7 +6035,11 @@ 12.4 // is dirty. 12.5 G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); 12.6 MemRegion mr(hr->bottom(), hr->pre_dummy_top()); 12.7 - ct_bs->verify_dirty_region(mr); 12.8 + if (hr->is_young()) { 12.9 + ct_bs->verify_g1_young_region(mr); 12.10 + } else { 12.11 + ct_bs->verify_dirty_region(mr); 12.12 + } 12.13 } 12.14 12.15 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
13.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Oct 10 13:25:51 2013 -0700 13.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Oct 11 08:27:21 2013 -0700 13.3 @@ -29,6 +29,7 @@ 13.4 #include "gc_implementation/g1/g1CollectedHeap.hpp" 13.5 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" 13.6 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 13.7 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 13.8 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 13.9 #include "utilities/taskqueue.hpp" 13.10 13.11 @@ -134,7 +135,7 @@ 13.12 assert(containing_hr->is_in(end - 1), "it should also contain end - 1"); 13.13 13.14 MemRegion mr(start, end); 13.15 - g1_barrier_set()->dirty(mr); 13.16 + g1_barrier_set()->g1_mark_as_young(mr); 13.17 } 13.18 13.19 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
14.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Oct 10 13:25:51 2013 -0700 14.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Oct 11 08:27:21 2013 -0700 14.3 @@ -319,10 +319,10 @@ 14.4 } 14.5 14.6 void G1CollectorPolicy::initialize_flags() { 14.7 - set_min_alignment(HeapRegion::GrainBytes); 14.8 + _min_alignment = HeapRegion::GrainBytes; 14.9 size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name()); 14.10 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); 14.11 - set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size)); 14.12 + _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size); 14.13 if (SurvivorRatio < 1) { 14.14 vm_exit_during_initialization("Invalid survivor ratio specified"); 14.15 }
15.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Oct 10 13:25:51 2013 -0700 15.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Fri Oct 11 08:27:21 2013 -0700 15.3 @@ -70,6 +70,12 @@ 15.4 if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { 15.5 return false; 15.6 } 15.7 + 15.8 + if (val == g1_young_gen) { 15.9 + // the card is for a young gen region. We don't need to keep track of all pointers into young 15.10 + return false; 15.11 + } 15.12 + 15.13 // Cached bit can be installed either on a clean card or on a claimed card. 15.14 jbyte new_val = val; 15.15 if (val == clean_card_val()) { 15.16 @@ -85,6 +91,19 @@ 15.17 return true; 15.18 } 15.19 15.20 +void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) { 15.21 + jbyte *const first = byte_for(mr.start()); 15.22 + jbyte *const last = byte_after(mr.last()); 15.23 + 15.24 + memset(first, g1_young_gen, last - first); 15.25 +} 15.26 + 15.27 +#ifndef PRODUCT 15.28 +void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) { 15.29 + verify_region(mr, g1_young_gen, true); 15.30 +} 15.31 +#endif 15.32 + 15.33 G1SATBCardTableLoggingModRefBS:: 15.34 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, 15.35 int max_covered_regions) : 15.36 @@ -97,7 +116,11 @@ 15.37 void 15.38 G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field, 15.39 oop new_val) { 15.40 - jbyte* byte = byte_for(field); 15.41 + volatile jbyte* byte = byte_for(field); 15.42 + if (*byte == g1_young_gen) { 15.43 + return; 15.44 + } 15.45 + OrderAccess::storeload(); 15.46 if (*byte != dirty_card) { 15.47 *byte = dirty_card; 15.48 Thread* thr = Thread::current(); 15.49 @@ -129,7 +152,7 @@ 15.50 15.51 void 15.52 G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) { 15.53 - jbyte* byte = byte_for(mr.start()); 15.54 + volatile jbyte* byte = byte_for(mr.start()); 15.55 jbyte* last_byte = byte_for(mr.last()); 15.56 Thread* thr = Thread::current(); 15.57 if (whole_heap) { 15.58 @@ -138,25 +161,35 @@ 15.59 byte++; 15.60 } 15.61 } else { 15.62 - // Enqueue if necessary. 15.63 - if (thr->is_Java_thread()) { 15.64 - JavaThread* jt = (JavaThread*)thr; 15.65 - while (byte <= last_byte) { 15.66 - if (*byte != dirty_card) { 15.67 - *byte = dirty_card; 15.68 - jt->dirty_card_queue().enqueue(byte); 15.69 + // skip all consecutive young cards 15.70 + for (; byte <= last_byte && *byte == g1_young_gen; byte++); 15.71 + 15.72 + if (byte <= last_byte) { 15.73 + OrderAccess::storeload(); 15.74 + // Enqueue if necessary. 
15.75 + if (thr->is_Java_thread()) { 15.76 + JavaThread* jt = (JavaThread*)thr; 15.77 + for (; byte <= last_byte; byte++) { 15.78 + if (*byte == g1_young_gen) { 15.79 + continue; 15.80 + } 15.81 + if (*byte != dirty_card) { 15.82 + *byte = dirty_card; 15.83 + jt->dirty_card_queue().enqueue(byte); 15.84 + } 15.85 } 15.86 - byte++; 15.87 - } 15.88 - } else { 15.89 - MutexLockerEx x(Shared_DirtyCardQ_lock, 15.90 - Mutex::_no_safepoint_check_flag); 15.91 - while (byte <= last_byte) { 15.92 - if (*byte != dirty_card) { 15.93 - *byte = dirty_card; 15.94 - _dcqs.shared_dirty_card_queue()->enqueue(byte); 15.95 + } else { 15.96 + MutexLockerEx x(Shared_DirtyCardQ_lock, 15.97 + Mutex::_no_safepoint_check_flag); 15.98 + for (; byte <= last_byte; byte++) { 15.99 + if (*byte == g1_young_gen) { 15.100 + continue; 15.101 + } 15.102 + if (*byte != dirty_card) { 15.103 + *byte = dirty_card; 15.104 + _dcqs.shared_dirty_card_queue()->enqueue(byte); 15.105 + } 15.106 } 15.107 - byte++; 15.108 } 15.109 } 15.110 }
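invalidate() receives the same young-card treatment, but over a whole memory region: leading young cards are skipped outright, a single StoreLoad fence is issued for the remainder, and interior young cards are still skipped inside the loop. A compact sketch of that loop structure, with a plain vector standing in for the dirty card queue, illustrative card values as before, and the locking elided:

#include <atomic>
#include <vector>

typedef signed char jbyte;

const jbyte kDirtyCard = 0;    // dirty_card_val()
const jbyte kYoungCard = 32;   // illustrative stand-in for the g1_young_gen value

void invalidate_range(volatile jbyte* byte, volatile jbyte* last_byte,
                      std::vector<volatile jbyte*>& queue) {
  // Skip all leading cards covering young regions.
  for (; byte <= last_byte && *byte == kYoungCard; byte++) {}
  if (byte > last_byte) return;                           // nothing but young cards

  std::atomic_thread_fence(std::memory_order_seq_cst);    // one StoreLoad for the whole range
  for (; byte <= last_byte; byte++) {
    if (*byte == kYoungCard) continue;                    // interior young cards need no logging
    if (*byte != kDirtyCard) {
      *byte = kDirtyCard;
      queue.push_back(byte);                              // enqueue for concurrent refinement
    }
  }
}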
16.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Oct 10 13:25:51 2013 -0700 16.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Oct 11 08:27:21 2013 -0700 16.3 @@ -38,7 +38,14 @@ 16.4 // snapshot-at-the-beginning marking. 16.5 16.6 class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS { 16.7 +protected: 16.8 + enum G1CardValues { 16.9 + g1_young_gen = CT_MR_BS_last_reserved << 1 16.10 + }; 16.11 + 16.12 public: 16.13 + static int g1_young_card_val() { return g1_young_gen; } 16.14 + 16.15 // Add "pre_val" to a set of objects that may have been disconnected from the 16.16 // pre-marking object graph. 16.17 static void enqueue(oop pre_val); 16.18 @@ -118,6 +125,9 @@ 16.19 _byte_map[card_index] = val; 16.20 } 16.21 16.22 + void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN; 16.23 + void g1_mark_as_young(const MemRegion& mr); 16.24 + 16.25 bool mark_card_deferred(size_t card_index); 16.26 16.27 bool is_card_deferred(size_t card_index) {
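The header places the new g1_young_gen value above everything the generic card table already reserves, so a young card can never be mistaken for a clean, dirty, claimed or deferred one. With the customary base values this works out as below; the concrete numbers are shown for illustration only, the authoritative definitions live in cardTableModRefBS.hpp:

// Illustrative card-value layout; only g1_young_gen is introduced by this change.
enum BaseCardValues {
  clean_card             = -1,
  dirty_card             =  0,
  precleaned_card        =  1,
  claimed_card           =  2,
  deferred_card          =  4,
  last_card              =  8,
  CT_MR_BS_last_reserved = 16
};

enum G1CardValues {
  g1_young_gen = CT_MR_BS_last_reserved << 1   // 32, clear of all reserved values
};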
17.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp Thu Oct 10 13:25:51 2013 -0700 17.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp Fri Oct 11 08:27:21 2013 -0700 17.3 @@ -80,6 +80,10 @@ 17.4 17.5 void reset() { if (_buf != NULL) _index = _sz; } 17.6 17.7 + void enqueue(volatile void* ptr) { 17.8 + enqueue((void*)(ptr)); 17.9 + } 17.10 + 17.11 // Enqueues the given "obj". 17.12 void enqueue(void* ptr) { 17.13 if (!_active) return;
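The small ptrQueue.hpp addition exists because the logging barrier now addresses cards through a volatile jbyte*, which the existing enqueue(void*) cannot accept without an explicit cast; the new overload performs that cast in one place. A minimal illustration with a stand-in queue type:

#include <vector>

typedef signed char jbyte;

// Stand-in for PtrQueue: it stores raw pointers, like the real dirty card queue.
struct PtrQueueSketch {
  std::vector<void*> buf;
  void enqueue(void* p)          { buf.push_back(p); }
  void enqueue(volatile void* p) { enqueue((void*)p); }   // the new overload: drops volatile once
};

int main() {
  static volatile jbyte card = 0;
  PtrQueueSketch q;
  q.enqueue(&card);   // &card is a volatile jbyte*; without the overload this call would not compile
  return q.buf.size() == 1 ? 0 : 1;
}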
18.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Thu Oct 10 13:25:51 2013 -0700 18.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Fri Oct 11 08:27:21 2013 -0700 18.3 @@ -214,9 +214,6 @@ 18.4 : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true), 18.5 _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) { 18.6 } 18.7 - ~VM_CollectForMetadataAllocation() { 18.8 - MetaspaceGC::set_expand_after_GC(false); 18.9 - } 18.10 virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; } 18.11 virtual void doit(); 18.12 MetaWord* result() const { return _result; }
19.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp Thu Oct 10 13:25:51 2013 -0700 19.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Fri Oct 11 08:27:21 2013 -0700 19.3 @@ -202,12 +202,6 @@ 19.4 ShouldNotReachHere(); // Unexpected use of this function 19.5 } 19.6 } 19.7 -MetaWord* CollectedHeap::satisfy_failed_metadata_allocation( 19.8 - ClassLoaderData* loader_data, 19.9 - size_t size, Metaspace::MetadataType mdtype) { 19.10 - return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype); 19.11 -} 19.12 - 19.13 19.14 void CollectedHeap::pre_initialize() { 19.15 // Used for ReduceInitialCardMarks (when COMPILER2 is used);
20.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp Thu Oct 10 13:25:51 2013 -0700 20.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Fri Oct 11 08:27:21 2013 -0700 20.3 @@ -475,11 +475,6 @@ 20.4 // the context of the vm thread. 20.5 virtual void collect_as_vm_thread(GCCause::Cause cause); 20.6 20.7 - // Callback from VM_CollectForMetadataAllocation operation. 20.8 - MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, 20.9 - size_t size, 20.10 - Metaspace::MetadataType mdtype); 20.11 - 20.12 // Returns the barrier set for this heap 20.13 BarrierSet* barrier_set() { return _barrier_set; } 20.14
21.1 --- a/src/share/vm/memory/collectorPolicy.cpp Thu Oct 10 13:25:51 2013 -0700 21.2 +++ b/src/share/vm/memory/collectorPolicy.cpp Fri Oct 11 08:27:21 2013 -0700 21.3 @@ -47,85 +47,53 @@ 21.4 21.5 // CollectorPolicy methods. 21.6 21.7 -// Align down. If the aligning result in 0, return 'alignment'. 21.8 -static size_t restricted_align_down(size_t size, size_t alignment) { 21.9 - return MAX2(alignment, align_size_down_(size, alignment)); 21.10 -} 21.11 - 21.12 void CollectorPolicy::initialize_flags() { 21.13 - assert(max_alignment() >= min_alignment(), 21.14 - err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, 21.15 - max_alignment(), min_alignment())); 21.16 - assert(max_alignment() % min_alignment() == 0, 21.17 - err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, 21.18 - max_alignment(), min_alignment())); 21.19 + assert(_max_alignment >= _min_alignment, 21.20 + err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, 21.21 + _max_alignment, _min_alignment)); 21.22 + assert(_max_alignment % _min_alignment == 0, 21.23 + err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, 21.24 + _max_alignment, _min_alignment)); 21.25 21.26 if (MaxHeapSize < InitialHeapSize) { 21.27 vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); 21.28 } 21.29 21.30 - // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 21.31 - // override if MaxMetaspaceSize was set on the command line or not. 21.32 - // This information is needed later to conform to the specification of the 21.33 - // java.lang.management.MemoryUsage API. 21.34 - // 21.35 - // Ideally, we would be able to set the default value of MaxMetaspaceSize in 21.36 - // globals.hpp to the aligned value, but this is not possible, since the 21.37 - // alignment depends on other flags being parsed. 
21.38 - MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment()); 21.39 - 21.40 - if (MetaspaceSize > MaxMetaspaceSize) { 21.41 - MetaspaceSize = MaxMetaspaceSize; 21.42 - } 21.43 - 21.44 - MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment()); 21.45 - 21.46 - assert(MetaspaceSize <= MaxMetaspaceSize, "Must be"); 21.47 - 21.48 - MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment()); 21.49 - MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment()); 21.50 - 21.51 - MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment()); 21.52 - 21.53 - assert(MetaspaceSize % min_alignment() == 0, "metapace alignment"); 21.54 - assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment"); 21.55 - if (MetaspaceSize < 256*K) { 21.56 - vm_exit_during_initialization("Too small initial Metaspace size"); 21.57 - } 21.58 + MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment); 21.59 } 21.60 21.61 void CollectorPolicy::initialize_size_info() { 21.62 // User inputs from -mx and ms must be aligned 21.63 - set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment())); 21.64 - set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment())); 21.65 - set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment())); 21.66 + _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment); 21.67 + _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment); 21.68 + _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment); 21.69 21.70 // Check heap parameter properties 21.71 - if (initial_heap_byte_size() < M) { 21.72 + if (_initial_heap_byte_size < M) { 21.73 vm_exit_during_initialization("Too small initial heap"); 21.74 } 21.75 // Check heap parameter properties 21.76 - if (min_heap_byte_size() < M) { 21.77 + if (_min_heap_byte_size < M) { 21.78 vm_exit_during_initialization("Too small minimum heap"); 21.79 } 21.80 - if (initial_heap_byte_size() <= NewSize) { 21.81 + if (_initial_heap_byte_size <= NewSize) { 21.82 // make sure there is at least some room in old space 21.83 vm_exit_during_initialization("Too small initial heap for new size specified"); 21.84 } 21.85 - if (max_heap_byte_size() < min_heap_byte_size()) { 21.86 + if (_max_heap_byte_size < _min_heap_byte_size) { 21.87 vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); 21.88 } 21.89 - if (initial_heap_byte_size() < min_heap_byte_size()) { 21.90 + if (_initial_heap_byte_size < _min_heap_byte_size) { 21.91 vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); 21.92 } 21.93 - if (max_heap_byte_size() < initial_heap_byte_size()) { 21.94 + if (_max_heap_byte_size < _initial_heap_byte_size) { 21.95 vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); 21.96 } 21.97 21.98 if (PrintGCDetails && Verbose) { 21.99 gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " 21.100 SIZE_FORMAT " Maximum heap " SIZE_FORMAT, 21.101 - min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size()); 21.102 + _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size); 21.103 } 21.104 } 21.105 21.106 @@ -180,15 +148,15 @@ 21.107 21.108 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { 21.109 size_t x = base_size / (NewRatio+1); 21.110 - size_t new_gen_size = x > min_alignment() ? 
21.111 - align_size_down(x, min_alignment()) : 21.112 - min_alignment(); 21.113 + size_t new_gen_size = x > _min_alignment ? 21.114 + align_size_down(x, _min_alignment) : 21.115 + _min_alignment; 21.116 return new_gen_size; 21.117 } 21.118 21.119 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, 21.120 size_t maximum_size) { 21.121 - size_t alignment = min_alignment(); 21.122 + size_t alignment = _min_alignment; 21.123 size_t max_minus = maximum_size - alignment; 21.124 return desired_size < max_minus ? desired_size : max_minus; 21.125 } 21.126 @@ -207,8 +175,8 @@ 21.127 21.128 void GenCollectorPolicy::initialize_flags() { 21.129 // All sizes must be multiples of the generation granularity. 21.130 - set_min_alignment((uintx) Generation::GenGrain); 21.131 - set_max_alignment(compute_max_alignment()); 21.132 + _min_alignment = (uintx) Generation::GenGrain; 21.133 + _max_alignment = compute_max_alignment(); 21.134 21.135 CollectorPolicy::initialize_flags(); 21.136 21.137 @@ -218,26 +186,26 @@ 21.138 if (NewSize > MaxNewSize) { 21.139 MaxNewSize = NewSize; 21.140 } 21.141 - NewSize = align_size_down(NewSize, min_alignment()); 21.142 - MaxNewSize = align_size_down(MaxNewSize, min_alignment()); 21.143 + NewSize = align_size_down(NewSize, _min_alignment); 21.144 + MaxNewSize = align_size_down(MaxNewSize, _min_alignment); 21.145 21.146 // Check validity of heap flags 21.147 - assert(NewSize % min_alignment() == 0, "eden space alignment"); 21.148 - assert(MaxNewSize % min_alignment() == 0, "survivor space alignment"); 21.149 + assert(NewSize % _min_alignment == 0, "eden space alignment"); 21.150 + assert(MaxNewSize % _min_alignment == 0, "survivor space alignment"); 21.151 21.152 - if (NewSize < 3*min_alignment()) { 21.153 + if (NewSize < 3 * _min_alignment) { 21.154 // make sure there room for eden and two survivor spaces 21.155 vm_exit_during_initialization("Too small new size specified"); 21.156 } 21.157 if (SurvivorRatio < 1 || NewRatio < 1) { 21.158 - vm_exit_during_initialization("Invalid heap ratio specified"); 21.159 + vm_exit_during_initialization("Invalid young gen ratio specified"); 21.160 } 21.161 } 21.162 21.163 void TwoGenerationCollectorPolicy::initialize_flags() { 21.164 GenCollectorPolicy::initialize_flags(); 21.165 21.166 - OldSize = align_size_down(OldSize, min_alignment()); 21.167 + OldSize = align_size_down(OldSize, _min_alignment); 21.168 21.169 if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) { 21.170 // NewRatio will be used later to set the young generation size so we use 21.171 @@ -246,11 +214,11 @@ 21.172 assert(NewRatio > 0, "NewRatio should have been set up earlier"); 21.173 size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); 21.174 21.175 - calculated_heapsize = align_size_up(calculated_heapsize, max_alignment()); 21.176 + calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment); 21.177 MaxHeapSize = calculated_heapsize; 21.178 InitialHeapSize = calculated_heapsize; 21.179 } 21.180 - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); 21.181 + MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); 21.182 21.183 // adjust max heap size if necessary 21.184 if (NewSize + OldSize > MaxHeapSize) { 21.185 @@ -260,18 +228,18 @@ 21.186 uintx calculated_size = NewSize + OldSize; 21.187 double shrink_factor = (double) MaxHeapSize / calculated_size; 21.188 // align 21.189 - NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); 21.190 + NewSize = align_size_down((uintx) (NewSize * 
shrink_factor), _min_alignment); 21.191 // OldSize is already aligned because above we aligned MaxHeapSize to 21.192 - // max_alignment(), and we just made sure that NewSize is aligned to 21.193 - // min_alignment(). In initialize_flags() we verified that max_alignment() 21.194 - // is a multiple of min_alignment(). 21.195 + // _max_alignment, and we just made sure that NewSize is aligned to 21.196 + // _min_alignment. In initialize_flags() we verified that _max_alignment 21.197 + // is a multiple of _min_alignment. 21.198 OldSize = MaxHeapSize - NewSize; 21.199 } else { 21.200 MaxHeapSize = NewSize + OldSize; 21.201 } 21.202 } 21.203 // need to do this again 21.204 - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); 21.205 + MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); 21.206 21.207 // adjust max heap size if necessary 21.208 if (NewSize + OldSize > MaxHeapSize) { 21.209 @@ -281,24 +249,24 @@ 21.210 uintx calculated_size = NewSize + OldSize; 21.211 double shrink_factor = (double) MaxHeapSize / calculated_size; 21.212 // align 21.213 - NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); 21.214 + NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment); 21.215 // OldSize is already aligned because above we aligned MaxHeapSize to 21.216 - // max_alignment(), and we just made sure that NewSize is aligned to 21.217 - // min_alignment(). In initialize_flags() we verified that max_alignment() 21.218 - // is a multiple of min_alignment(). 21.219 + // _max_alignment, and we just made sure that NewSize is aligned to 21.220 + // _min_alignment. In initialize_flags() we verified that _max_alignment 21.221 + // is a multiple of _min_alignment. 21.222 OldSize = MaxHeapSize - NewSize; 21.223 } else { 21.224 MaxHeapSize = NewSize + OldSize; 21.225 } 21.226 } 21.227 // need to do this again 21.228 - MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); 21.229 + MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); 21.230 21.231 always_do_update_barrier = UseConcMarkSweepGC; 21.232 21.233 // Check validity of heap flags 21.234 - assert(OldSize % min_alignment() == 0, "old space alignment"); 21.235 - assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment"); 21.236 + assert(OldSize % _min_alignment == 0, "old space alignment"); 21.237 + assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment"); 21.238 } 21.239 21.240 // Values set on the command line win over any ergonomically 21.241 @@ -313,7 +281,7 @@ 21.242 void GenCollectorPolicy::initialize_size_info() { 21.243 CollectorPolicy::initialize_size_info(); 21.244 21.245 - // min_alignment() is used for alignment within a generation. 21.246 + // _min_alignment is used for alignment within a generation. 21.247 // There is additional alignment done down stream for some 21.248 // collectors that sometimes causes unwanted rounding up of 21.249 // generations sizes. 
21.250 @@ -322,18 +290,18 @@ 21.251 21.252 size_t max_new_size = 0; 21.253 if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) { 21.254 - if (MaxNewSize < min_alignment()) { 21.255 - max_new_size = min_alignment(); 21.256 + if (MaxNewSize < _min_alignment) { 21.257 + max_new_size = _min_alignment; 21.258 } 21.259 - if (MaxNewSize >= max_heap_byte_size()) { 21.260 - max_new_size = align_size_down(max_heap_byte_size() - min_alignment(), 21.261 - min_alignment()); 21.262 + if (MaxNewSize >= _max_heap_byte_size) { 21.263 + max_new_size = align_size_down(_max_heap_byte_size - _min_alignment, 21.264 + _min_alignment); 21.265 warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " 21.266 "greater than the entire heap (" SIZE_FORMAT "k). A " 21.267 "new generation size of " SIZE_FORMAT "k will be used.", 21.268 - MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K); 21.269 + MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K); 21.270 } else { 21.271 - max_new_size = align_size_down(MaxNewSize, min_alignment()); 21.272 + max_new_size = align_size_down(MaxNewSize, _min_alignment); 21.273 } 21.274 21.275 // The case for FLAG_IS_ERGO(MaxNewSize) could be treated 21.276 @@ -351,7 +319,7 @@ 21.277 // just accept those choices. The choices currently made are 21.278 // not always "wise". 21.279 } else { 21.280 - max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size()); 21.281 + max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size); 21.282 // Bound the maximum size by NewSize below (since it historically 21.283 // would have been NewSize and because the NewRatio calculation could 21.284 // yield a size that is too small) and bound it by MaxNewSize above. 21.285 @@ -364,13 +332,13 @@ 21.286 // Given the maximum gen0 size, determine the initial and 21.287 // minimum gen0 sizes. 21.288 21.289 - if (max_heap_byte_size() == min_heap_byte_size()) { 21.290 + if (_max_heap_byte_size == _min_heap_byte_size) { 21.291 // The maximum and minimum heap sizes are the same so 21.292 // the generations minimum and initial must be the 21.293 // same as its maximum. 21.294 - set_min_gen0_size(max_new_size); 21.295 - set_initial_gen0_size(max_new_size); 21.296 - set_max_gen0_size(max_new_size); 21.297 + _min_gen0_size = max_new_size; 21.298 + _initial_gen0_size = max_new_size; 21.299 + _max_gen0_size = max_new_size; 21.300 } else { 21.301 size_t desired_new_size = 0; 21.302 if (!FLAG_IS_DEFAULT(NewSize)) { 21.303 @@ -391,43 +359,37 @@ 21.304 // Use the default NewSize as the floor for these values. If 21.305 // NewRatio is overly large, the resulting sizes can be too 21.306 // small. 21.307 - _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()), 21.308 - NewSize); 21.309 + _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize); 21.310 desired_new_size = 21.311 - MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()), 21.312 - NewSize); 21.313 + MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize); 21.314 } 21.315 21.316 assert(_min_gen0_size > 0, "Sanity check"); 21.317 - set_initial_gen0_size(desired_new_size); 21.318 - set_max_gen0_size(max_new_size); 21.319 + _initial_gen0_size = desired_new_size; 21.320 + _max_gen0_size = max_new_size; 21.321 21.322 // At this point the desirable initial and minimum sizes have been 21.323 // determined without regard to the maximum sizes. 21.324 21.325 // Bound the sizes by the corresponding overall heap sizes. 
21.326 - set_min_gen0_size( 21.327 - bound_minus_alignment(_min_gen0_size, min_heap_byte_size())); 21.328 - set_initial_gen0_size( 21.329 - bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size())); 21.330 - set_max_gen0_size( 21.331 - bound_minus_alignment(_max_gen0_size, max_heap_byte_size())); 21.332 + _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size); 21.333 + _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size); 21.334 + _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size); 21.335 21.336 // At this point all three sizes have been checked against the 21.337 // maximum sizes but have not been checked for consistency 21.338 // among the three. 21.339 21.340 // Final check min <= initial <= max 21.341 - set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size)); 21.342 - set_initial_gen0_size( 21.343 - MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size)); 21.344 - set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size)); 21.345 + _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size); 21.346 + _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size); 21.347 + _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size); 21.348 } 21.349 21.350 if (PrintGCDetails && Verbose) { 21.351 gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 21.352 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 21.353 - min_gen0_size(), initial_gen0_size(), max_gen0_size()); 21.354 + _min_gen0_size, _initial_gen0_size, _max_gen0_size); 21.355 } 21.356 } 21.357 21.358 @@ -447,19 +409,17 @@ 21.359 21.360 if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) { 21.361 if ((heap_size < (*gen0_size_ptr + min_gen1_size)) && 21.362 - (heap_size >= min_gen1_size + min_alignment())) { 21.363 + (heap_size >= min_gen1_size + _min_alignment)) { 21.364 // Adjust gen0 down to accommodate min_gen1_size 21.365 *gen0_size_ptr = heap_size - min_gen1_size; 21.366 *gen0_size_ptr = 21.367 - MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()), 21.368 - min_alignment()); 21.369 + MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment); 21.370 assert(*gen0_size_ptr > 0, "Min gen0 is too large"); 21.371 result = true; 21.372 } else { 21.373 *gen1_size_ptr = heap_size - *gen0_size_ptr; 21.374 *gen1_size_ptr = 21.375 - MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()), 21.376 - min_alignment()); 21.377 + MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment); 21.378 } 21.379 } 21.380 return result; 21.381 @@ -480,10 +440,9 @@ 21.382 // The maximum gen1 size can be determined from the maximum gen0 21.383 // and maximum heap size since no explicit flags exits 21.384 // for setting the gen1 maximum. 21.385 - _max_gen1_size = max_heap_byte_size() - _max_gen0_size; 21.386 + _max_gen1_size = _max_heap_byte_size - _max_gen0_size; 21.387 _max_gen1_size = 21.388 - MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()), 21.389 - min_alignment()); 21.390 + MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment); 21.391 // If no explicit command line flag has been set for the 21.392 // gen1 size, use what is left for gen1. 21.393 if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) { 21.394 @@ -492,70 +451,66 @@ 21.395 // with the overall heap size). In either case make 21.396 // the minimum, maximum and initial sizes consistent 21.397 // with the gen0 sizes and the overall heap sizes. 
21.398 - assert(min_heap_byte_size() > _min_gen0_size, 21.399 + assert(_min_heap_byte_size > _min_gen0_size, 21.400 "gen0 has an unexpected minimum size"); 21.401 - set_min_gen1_size(min_heap_byte_size() - min_gen0_size()); 21.402 - set_min_gen1_size( 21.403 - MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()), 21.404 - min_alignment())); 21.405 - set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size()); 21.406 - set_initial_gen1_size( 21.407 - MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()), 21.408 - min_alignment())); 21.409 - 21.410 + _min_gen1_size = _min_heap_byte_size - _min_gen0_size; 21.411 + _min_gen1_size = 21.412 + MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment); 21.413 + _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size; 21.414 + _initial_gen1_size = 21.415 + MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment); 21.416 } else { 21.417 // It's been explicitly set on the command line. Use the 21.418 // OldSize and then determine the consequences. 21.419 - set_min_gen1_size(OldSize); 21.420 - set_initial_gen1_size(OldSize); 21.421 + _min_gen1_size = OldSize; 21.422 + _initial_gen1_size = OldSize; 21.423 21.424 // If the user has explicitly set an OldSize that is inconsistent 21.425 // with other command line flags, issue a warning. 21.426 // The generation minimums and the overall heap mimimum should 21.427 // be within one heap alignment. 21.428 - if ((_min_gen1_size + _min_gen0_size + min_alignment()) < 21.429 - min_heap_byte_size()) { 21.430 + if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) { 21.431 warning("Inconsistency between minimum heap size and minimum " 21.432 - "generation sizes: using minimum heap = " SIZE_FORMAT, 21.433 - min_heap_byte_size()); 21.434 + "generation sizes: using minimum heap = " SIZE_FORMAT, 21.435 + _min_heap_byte_size); 21.436 } 21.437 if ((OldSize > _max_gen1_size)) { 21.438 warning("Inconsistency between maximum heap size and maximum " 21.439 - "generation sizes: using maximum heap = " SIZE_FORMAT 21.440 - " -XX:OldSize flag is being ignored", 21.441 - max_heap_byte_size()); 21.442 + "generation sizes: using maximum heap = " SIZE_FORMAT 21.443 + " -XX:OldSize flag is being ignored", 21.444 + _max_heap_byte_size); 21.445 } 21.446 // If there is an inconsistency between the OldSize and the minimum and/or 21.447 // initial size of gen0, since OldSize was explicitly set, OldSize wins. 21.448 if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, 21.449 - min_heap_byte_size(), OldSize)) { 21.450 + _min_heap_byte_size, OldSize)) { 21.451 if (PrintGCDetails && Verbose) { 21.452 gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 21.453 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 21.454 - min_gen0_size(), initial_gen0_size(), max_gen0_size()); 21.455 + _min_gen0_size, _initial_gen0_size, _max_gen0_size); 21.456 } 21.457 } 21.458 // Initial size 21.459 if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, 21.460 - initial_heap_byte_size(), OldSize)) { 21.461 + _initial_heap_byte_size, OldSize)) { 21.462 if (PrintGCDetails && Verbose) { 21.463 gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 21.464 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 21.465 - min_gen0_size(), initial_gen0_size(), max_gen0_size()); 21.466 + _min_gen0_size, _initial_gen0_size, _max_gen0_size); 21.467 } 21.468 } 21.469 } 21.470 // Enforce the maximum gen1 size. 
21.471 - set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size)); 21.472 + _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size); 21.473 21.474 // Check that min gen1 <= initial gen1 <= max gen1 21.475 - set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size)); 21.476 - set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size)); 21.477 + _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size); 21.478 + _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size); 21.479 21.480 if (PrintGCDetails && Verbose) { 21.481 gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " 21.482 SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, 21.483 - min_gen1_size(), initial_gen1_size(), max_gen1_size()); 21.484 + _min_gen1_size, _initial_gen1_size, _max_gen1_size); 21.485 } 21.486 } 21.487
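The collectorPolicy.cpp hunks above replace the set_*/accessor indirection with direct writes to the _min/_initial/_max fields while keeping the same ordering invariant for the young generation sizes. Below is a minimal standalone sketch of that final clamp, with a local align_size_down helper standing in for the HotSpot one; the names and sizes are illustrative assumptions, not the real flag values.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Local stand-in for HotSpot's align_size_down(); assumes a power-of-two alignment.
static size_t align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t min_alignment = 64 * 1024;          // hypothetical _min_alignment
  size_t min_gen0     = 3 * 1024 * 1024 + 123;     // deliberately unaligned input
  size_t initial_gen0 = 9 * 1024 * 1024;
  size_t max_gen0     = 6 * 1024 * 1024;

  // Align within the generation, as initialize_size_info() does with _min_alignment.
  min_gen0     = align_size_down(min_gen0, min_alignment);
  initial_gen0 = align_size_down(initial_gen0, min_alignment);
  max_gen0     = align_size_down(max_gen0, min_alignment);

  // Final check min <= initial <= max.
  min_gen0     = std::min(min_gen0, max_gen0);
  initial_gen0 = std::max(std::min(initial_gen0, max_gen0), min_gen0);
  min_gen0     = std::min(min_gen0, initial_gen0);

  assert(min_gen0 <= initial_gen0 && initial_gen0 <= max_gen0);
  printf("min %zu initial %zu max %zu\n", min_gen0, initial_gen0, max_gen0);
  return 0;
}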
22.1 --- a/src/share/vm/memory/collectorPolicy.hpp Thu Oct 10 13:25:51 2013 -0700 22.2 +++ b/src/share/vm/memory/collectorPolicy.hpp Fri Oct 11 08:27:21 2013 -0700 22.3 @@ -101,17 +101,12 @@ 22.4 // Return maximum heap alignment that may be imposed by the policy 22.5 static size_t compute_max_alignment(); 22.6 22.7 - void set_min_alignment(size_t align) { _min_alignment = align; } 22.8 size_t min_alignment() { return _min_alignment; } 22.9 - void set_max_alignment(size_t align) { _max_alignment = align; } 22.10 size_t max_alignment() { return _max_alignment; } 22.11 22.12 size_t initial_heap_byte_size() { return _initial_heap_byte_size; } 22.13 - void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; } 22.14 size_t max_heap_byte_size() { return _max_heap_byte_size; } 22.15 - void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; } 22.16 size_t min_heap_byte_size() { return _min_heap_byte_size; } 22.17 - void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; } 22.18 22.19 enum Name { 22.20 CollectorPolicyKind, 22.21 @@ -248,12 +243,9 @@ 22.22 22.23 public: 22.24 // Accessors 22.25 - size_t min_gen0_size() { return _min_gen0_size; } 22.26 - void set_min_gen0_size(size_t v) { _min_gen0_size = v; } 22.27 + size_t min_gen0_size() { return _min_gen0_size; } 22.28 size_t initial_gen0_size() { return _initial_gen0_size; } 22.29 - void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; } 22.30 - size_t max_gen0_size() { return _max_gen0_size; } 22.31 - void set_max_gen0_size(size_t v) { _max_gen0_size = v; } 22.32 + size_t max_gen0_size() { return _max_gen0_size; } 22.33 22.34 virtual int number_of_generations() = 0; 22.35 22.36 @@ -302,12 +294,9 @@ 22.37 22.38 public: 22.39 // Accessors 22.40 - size_t min_gen1_size() { return _min_gen1_size; } 22.41 - void set_min_gen1_size(size_t v) { _min_gen1_size = v; } 22.42 + size_t min_gen1_size() { return _min_gen1_size; } 22.43 size_t initial_gen1_size() { return _initial_gen1_size; } 22.44 - void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; } 22.45 - size_t max_gen1_size() { return _max_gen1_size; } 22.46 - void set_max_gen1_size(size_t v) { _max_gen1_size = v; } 22.47 + size_t max_gen1_size() { return _max_gen1_size; } 22.48 22.49 // Inherited methods 22.50 TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
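The collectorPolicy.hpp change is the companion cleanup: the set_* mutators for the alignment, heap and generation sizes are removed, leaving only the read accessors, and the policy code now assigns the protected fields directly. Reduced to a toy example (the class and member names here are simplified stand-ins, not the real CollectorPolicy hierarchy):

#include <cstddef>
#include <cstdio>

class PolicyBase {
 protected:
  size_t _min_gen0_size;                                     // written directly by subclasses now
 public:
  PolicyBase() : _min_gen0_size(0) {}
  size_t min_gen0_size() const { return _min_gen0_size; }    // read accessor kept
  // void set_min_gen0_size(size_t) no longer exists in this style.
};

class TwoGenPolicy : public PolicyBase {
 public:
  void initialize_size_info(size_t v) {
    _min_gen0_size = v;                                      // direct field assignment
  }
};

int main() {
  TwoGenPolicy p;
  p.initialize_size_info(2 * 1024 * 1024);
  printf("min gen0 = %zu\n", p.min_gen0_size());
  return 0;
}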
23.1 --- a/src/share/vm/memory/filemap.hpp Thu Oct 10 13:25:51 2013 -0700 23.2 +++ b/src/share/vm/memory/filemap.hpp Fri Oct 11 08:27:21 2013 -0700 23.3 @@ -26,6 +26,7 @@ 23.4 #define SHARE_VM_MEMORY_FILEMAP_HPP 23.5 23.6 #include "memory/metaspaceShared.hpp" 23.7 +#include "memory/metaspace.hpp" 23.8 23.9 // Layout of the file: 23.10 // header: dump of archive instance plus versioning info, datestamp, etc.
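filemap.hpp gains a direct include of metaspace.hpp. The metaspace.cpp diff that follows carries the bulk of the change: virtual spaces are now committed in multiples of Metaspace::commit_alignment(), with a required minimum and a preferred amount per expansion. A rough standalone sketch of that min/preferred commit step is shown below, using a plain struct in place of VirtualSpaceNode and an assumed 64K granularity; it mirrors the shape of the patched expand_by(min_words, preferred_words) but is not the HotSpot implementation.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Local helper mirroring align_size_up(); assumes a power-of-two alignment.
static size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

// Fail if even the minimum doesn't fit in the remaining reservation, otherwise
// commit as much of the preferred amount as is still uncommitted.
struct Node {
  size_t reserved;
  size_t committed;

  bool expand_by(size_t min_bytes, size_t preferred_bytes) {
    size_t uncommitted = reserved - committed;
    if (uncommitted < min_bytes) {
      return false;
    }
    size_t commit = std::min(preferred_bytes, uncommitted);
    committed += commit;
    return true;
  }
};

int main() {
  const size_t commit_alignment = 64 * 1024;   // hypothetical commit granularity
  Node node = { 4 * 1024 * 1024, 0 };

  size_t min_bytes       = align_size_up(300 * 1024, commit_alignment);
  size_t preferred_bytes = align_size_up(1 * 1024 * 1024, commit_alignment);

  bool ok = node.expand_by(min_bytes, preferred_bytes);
  printf("expanded=%d committed=%zu\n", ok ? 1 : 0, node.committed);
  return 0;
}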
24.1 --- a/src/share/vm/memory/metaspace.cpp Thu Oct 10 13:25:51 2013 -0700 24.2 +++ b/src/share/vm/memory/metaspace.cpp Fri Oct 11 08:27:21 2013 -0700 24.3 @@ -29,17 +29,21 @@ 24.4 #include "memory/collectorPolicy.hpp" 24.5 #include "memory/filemap.hpp" 24.6 #include "memory/freeList.hpp" 24.7 +#include "memory/gcLocker.hpp" 24.8 #include "memory/metablock.hpp" 24.9 #include "memory/metachunk.hpp" 24.10 #include "memory/metaspace.hpp" 24.11 #include "memory/metaspaceShared.hpp" 24.12 #include "memory/resourceArea.hpp" 24.13 #include "memory/universe.hpp" 24.14 +#include "runtime/atomic.inline.hpp" 24.15 #include "runtime/globals.hpp" 24.16 +#include "runtime/init.hpp" 24.17 #include "runtime/java.hpp" 24.18 #include "runtime/mutex.hpp" 24.19 #include "runtime/orderAccess.hpp" 24.20 #include "services/memTracker.hpp" 24.21 +#include "services/memoryService.hpp" 24.22 #include "utilities/copy.hpp" 24.23 #include "utilities/debug.hpp" 24.24 24.25 @@ -84,13 +88,7 @@ 24.26 return (ChunkIndex) (i+1); 24.27 } 24.28 24.29 -// Originally _capacity_until_GC was set to MetaspaceSize here but 24.30 -// the default MetaspaceSize before argument processing was being 24.31 -// used which was not the desired value. See the code 24.32 -// in should_expand() to see how the initialization is handled 24.33 -// now. 24.34 -size_t MetaspaceGC::_capacity_until_GC = 0; 24.35 -bool MetaspaceGC::_expand_after_GC = false; 24.36 +volatile intptr_t MetaspaceGC::_capacity_until_GC = 0; 24.37 uint MetaspaceGC::_shrink_factor = 0; 24.38 bool MetaspaceGC::_should_concurrent_collect = false; 24.39 24.40 @@ -293,9 +291,10 @@ 24.41 MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } 24.42 24.43 size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } 24.44 - size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; } 24.45 size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } 24.46 24.47 + bool is_pre_committed() const { return _virtual_space.special(); } 24.48 + 24.49 // address of next available space in _virtual_space; 24.50 // Accessors 24.51 VirtualSpaceNode* next() { return _next; } 24.52 @@ -337,7 +336,7 @@ 24.53 24.54 // Expands/shrinks the committed space in a virtual space. Delegates 24.55 // to Virtualspace 24.56 - bool expand_by(size_t words, bool pre_touch = false); 24.57 + bool expand_by(size_t min_words, size_t preferred_words); 24.58 24.59 // In preparation for deleting this node, remove all the chunks 24.60 // in the node from any freelist. 24.61 @@ -351,29 +350,64 @@ 24.62 void print_on(outputStream* st) const; 24.63 }; 24.64 24.65 +#define assert_is_ptr_aligned(ptr, alignment) \ 24.66 + assert(is_ptr_aligned(ptr, alignment), \ 24.67 + err_msg(PTR_FORMAT " is not aligned to " \ 24.68 + SIZE_FORMAT, ptr, alignment)) 24.69 + 24.70 +#define assert_is_size_aligned(size, alignment) \ 24.71 + assert(is_size_aligned(size, alignment), \ 24.72 + err_msg(SIZE_FORMAT " is not aligned to " \ 24.73 + SIZE_FORMAT, size, alignment)) 24.74 + 24.75 + 24.76 +// Decide if large pages should be committed when the memory is reserved. 24.77 +static bool should_commit_large_pages_when_reserving(size_t bytes) { 24.78 + if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) { 24.79 + size_t words = bytes / BytesPerWord; 24.80 + bool is_class = false; // We never reserve large pages for the class space. 
24.81 + if (MetaspaceGC::can_expand(words, is_class) && 24.82 + MetaspaceGC::allowed_expansion() >= words) { 24.83 + return true; 24.84 + } 24.85 + } 24.86 + 24.87 + return false; 24.88 +} 24.89 + 24.90 // byte_size is the size of the associated virtualspace. 24.91 -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 24.92 - // align up to vm allocation granularity 24.93 - byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); 24.94 +VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { 24.95 + assert_is_size_aligned(bytes, Metaspace::reserve_alignment()); 24.96 24.97 // This allocates memory with mmap. For DumpSharedspaces, try to reserve 24.98 // configurable address, generally at the top of the Java heap so other 24.99 // memory addresses don't conflict. 24.100 if (DumpSharedSpaces) { 24.101 - char* shared_base = (char*)SharedBaseAddress; 24.102 - _rs = ReservedSpace(byte_size, 0, false, shared_base, 0); 24.103 + bool large_pages = false; // No large pages when dumping the CDS archive. 24.104 + char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment()); 24.105 + 24.106 + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0); 24.107 if (_rs.is_reserved()) { 24.108 assert(shared_base == 0 || _rs.base() == shared_base, "should match"); 24.109 } else { 24.110 // Get a mmap region anywhere if the SharedBaseAddress fails. 24.111 - _rs = ReservedSpace(byte_size); 24.112 + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 24.113 } 24.114 MetaspaceShared::set_shared_rs(&_rs); 24.115 } else { 24.116 - _rs = ReservedSpace(byte_size); 24.117 + bool large_pages = should_commit_large_pages_when_reserving(bytes); 24.118 + 24.119 + _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); 24.120 } 24.121 24.122 - MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); 24.123 + if (_rs.is_reserved()) { 24.124 + assert(_rs.base() != NULL, "Catch if we get a NULL address"); 24.125 + assert(_rs.size() != 0, "Catch if we get a 0 size"); 24.126 + assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment()); 24.127 + assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment()); 24.128 + 24.129 + MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); 24.130 + } 24.131 } 24.132 24.133 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { 24.134 @@ -410,8 +444,6 @@ 24.135 #endif 24.136 24.137 // List of VirtualSpaces for metadata allocation. 24.138 -// It has a _next link for singly linked list and a MemRegion 24.139 -// for total space in the VirtualSpace. 24.140 class VirtualSpaceList : public CHeapObj<mtClass> { 24.141 friend class VirtualSpaceNode; 24.142 24.143 @@ -419,16 +451,13 @@ 24.144 VirtualSpaceSize = 256 * K 24.145 }; 24.146 24.147 - // Global list of virtual spaces 24.148 // Head of the list 24.149 VirtualSpaceNode* _virtual_space_list; 24.150 // virtual space currently being used for allocations 24.151 VirtualSpaceNode* _current_virtual_space; 24.152 24.153 - // Can this virtual list allocate >1 spaces? 
Also, used to determine 24.154 - // whether to allocate unlimited small chunks in this virtual space 24.155 + // Is this VirtualSpaceList used for the compressed class space 24.156 bool _is_class; 24.157 - bool can_grow() const { return !is_class() || !UseCompressedClassPointers; } 24.158 24.159 // Sum of reserved and committed memory in the virtual spaces 24.160 size_t _reserved_words; 24.161 @@ -453,7 +482,7 @@ 24.162 // Get another virtual space and add it to the list. This 24.163 // is typically prompted by a failed attempt to allocate a chunk 24.164 // and is typically followed by the allocation of a chunk. 24.165 - bool grow_vs(size_t vs_word_size); 24.166 + bool create_new_virtual_space(size_t vs_word_size); 24.167 24.168 public: 24.169 VirtualSpaceList(size_t word_size); 24.170 @@ -465,12 +494,12 @@ 24.171 size_t grow_chunks_by_words, 24.172 size_t medium_chunk_bunch); 24.173 24.174 - bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false); 24.175 - 24.176 - // Get the first chunk for a Metaspace. Used for 24.177 - // special cases such as the boot class loader, reflection 24.178 - // class loader and anonymous class loader. 24.179 - Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch); 24.180 + bool expand_node_by(VirtualSpaceNode* node, 24.181 + size_t min_words, 24.182 + size_t preferred_words); 24.183 + 24.184 + bool expand_by(size_t min_words, 24.185 + size_t preferred_words); 24.186 24.187 VirtualSpaceNode* current_virtual_space() { 24.188 return _current_virtual_space; 24.189 @@ -478,8 +507,7 @@ 24.190 24.191 bool is_class() const { return _is_class; } 24.192 24.193 - // Allocate the first virtualspace. 24.194 - void initialize(size_t word_size); 24.195 + bool initialization_succeeded() { return _virtual_space_list != NULL; } 24.196 24.197 size_t reserved_words() { return _reserved_words; } 24.198 size_t reserved_bytes() { return reserved_words() * BytesPerWord; } 24.199 @@ -708,6 +736,9 @@ 24.200 // and allocates from that chunk. 24.201 MetaWord* grow_and_allocate(size_t word_size); 24.202 24.203 + // Notify memory usage to MemoryService. 24.204 + void track_metaspace_memory_usage(); 24.205 + 24.206 // debugging support. 24.207 24.208 void dump(outputStream* const out) const; 24.209 @@ -869,6 +900,12 @@ 24.210 MetaWord* chunk_limit = top(); 24.211 assert(chunk_limit != NULL, "Not safe to call this method"); 24.212 24.213 + // The virtual spaces are always expanded by the 24.214 + // commit granularity to enforce the following condition. 24.215 + // Without this the is_available check will not work correctly. 
24.216 + assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(), 24.217 + "The committed memory doesn't match the expanded memory."); 24.218 + 24.219 if (!is_available(chunk_word_size)) { 24.220 if (TraceMetadataChunkAllocation) { 24.221 gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); 24.222 @@ -888,14 +925,21 @@ 24.223 24.224 24.225 // Expand the virtual space (commit more of the reserved space) 24.226 -bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) { 24.227 - size_t bytes = words * BytesPerWord; 24.228 - bool result = virtual_space()->expand_by(bytes, pre_touch); 24.229 - if (TraceMetavirtualspaceAllocation && !result) { 24.230 - gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed " 24.231 - "for byte size " SIZE_FORMAT, bytes); 24.232 - virtual_space()->print_on(gclog_or_tty); 24.233 +bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) { 24.234 + size_t min_bytes = min_words * BytesPerWord; 24.235 + size_t preferred_bytes = preferred_words * BytesPerWord; 24.236 + 24.237 + size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size(); 24.238 + 24.239 + if (uncommitted < min_bytes) { 24.240 + return false; 24.241 } 24.242 + 24.243 + size_t commit = MIN2(preferred_bytes, uncommitted); 24.244 + bool result = virtual_space()->expand_by(commit, false); 24.245 + 24.246 + assert(result, "Failed to commit memory"); 24.247 + 24.248 return result; 24.249 } 24.250 24.251 @@ -914,12 +958,23 @@ 24.252 return false; 24.253 } 24.254 24.255 - // An allocation out of this Virtualspace that is larger 24.256 - // than an initial commit size can waste that initial committed 24.257 - // space. 24.258 - size_t committed_byte_size = 0; 24.259 - bool result = virtual_space()->initialize(_rs, committed_byte_size); 24.260 + // These are necessary restriction to make sure that the virtual space always 24.261 + // grows in steps of Metaspace::commit_alignment(). If both base and size are 24.262 + // aligned only the middle alignment of the VirtualSpace is used. 24.263 + assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment()); 24.264 + assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment()); 24.265 + 24.266 + // ReservedSpaces marked as special will have the entire memory 24.267 + // pre-committed. Setting a committed size will make sure that 24.268 + // committed_size and actual_committed_size agrees. 24.269 + size_t pre_committed_size = _rs.special() ? _rs.size() : 0; 24.270 + 24.271 + bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size, 24.272 + Metaspace::commit_alignment()); 24.273 if (result) { 24.274 + assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(), 24.275 + "Checking that the pre-committed memory was registered by the VirtualSpace"); 24.276 + 24.277 set_top((MetaWord*)virtual_space()->low()); 24.278 set_reserved(MemRegion((HeapWord*)_rs.base(), 24.279 (HeapWord*)(_rs.base() + _rs.size()))); 24.280 @@ -976,13 +1031,23 @@ 24.281 _reserved_words = _reserved_words - v; 24.282 } 24.283 24.284 +#define assert_committed_below_limit() \ 24.285 + assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \ 24.286 + err_msg("Too much committed memory. 
Committed: " SIZE_FORMAT \ 24.287 + " limit (MaxMetaspaceSize): " SIZE_FORMAT, \ 24.288 + MetaspaceAux::committed_bytes(), MaxMetaspaceSize)); 24.289 + 24.290 void VirtualSpaceList::inc_committed_words(size_t v) { 24.291 assert_lock_strong(SpaceManager::expand_lock()); 24.292 _committed_words = _committed_words + v; 24.293 + 24.294 + assert_committed_below_limit(); 24.295 } 24.296 void VirtualSpaceList::dec_committed_words(size_t v) { 24.297 assert_lock_strong(SpaceManager::expand_lock()); 24.298 _committed_words = _committed_words - v; 24.299 + 24.300 + assert_committed_below_limit(); 24.301 } 24.302 24.303 void VirtualSpaceList::inc_virtual_space_count() { 24.304 @@ -1025,8 +1090,8 @@ 24.305 if (vsl->container_count() == 0 && vsl != current_virtual_space()) { 24.306 // Unlink it from the list 24.307 if (prev_vsl == vsl) { 24.308 - // This is the case of the current note being the first note. 24.309 - assert(vsl == virtual_space_list(), "Expected to be the first note"); 24.310 + // This is the case of the current node being the first node. 24.311 + assert(vsl == virtual_space_list(), "Expected to be the first node"); 24.312 set_virtual_space_list(vsl->next()); 24.313 } else { 24.314 prev_vsl->set_next(vsl->next()); 24.315 @@ -1054,7 +1119,7 @@ 24.316 #endif 24.317 } 24.318 24.319 -VirtualSpaceList::VirtualSpaceList(size_t word_size ) : 24.320 +VirtualSpaceList::VirtualSpaceList(size_t word_size) : 24.321 _is_class(false), 24.322 _virtual_space_list(NULL), 24.323 _current_virtual_space(NULL), 24.324 @@ -1063,9 +1128,7 @@ 24.325 _virtual_space_count(0) { 24.326 MutexLockerEx cl(SpaceManager::expand_lock(), 24.327 Mutex::_no_safepoint_check_flag); 24.328 - bool initialization_succeeded = grow_vs(word_size); 24.329 - assert(initialization_succeeded, 24.330 - " VirtualSpaceList initialization should not fail"); 24.331 + create_new_virtual_space(word_size); 24.332 } 24.333 24.334 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : 24.335 @@ -1079,8 +1142,9 @@ 24.336 Mutex::_no_safepoint_check_flag); 24.337 VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); 24.338 bool succeeded = class_entry->initialize(); 24.339 - assert(succeeded, " VirtualSpaceList initialization should not fail"); 24.340 - link_vs(class_entry); 24.341 + if (succeeded) { 24.342 + link_vs(class_entry); 24.343 + } 24.344 } 24.345 24.346 size_t VirtualSpaceList::free_bytes() { 24.347 @@ -1088,14 +1152,24 @@ 24.348 } 24.349 24.350 // Allocate another meta virtual space and add it to the list. 24.351 -bool VirtualSpaceList::grow_vs(size_t vs_word_size) { 24.352 +bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) { 24.353 assert_lock_strong(SpaceManager::expand_lock()); 24.354 - if (vs_word_size == 0) { 24.355 + 24.356 + if (is_class()) { 24.357 + assert(false, "We currently don't support more than one VirtualSpace for" 24.358 + " the compressed class space. The initialization of the" 24.359 + " CCS uses another code path and should not hit this path."); 24.360 return false; 24.361 } 24.362 + 24.363 + if (vs_word_size == 0) { 24.364 + assert(false, "vs_word_size should always be at least _reserve_alignment large."); 24.365 + return false; 24.366 + } 24.367 + 24.368 // Reserve the space 24.369 size_t vs_byte_size = vs_word_size * BytesPerWord; 24.370 - assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned"); 24.371 + assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment()); 24.372 24.373 // Allocate the meta virtual space and initialize it. 
24.374 VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); 24.375 @@ -1103,7 +1177,8 @@ 24.376 delete new_entry; 24.377 return false; 24.378 } else { 24.379 - assert(new_entry->reserved_words() == vs_word_size, "Must be"); 24.380 + assert(new_entry->reserved_words() == vs_word_size, 24.381 + "Reserved memory size differs from requested memory size"); 24.382 // ensure lock-free iteration sees fully initialized node 24.383 OrderAccess::storestore(); 24.384 link_vs(new_entry); 24.385 @@ -1130,20 +1205,67 @@ 24.386 } 24.387 } 24.388 24.389 -bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) { 24.390 +bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node, 24.391 + size_t min_words, 24.392 + size_t preferred_words) { 24.393 size_t before = node->committed_words(); 24.394 24.395 - bool result = node->expand_by(word_size, pre_touch); 24.396 + bool result = node->expand_by(min_words, preferred_words); 24.397 24.398 size_t after = node->committed_words(); 24.399 24.400 // after and before can be the same if the memory was pre-committed. 24.401 - assert(after >= before, "Must be"); 24.402 + assert(after >= before, "Inconsistency"); 24.403 inc_committed_words(after - before); 24.404 24.405 return result; 24.406 } 24.407 24.408 +bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) { 24.409 + assert_is_size_aligned(min_words, Metaspace::commit_alignment_words()); 24.410 + assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words()); 24.411 + assert(min_words <= preferred_words, "Invalid arguments"); 24.412 + 24.413 + if (!MetaspaceGC::can_expand(min_words, this->is_class())) { 24.414 + return false; 24.415 + } 24.416 + 24.417 + size_t allowed_expansion_words = MetaspaceGC::allowed_expansion(); 24.418 + if (allowed_expansion_words < min_words) { 24.419 + return false; 24.420 + } 24.421 + 24.422 + size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words); 24.423 + 24.424 + // Commit more memory from the the current virtual space. 24.425 + bool vs_expanded = expand_node_by(current_virtual_space(), 24.426 + min_words, 24.427 + max_expansion_words); 24.428 + if (vs_expanded) { 24.429 + return true; 24.430 + } 24.431 + 24.432 + // Get another virtual space. 24.433 + size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words); 24.434 + grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words()); 24.435 + 24.436 + if (create_new_virtual_space(grow_vs_words)) { 24.437 + if (current_virtual_space()->is_pre_committed()) { 24.438 + // The memory was pre-committed, so we are done here. 24.439 + assert(min_words <= current_virtual_space()->committed_words(), 24.440 + "The new VirtualSpace was pre-committed, so it" 24.441 + "should be large enough to fit the alloc request."); 24.442 + return true; 24.443 + } 24.444 + 24.445 + return expand_node_by(current_virtual_space(), 24.446 + min_words, 24.447 + max_expansion_words); 24.448 + } 24.449 + 24.450 + return false; 24.451 +} 24.452 + 24.453 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, 24.454 size_t grow_chunks_by_words, 24.455 size_t medium_chunk_bunch) { 24.456 @@ -1151,63 +1273,27 @@ 24.457 // Allocate a chunk out of the current virtual space. 24.458 Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 24.459 24.460 - if (next == NULL) { 24.461 - // Not enough room in current virtual space. Try to commit 24.462 - // more space. 
24.463 - size_t expand_vs_by_words = MAX2(medium_chunk_bunch, 24.464 - grow_chunks_by_words); 24.465 - size_t page_size_words = os::vm_page_size() / BytesPerWord; 24.466 - size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words, 24.467 - page_size_words); 24.468 - bool vs_expanded = 24.469 - expand_by(current_virtual_space(), aligned_expand_vs_by_words); 24.470 - if (!vs_expanded) { 24.471 - // Should the capacity of the metaspaces be expanded for 24.472 - // this allocation? If it's the virtual space for classes and is 24.473 - // being used for CompressedHeaders, don't allocate a new virtualspace. 24.474 - if (can_grow() && MetaspaceGC::should_expand(this, word_size)) { 24.475 - // Get another virtual space. 24.476 - size_t allocation_aligned_expand_words = 24.477 - align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord); 24.478 - size_t grow_vs_words = 24.479 - MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words); 24.480 - if (grow_vs(grow_vs_words)) { 24.481 - // Got it. It's on the list now. Get a chunk from it. 24.482 - assert(current_virtual_space()->expanded_words() == 0, 24.483 - "New virtual space nodes should not have expanded"); 24.484 - 24.485 - size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words, 24.486 - page_size_words); 24.487 - // We probably want to expand by aligned_expand_vs_by_words here. 24.488 - expand_by(current_virtual_space(), grow_chunks_by_words_aligned); 24.489 - next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 24.490 - } 24.491 - } else { 24.492 - // Allocation will fail and induce a GC 24.493 - if (TraceMetadataChunkAllocation && Verbose) { 24.494 - gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():" 24.495 - " Fail instead of expand the metaspace"); 24.496 - } 24.497 - } 24.498 - } else { 24.499 - // The virtual space expanded, get a new chunk 24.500 - next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 24.501 - assert(next != NULL, "Just expanded, should succeed"); 24.502 - } 24.503 + if (next != NULL) { 24.504 + return next; 24.505 } 24.506 24.507 - assert(next == NULL || (next->next() == NULL && next->prev() == NULL), 24.508 - "New chunk is still on some list"); 24.509 - return next; 24.510 -} 24.511 - 24.512 -Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size, 24.513 - size_t chunk_bunch) { 24.514 - // Get a chunk from the chunk freelist 24.515 - Metachunk* new_chunk = get_new_chunk(chunk_word_size, 24.516 - chunk_word_size, 24.517 - chunk_bunch); 24.518 - return new_chunk; 24.519 + // The expand amount is currently only determined by the requested sizes 24.520 + // and not how much committed memory is left in the current virtual space. 24.521 + 24.522 + size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words()); 24.523 + size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words()); 24.524 + if (min_word_size >= preferred_word_size) { 24.525 + // Can happen when humongous chunks are allocated. 
24.526 + preferred_word_size = min_word_size; 24.527 + } 24.528 + 24.529 + bool expanded = expand_by(min_word_size, preferred_word_size); 24.530 + if (expanded) { 24.531 + next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); 24.532 + assert(next != NULL, "The allocation was expected to succeed after the expansion"); 24.533 + } 24.534 + 24.535 + return next; 24.536 } 24.537 24.538 void VirtualSpaceList::print_on(outputStream* st) const { 24.539 @@ -1256,96 +1342,96 @@ 24.540 // Calculate the amount to increase the high water mark (HWM). 24.541 // Increase by a minimum amount (MinMetaspaceExpansion) so that 24.542 // another expansion is not requested too soon. If that is not 24.543 -// enough to satisfy the allocation (i.e. big enough for a word_size 24.544 -// allocation), increase by MaxMetaspaceExpansion. If that is still 24.545 -// not enough, expand by the size of the allocation (word_size) plus 24.546 -// some. 24.547 -size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { 24.548 - size_t before_inc = MetaspaceGC::capacity_until_GC(); 24.549 - size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord; 24.550 - size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord; 24.551 - size_t page_size_words = os::vm_page_size() / BytesPerWord; 24.552 - size_t size_delta_words = align_size_up(word_size, page_size_words); 24.553 - size_t delta_words = MAX2(size_delta_words, min_delta_words); 24.554 - if (delta_words > min_delta_words) { 24.555 +// enough to satisfy the allocation, increase by MaxMetaspaceExpansion. 24.556 +// If that is still not enough, expand by the size of the allocation 24.557 +// plus some. 24.558 +size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { 24.559 + size_t min_delta = MinMetaspaceExpansion; 24.560 + size_t max_delta = MaxMetaspaceExpansion; 24.561 + size_t delta = align_size_up(bytes, Metaspace::commit_alignment()); 24.562 + 24.563 + if (delta <= min_delta) { 24.564 + delta = min_delta; 24.565 + } else if (delta <= max_delta) { 24.566 // Don't want to hit the high water mark on the next 24.567 // allocation so make the delta greater than just enough 24.568 // for this allocation. 24.569 - delta_words = MAX2(delta_words, max_delta_words); 24.570 - if (delta_words > max_delta_words) { 24.571 - // This allocation is large but the next ones are probably not 24.572 - // so increase by the minimum. 24.573 - delta_words = delta_words + min_delta_words; 24.574 - } 24.575 + delta = max_delta; 24.576 + } else { 24.577 + // This allocation is large but the next ones are probably not 24.578 + // so increase by the minimum. 24.579 + delta = delta + min_delta; 24.580 } 24.581 - return delta_words; 24.582 + 24.583 + assert_is_size_aligned(delta, Metaspace::commit_alignment()); 24.584 + 24.585 + return delta; 24.586 } 24.587 24.588 -bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { 24.589 - 24.590 - // If the user wants a limit, impose one. 24.591 - // The reason for someone using this flag is to limit reserved space. So 24.592 - // for non-class virtual space, compare against virtual spaces that are reserved. 24.593 - // For class virtual space, we only compare against the committed space, not 24.594 - // reserved space, because this is a larger space prereserved for compressed 24.595 - // class pointers. 
24.596 - if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { 24.597 - size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 24.598 - size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); 24.599 - size_t real_allocated = nonclass_allocated + class_allocated; 24.600 - if (real_allocated >= MaxMetaspaceSize) { 24.601 +size_t MetaspaceGC::capacity_until_GC() { 24.602 + size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC); 24.603 + assert(value >= MetaspaceSize, "Not initialied properly?"); 24.604 + return value; 24.605 +} 24.606 + 24.607 +size_t MetaspaceGC::inc_capacity_until_GC(size_t v) { 24.608 + assert_is_size_aligned(v, Metaspace::commit_alignment()); 24.609 + 24.610 + return (size_t)Atomic::add_ptr(v, &_capacity_until_GC); 24.611 +} 24.612 + 24.613 +size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { 24.614 + assert_is_size_aligned(v, Metaspace::commit_alignment()); 24.615 + 24.616 + return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC); 24.617 +} 24.618 + 24.619 +bool MetaspaceGC::can_expand(size_t word_size, bool is_class) { 24.620 + // Check if the compressed class space is full. 24.621 + if (is_class && Metaspace::using_class_space()) { 24.622 + size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType); 24.623 + if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) { 24.624 return false; 24.625 } 24.626 } 24.627 24.628 - // Class virtual space should always be expanded. Call GC for the other 24.629 - // metadata virtual space. 24.630 - if (Metaspace::using_class_space() && 24.631 - (vsl == Metaspace::class_space_list())) return true; 24.632 - 24.633 - // If this is part of an allocation after a GC, expand 24.634 - // unconditionally. 24.635 - if (MetaspaceGC::expand_after_GC()) { 24.636 - return true; 24.637 + // Check if the user has imposed a limit on the metaspace memory. 24.638 + size_t committed_bytes = MetaspaceAux::committed_bytes(); 24.639 + if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) { 24.640 + return false; 24.641 } 24.642 24.643 - 24.644 - // If the capacity is below the minimum capacity, allow the 24.645 - // expansion. Also set the high-water-mark (capacity_until_GC) 24.646 - // to that minimum capacity so that a GC will not be induced 24.647 - // until that minimum capacity is exceeded. 
24.648 - size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); 24.649 - size_t metaspace_size_bytes = MetaspaceSize; 24.650 - if (committed_capacity_bytes < metaspace_size_bytes || 24.651 - capacity_until_GC() == 0) { 24.652 - set_capacity_until_GC(metaspace_size_bytes); 24.653 - return true; 24.654 - } else { 24.655 - if (committed_capacity_bytes < capacity_until_GC()) { 24.656 - return true; 24.657 - } else { 24.658 - if (TraceMetadataChunkAllocation && Verbose) { 24.659 - gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT 24.660 - " capacity_until_GC " SIZE_FORMAT 24.661 - " allocated_capacity_bytes " SIZE_FORMAT, 24.662 - word_size, 24.663 - capacity_until_GC(), 24.664 - MetaspaceAux::allocated_capacity_bytes()); 24.665 - } 24.666 - return false; 24.667 - } 24.668 + return true; 24.669 +} 24.670 + 24.671 +size_t MetaspaceGC::allowed_expansion() { 24.672 + size_t committed_bytes = MetaspaceAux::committed_bytes(); 24.673 + 24.674 + size_t left_until_max = MaxMetaspaceSize - committed_bytes; 24.675 + 24.676 + // Always grant expansion if we are initiating the JVM, 24.677 + // or if the GC_locker is preventing GCs. 24.678 + if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) { 24.679 + return left_until_max / BytesPerWord; 24.680 } 24.681 + 24.682 + size_t capacity_until_gc = capacity_until_GC(); 24.683 + 24.684 + if (capacity_until_gc <= committed_bytes) { 24.685 + return 0; 24.686 + } 24.687 + 24.688 + size_t left_until_GC = capacity_until_gc - committed_bytes; 24.689 + size_t left_to_commit = MIN2(left_until_GC, left_until_max); 24.690 + 24.691 + return left_to_commit / BytesPerWord; 24.692 } 24.693 24.694 - 24.695 - 24.696 void MetaspaceGC::compute_new_size() { 24.697 assert(_shrink_factor <= 100, "invalid shrink factor"); 24.698 uint current_shrink_factor = _shrink_factor; 24.699 _shrink_factor = 0; 24.700 24.701 - // Until a faster way of calculating the "used" quantity is implemented, 24.702 - // use "capacity". 24.703 const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); 24.704 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); 24.705 24.706 @@ -1377,9 +1463,10 @@ 24.707 // If we have less capacity below the metaspace HWM, then 24.708 // increment the HWM. 24.709 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; 24.710 + expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); 24.711 // Don't expand unless it's significant 24.712 if (expand_bytes >= MinMetaspaceExpansion) { 24.713 - MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); 24.714 + MetaspaceGC::inc_capacity_until_GC(expand_bytes); 24.715 } 24.716 if (PrintGCDetails && Verbose) { 24.717 size_t new_capacity_until_GC = capacity_until_GC; 24.718 @@ -1436,6 +1523,9 @@ 24.719 // on the third call, and 100% by the fourth call. But if we recompute 24.720 // size without shrinking, it goes back to 0%. 
24.721 shrink_bytes = shrink_bytes / 100 * current_shrink_factor; 24.722 + 24.723 + shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); 24.724 + 24.725 assert(shrink_bytes <= max_shrink_bytes, 24.726 err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, 24.727 shrink_bytes, max_shrink_bytes)); 24.728 @@ -1467,7 +1557,7 @@ 24.729 // Don't shrink unless it's significant 24.730 if (shrink_bytes >= MinMetaspaceExpansion && 24.731 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { 24.732 - MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); 24.733 + MetaspaceGC::dec_capacity_until_GC(shrink_bytes); 24.734 } 24.735 } 24.736 24.737 @@ -1700,7 +1790,6 @@ 24.738 assert(free_list != NULL, "Sanity check"); 24.739 24.740 chunk = free_list->head(); 24.741 - debug_only(Metachunk* debug_head = chunk;) 24.742 24.743 if (chunk == NULL) { 24.744 return NULL; 24.745 @@ -1709,9 +1798,6 @@ 24.746 // Remove the chunk as the head of the list. 24.747 free_list->remove_chunk(chunk); 24.748 24.749 - // Chunk is being removed from the chunks free list. 24.750 - dec_free_chunks_total(chunk->capacity_word_size()); 24.751 - 24.752 if (TraceMetadataChunkAllocation && Verbose) { 24.753 gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list " 24.754 PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT, 24.755 @@ -1722,21 +1808,22 @@ 24.756 word_size, 24.757 FreeBlockDictionary<Metachunk>::atLeast); 24.758 24.759 - if (chunk != NULL) { 24.760 - if (TraceMetadataHumongousAllocation) { 24.761 - size_t waste = chunk->word_size() - word_size; 24.762 - gclog_or_tty->print_cr("Free list allocate humongous chunk size " 24.763 - SIZE_FORMAT " for requested size " SIZE_FORMAT 24.764 - " waste " SIZE_FORMAT, 24.765 - chunk->word_size(), word_size, waste); 24.766 - } 24.767 - // Chunk is being removed from the chunks free list. 24.768 - dec_free_chunks_total(chunk->capacity_word_size()); 24.769 - } else { 24.770 + if (chunk == NULL) { 24.771 return NULL; 24.772 } 24.773 + 24.774 + if (TraceMetadataHumongousAllocation) { 24.775 + size_t waste = chunk->word_size() - word_size; 24.776 + gclog_or_tty->print_cr("Free list allocate humongous chunk size " 24.777 + SIZE_FORMAT " for requested size " SIZE_FORMAT 24.778 + " waste " SIZE_FORMAT, 24.779 + chunk->word_size(), word_size, waste); 24.780 + } 24.781 } 24.782 24.783 + // Chunk is being removed from the chunks free list. 
24.784 + dec_free_chunks_total(chunk->capacity_word_size()); 24.785 + 24.786 // Remove it from the links to this freelist 24.787 chunk->set_next(NULL); 24.788 chunk->set_prev(NULL); 24.789 @@ -1977,6 +2064,15 @@ 24.790 return chunk_word_size; 24.791 } 24.792 24.793 +void SpaceManager::track_metaspace_memory_usage() { 24.794 + if (is_init_completed()) { 24.795 + if (is_class()) { 24.796 + MemoryService::track_compressed_class_memory_usage(); 24.797 + } 24.798 + MemoryService::track_metaspace_memory_usage(); 24.799 + } 24.800 +} 24.801 + 24.802 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 24.803 assert(vs_list()->current_virtual_space() != NULL, 24.804 "Should have been set"); 24.805 @@ -2002,15 +2098,24 @@ 24.806 size_t grow_chunks_by_words = calc_chunk_size(word_size); 24.807 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 24.808 24.809 + if (next != NULL) { 24.810 + Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words); 24.811 + } 24.812 + 24.813 + MetaWord* mem = NULL; 24.814 + 24.815 // If a chunk was available, add it to the in-use chunk list 24.816 // and do an allocation from it. 24.817 if (next != NULL) { 24.818 - Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words); 24.819 // Add to this manager's list of chunks in use. 24.820 add_chunk(next, false); 24.821 - return next->allocate(word_size); 24.822 + mem = next->allocate(word_size); 24.823 } 24.824 - return NULL; 24.825 + 24.826 + // Track metaspace memory usage statistic. 24.827 + track_metaspace_memory_usage(); 24.828 + 24.829 + return mem; 24.830 } 24.831 24.832 void SpaceManager::print_on(outputStream* st) const { 24.833 @@ -2366,6 +2471,7 @@ 24.834 inc_used_metrics(word_size); 24.835 return current_chunk()->allocate(word_size); // caller handles null result 24.836 } 24.837 + 24.838 if (current_chunk() != NULL) { 24.839 result = current_chunk()->allocate(word_size); 24.840 } 24.841 @@ -2373,7 +2479,8 @@ 24.842 if (result == NULL) { 24.843 result = grow_and_allocate(word_size); 24.844 } 24.845 - if (result != 0) { 24.846 + 24.847 + if (result != NULL) { 24.848 inc_used_metrics(word_size); 24.849 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 24.850 "Head of the list is being allocated"); 24.851 @@ -2639,24 +2746,26 @@ 24.852 void MetaspaceAux::print_on(outputStream* out) { 24.853 Metaspace::MetadataType nct = Metaspace::NonClassType; 24.854 24.855 - out->print_cr(" Metaspace total " 24.856 - SIZE_FORMAT "K, used " SIZE_FORMAT "K," 24.857 - " reserved " SIZE_FORMAT "K", 24.858 - allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K); 24.859 - 24.860 - out->print_cr(" data space " 24.861 - SIZE_FORMAT "K, used " SIZE_FORMAT "K," 24.862 - " reserved " SIZE_FORMAT "K", 24.863 - allocated_capacity_bytes(nct)/K, 24.864 - allocated_used_bytes(nct)/K, 24.865 - reserved_bytes(nct)/K); 24.866 + out->print_cr(" Metaspace " 24.867 + "used " SIZE_FORMAT "K, " 24.868 + "capacity " SIZE_FORMAT "K, " 24.869 + "committed " SIZE_FORMAT "K, " 24.870 + "reserved " SIZE_FORMAT "K", 24.871 + allocated_used_bytes()/K, 24.872 + allocated_capacity_bytes()/K, 24.873 + committed_bytes()/K, 24.874 + reserved_bytes()/K); 24.875 + 24.876 if (Metaspace::using_class_space()) { 24.877 Metaspace::MetadataType ct = Metaspace::ClassType; 24.878 out->print_cr(" class space " 24.879 - SIZE_FORMAT "K, used " SIZE_FORMAT "K," 24.880 - " reserved " SIZE_FORMAT "K", 24.881 + "used " SIZE_FORMAT "K, " 24.882 + "capacity " SIZE_FORMAT "K, " 24.883 + "committed " SIZE_FORMAT "K, " 24.884 + 
"reserved " SIZE_FORMAT "K", 24.885 + allocated_used_bytes(ct)/K, 24.886 allocated_capacity_bytes(ct)/K, 24.887 - allocated_used_bytes(ct)/K, 24.888 + committed_bytes(ct)/K, 24.889 reserved_bytes(ct)/K); 24.890 } 24.891 } 24.892 @@ -2808,6 +2917,9 @@ 24.893 size_t Metaspace::_first_chunk_word_size = 0; 24.894 size_t Metaspace::_first_class_chunk_word_size = 0; 24.895 24.896 +size_t Metaspace::_commit_alignment = 0; 24.897 +size_t Metaspace::_reserve_alignment = 0; 24.898 + 24.899 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 24.900 initialize(lock, type); 24.901 } 24.902 @@ -2869,21 +2981,30 @@ 24.903 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 24.904 assert(class_metaspace_size() < KlassEncodingMetaspaceMax, 24.905 "Metaspace size is too big"); 24.906 + assert_is_ptr_aligned(requested_addr, _reserve_alignment); 24.907 + assert_is_ptr_aligned(cds_base, _reserve_alignment); 24.908 + assert_is_size_aligned(class_metaspace_size(), _reserve_alignment); 24.909 + 24.910 + // Don't use large pages for the class space. 24.911 + bool large_pages = false; 24.912 24.913 ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), 24.914 - os::vm_allocation_granularity(), 24.915 - false, requested_addr, 0); 24.916 + _reserve_alignment, 24.917 + large_pages, 24.918 + requested_addr, 0); 24.919 if (!metaspace_rs.is_reserved()) { 24.920 if (UseSharedSpaces) { 24.921 + size_t increment = align_size_up(1*G, _reserve_alignment); 24.922 + 24.923 // Keep trying to allocate the metaspace, increasing the requested_addr 24.924 // by 1GB each time, until we reach an address that will no longer allow 24.925 // use of CDS with compressed klass pointers. 24.926 char *addr = requested_addr; 24.927 - while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) && 24.928 - can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) { 24.929 - addr = addr + 1*G; 24.930 + while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 24.931 + can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 24.932 + addr = addr + increment; 24.933 metaspace_rs = ReservedSpace(class_metaspace_size(), 24.934 - os::vm_allocation_granularity(), false, addr, 0); 24.935 + _reserve_alignment, large_pages, addr, 0); 24.936 } 24.937 } 24.938 24.939 @@ -2894,7 +3015,7 @@ 24.940 // So, UseCompressedClassPointers cannot be turned off at this point. 24.941 if (!metaspace_rs.is_reserved()) { 24.942 metaspace_rs = ReservedSpace(class_metaspace_size(), 24.943 - os::vm_allocation_granularity(), false); 24.944 + _reserve_alignment, large_pages); 24.945 if (!metaspace_rs.is_reserved()) { 24.946 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 24.947 class_metaspace_size())); 24.948 @@ -2933,34 +3054,96 @@ 24.949 assert(using_class_space(), "Must be using class space"); 24.950 _class_space_list = new VirtualSpaceList(rs); 24.951 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 24.952 + 24.953 + if (!_class_space_list->initialization_succeeded()) { 24.954 + vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 24.955 + } 24.956 } 24.957 24.958 #endif 24.959 24.960 +// Align down. If the aligning result in 0, return 'alignment'. 
24.961 +static size_t restricted_align_down(size_t size, size_t alignment) { 24.962 + return MAX2(alignment, align_size_down_(size, alignment)); 24.963 +} 24.964 + 24.965 +void Metaspace::ergo_initialize() { 24.966 + if (DumpSharedSpaces) { 24.967 + // Using large pages when dumping the shared archive is currently not implemented. 24.968 + FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 24.969 + } 24.970 + 24.971 + size_t page_size = os::vm_page_size(); 24.972 + if (UseLargePages && UseLargePagesInMetaspace) { 24.973 + page_size = os::large_page_size(); 24.974 + } 24.975 + 24.976 + _commit_alignment = page_size; 24.977 + _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 24.978 + 24.979 + // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 24.980 + // override if MaxMetaspaceSize was set on the command line or not. 24.981 + // This information is needed later to conform to the specification of the 24.982 + // java.lang.management.MemoryUsage API. 24.983 + // 24.984 + // Ideally, we would be able to set the default value of MaxMetaspaceSize in 24.985 + // globals.hpp to the aligned value, but this is not possible, since the 24.986 + // alignment depends on other flags being parsed. 24.987 + MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment); 24.988 + 24.989 + if (MetaspaceSize > MaxMetaspaceSize) { 24.990 + MetaspaceSize = MaxMetaspaceSize; 24.991 + } 24.992 + 24.993 + MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment); 24.994 + 24.995 + assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 24.996 + 24.997 + if (MetaspaceSize < 256*K) { 24.998 + vm_exit_during_initialization("Too small initial Metaspace size"); 24.999 + } 24.1000 + 24.1001 + MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment); 24.1002 + MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment); 24.1003 + 24.1004 + CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment); 24.1005 + set_class_metaspace_size(CompressedClassSpaceSize); 24.1006 +} 24.1007 + 24.1008 void Metaspace::global_initialize() { 24.1009 // Initialize the alignment for shared spaces. 24.1010 int max_alignment = os::vm_page_size(); 24.1011 size_t cds_total = 0; 24.1012 24.1013 - set_class_metaspace_size(align_size_up(CompressedClassSpaceSize, 24.1014 - os::vm_allocation_granularity())); 24.1015 - 24.1016 MetaspaceShared::set_max_alignment(max_alignment); 24.1017 24.1018 if (DumpSharedSpaces) { 24.1019 - SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 24.1020 + SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 24.1021 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 24.1022 - SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 24.1023 - SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 24.1024 + SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 24.1025 + SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 24.1026 24.1027 // Initialize with the sum of the shared space sizes. The read-only 24.1028 // and read write metaspace chunks will be allocated out of this and the 24.1029 // remainder is the misc code and data chunks. 
24.1030 cds_total = FileMapInfo::shared_spaces_size(); 24.1031 + cds_total = align_size_up(cds_total, _reserve_alignment); 24.1032 _space_list = new VirtualSpaceList(cds_total/wordSize); 24.1033 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 24.1034 24.1035 + if (!_space_list->initialization_succeeded()) { 24.1036 + vm_exit_during_initialization("Unable to dump shared archive.", NULL); 24.1037 + } 24.1038 + 24.1039 #ifdef _LP64 24.1040 + if (cds_total + class_metaspace_size() > (uint64_t)max_juint) { 24.1041 + vm_exit_during_initialization("Unable to dump shared archive.", 24.1042 + err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 24.1043 + SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 24.1044 + "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(), 24.1045 + cds_total + class_metaspace_size(), (size_t)max_juint)); 24.1046 + } 24.1047 + 24.1048 // Set the compressed klass pointer base so that decoding of these pointers works 24.1049 // properly when creating the shared archive. 24.1050 assert(UseCompressedOops && UseCompressedClassPointers, 24.1051 @@ -2971,9 +3154,6 @@ 24.1052 _space_list->current_virtual_space()->bottom()); 24.1053 } 24.1054 24.1055 - // Set the shift to zero. 24.1056 - assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total, 24.1057 - "CDS region is too large"); 24.1058 Universe::set_narrow_klass_shift(0); 24.1059 #endif 24.1060 24.1061 @@ -2992,12 +3172,12 @@ 24.1062 // Map in spaces now also 24.1063 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 24.1064 FileMapInfo::set_current_info(mapinfo); 24.1065 + cds_total = FileMapInfo::shared_spaces_size(); 24.1066 + cds_address = (address)mapinfo->region_base(0); 24.1067 } else { 24.1068 assert(!mapinfo->is_open() && !UseSharedSpaces, 24.1069 "archive file not closed or shared spaces not disabled."); 24.1070 } 24.1071 - cds_total = FileMapInfo::shared_spaces_size(); 24.1072 - cds_address = (address)mapinfo->region_base(0); 24.1073 } 24.1074 24.1075 #ifdef _LP64 24.1076 @@ -3005,7 +3185,9 @@ 24.1077 // above the heap and above the CDS area (if it exists). 24.1078 if (using_class_space()) { 24.1079 if (UseSharedSpaces) { 24.1080 - allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address); 24.1081 + char* cds_end = (char*)(cds_address + cds_total); 24.1082 + cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); 24.1083 + allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); 24.1084 } else { 24.1085 allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); 24.1086 } 24.1087 @@ -3023,11 +3205,19 @@ 24.1088 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 24.1089 // Arbitrarily set the initial virtual space to a multiple 24.1090 // of the boot class loader size. 24.1091 - size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size(); 24.1092 + size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; 24.1093 + word_size = align_size_up(word_size, Metaspace::reserve_alignment_words()); 24.1094 + 24.1095 // Initialize the list of virtual spaces. 
24.1096 _space_list = new VirtualSpaceList(word_size); 24.1097 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 24.1098 + 24.1099 + if (!_space_list->initialization_succeeded()) { 24.1100 + vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 24.1101 + } 24.1102 } 24.1103 + 24.1104 + MetaspaceGC::initialize(); 24.1105 } 24.1106 24.1107 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 24.1108 @@ -3039,7 +3229,7 @@ 24.1109 return chunk; 24.1110 } 24.1111 24.1112 - return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch); 24.1113 + return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 24.1114 } 24.1115 24.1116 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 24.1117 @@ -3112,19 +3302,18 @@ 24.1118 } 24.1119 24.1120 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 24.1121 - MetaWord* result; 24.1122 - MetaspaceGC::set_expand_after_GC(true); 24.1123 - size_t before_inc = MetaspaceGC::capacity_until_GC(); 24.1124 - size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; 24.1125 - MetaspaceGC::inc_capacity_until_GC(delta_bytes); 24.1126 + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 24.1127 + assert(delta_bytes > 0, "Must be"); 24.1128 + 24.1129 + size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes); 24.1130 + size_t before_inc = after_inc - delta_bytes; 24.1131 + 24.1132 if (PrintGCDetails && Verbose) { 24.1133 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 24.1134 - " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); 24.1135 + " to " SIZE_FORMAT, before_inc, after_inc); 24.1136 } 24.1137 24.1138 - result = allocate(word_size, mdtype); 24.1139 - 24.1140 - return result; 24.1141 + return allocate(word_size, mdtype); 24.1142 } 24.1143 24.1144 // Space allocated in the Metaspace. This may 24.1145 @@ -3206,6 +3395,7 @@ 24.1146 } 24.1147 } 24.1148 24.1149 + 24.1150 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 24.1151 bool read_only, MetaspaceObj::Type type, TRAPS) { 24.1152 if (HAS_PENDING_EXCEPTION) { 24.1153 @@ -3213,20 +3403,16 @@ 24.1154 return NULL; // caller does a CHECK_NULL too 24.1155 } 24.1156 24.1157 - MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 24.1158 - 24.1159 - // SSS: Should we align the allocations and make sure the sizes are aligned. 24.1160 - MetaWord* result = NULL; 24.1161 - 24.1162 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 24.1163 "ClassLoaderData::the_null_class_loader_data() should have been used."); 24.1164 + 24.1165 // Allocate in metaspaces without taking out a lock, because it deadlocks 24.1166 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 24.1167 // to revisit this for application class data sharing. 24.1168 if (DumpSharedSpaces) { 24.1169 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 24.1170 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 24.1171 - result = space->allocate(word_size, NonClassType); 24.1172 + MetaWord* result = space->allocate(word_size, NonClassType); 24.1173 if (result == NULL) { 24.1174 report_out_of_shared_space(read_only ? 
SharedReadOnly : SharedReadWrite); 24.1175 } else { 24.1176 @@ -3235,42 +3421,64 @@ 24.1177 return Metablock::initialize(result, word_size); 24.1178 } 24.1179 24.1180 - result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 24.1181 + MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 24.1182 + 24.1183 + // Try to allocate metadata. 24.1184 + MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 24.1185 24.1186 if (result == NULL) { 24.1187 - // Try to clean out some memory and retry. 24.1188 - result = 24.1189 - Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 24.1190 - loader_data, word_size, mdtype); 24.1191 - 24.1192 - // If result is still null, we are out of memory. 24.1193 - if (result == NULL) { 24.1194 - if (Verbose && TraceMetadataChunkAllocation) { 24.1195 - gclog_or_tty->print_cr("Metaspace allocation failed for size " 24.1196 - SIZE_FORMAT, word_size); 24.1197 - if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty); 24.1198 - MetaspaceAux::dump(gclog_or_tty); 24.1199 - } 24.1200 - // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 24.1201 - const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" : 24.1202 - "Metadata space"; 24.1203 - report_java_out_of_memory(space_string); 24.1204 - 24.1205 - if (JvmtiExport::should_post_resource_exhausted()) { 24.1206 - JvmtiExport::post_resource_exhausted( 24.1207 - JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 24.1208 - space_string); 24.1209 - } 24.1210 - if (is_class_space_allocation(mdtype)) { 24.1211 - THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); 24.1212 - } else { 24.1213 - THROW_OOP_0(Universe::out_of_memory_error_metaspace()); 24.1214 - } 24.1215 + // Allocation failed. 24.1216 + if (is_init_completed()) { 24.1217 + // Only start a GC if the bootstrapping has completed. 24.1218 + 24.1219 + // Try to clean out some memory and retry. 24.1220 + result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 24.1221 + loader_data, word_size, mdtype); 24.1222 } 24.1223 } 24.1224 + 24.1225 + if (result == NULL) { 24.1226 + report_metadata_oome(loader_data, word_size, mdtype, THREAD); 24.1227 + // Will not reach here. 24.1228 + return NULL; 24.1229 + } 24.1230 + 24.1231 return Metablock::initialize(result, word_size); 24.1232 } 24.1233 24.1234 +void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) { 24.1235 + // If result is still null, we are out of memory. 24.1236 + if (Verbose && TraceMetadataChunkAllocation) { 24.1237 + gclog_or_tty->print_cr("Metaspace allocation failed for size " 24.1238 + SIZE_FORMAT, word_size); 24.1239 + if (loader_data->metaspace_or_null() != NULL) { 24.1240 + loader_data->dump(gclog_or_tty); 24.1241 + } 24.1242 + MetaspaceAux::dump(gclog_or_tty); 24.1243 + } 24.1244 + 24.1245 + // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 24.1246 + const char* space_string = is_class_space_allocation(mdtype) ? 
"Compressed class space" : 24.1247 + "Metadata space"; 24.1248 + report_java_out_of_memory(space_string); 24.1249 + 24.1250 + if (JvmtiExport::should_post_resource_exhausted()) { 24.1251 + JvmtiExport::post_resource_exhausted( 24.1252 + JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 24.1253 + space_string); 24.1254 + } 24.1255 + 24.1256 + if (!is_init_completed()) { 24.1257 + vm_exit_during_initialization("OutOfMemoryError", space_string); 24.1258 + } 24.1259 + 24.1260 + if (is_class_space_allocation(mdtype)) { 24.1261 + THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 24.1262 + } else { 24.1263 + THROW_OOP(Universe::out_of_memory_error_metaspace()); 24.1264 + } 24.1265 +} 24.1266 + 24.1267 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 24.1268 assert(DumpSharedSpaces, "sanity"); 24.1269
25.1 --- a/src/share/vm/memory/metaspace.hpp Thu Oct 10 13:25:51 2013 -0700 25.2 +++ b/src/share/vm/memory/metaspace.hpp Fri Oct 11 08:27:21 2013 -0700 25.3 @@ -87,9 +87,10 @@ 25.4 friend class MetaspaceAux; 25.5 25.6 public: 25.7 - enum MetadataType {ClassType = 0, 25.8 - NonClassType = ClassType + 1, 25.9 - MetadataTypeCount = ClassType + 2 25.10 + enum MetadataType { 25.11 + ClassType, 25.12 + NonClassType, 25.13 + MetadataTypeCount 25.14 }; 25.15 enum MetaspaceType { 25.16 StandardMetaspaceType, 25.17 @@ -103,6 +104,9 @@ 25.18 private: 25.19 void initialize(Mutex* lock, MetaspaceType type); 25.20 25.21 + // Get the first chunk for a Metaspace. Used for 25.22 + // special cases such as the boot class loader, reflection 25.23 + // class loader and anonymous class loader. 25.24 Metachunk* get_initialization_chunk(MetadataType mdtype, 25.25 size_t chunk_word_size, 25.26 size_t chunk_bunch); 25.27 @@ -123,6 +127,9 @@ 25.28 static size_t _first_chunk_word_size; 25.29 static size_t _first_class_chunk_word_size; 25.30 25.31 + static size_t _commit_alignment; 25.32 + static size_t _reserve_alignment; 25.33 + 25.34 SpaceManager* _vsm; 25.35 SpaceManager* vsm() const { return _vsm; } 25.36 25.37 @@ -191,12 +198,17 @@ 25.38 Metaspace(Mutex* lock, MetaspaceType type); 25.39 ~Metaspace(); 25.40 25.41 - // Initialize globals for Metaspace 25.42 + static void ergo_initialize(); 25.43 static void global_initialize(); 25.44 25.45 static size_t first_chunk_word_size() { return _first_chunk_word_size; } 25.46 static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } 25.47 25.48 + static size_t reserve_alignment() { return _reserve_alignment; } 25.49 + static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; } 25.50 + static size_t commit_alignment() { return _commit_alignment; } 25.51 + static size_t commit_alignment_words() { return _commit_alignment / BytesPerWord; } 25.52 + 25.53 char* bottom() const; 25.54 size_t used_words_slow(MetadataType mdtype) const; 25.55 size_t free_words_slow(MetadataType mdtype) const; 25.56 @@ -219,6 +231,9 @@ 25.57 static void purge(MetadataType mdtype); 25.58 static void purge(); 25.59 25.60 + static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, 25.61 + MetadataType mdtype, TRAPS); 25.62 + 25.63 void print_on(outputStream* st) const; 25.64 // Debugging support 25.65 void verify(); 25.66 @@ -352,17 +367,10 @@ 25.67 25.68 class MetaspaceGC : AllStatic { 25.69 25.70 - // The current high-water-mark for inducing a GC. When 25.71 - // the capacity of all space in the virtual lists reaches this value, 25.72 - // a GC is induced and the value is increased. This should be changed 25.73 - // to the space actually used for allocations to avoid affects of 25.74 - // fragmentation losses to partially used chunks. Size is in words. 25.75 - static size_t _capacity_until_GC; 25.76 - 25.77 - // After a GC is done any allocation that fails should try to expand 25.78 - // the capacity of the Metaspaces. This flag is set during attempts 25.79 - // to allocate in the VMGCOperation that does the GC. 25.80 - static bool _expand_after_GC; 25.81 + // The current high-water-mark for inducing a GC. 25.82 + // When committed memory of all metaspaces reaches this value, 25.83 + // a GC is induced and the value is increased. Size is in bytes. 25.84 + static volatile intptr_t _capacity_until_GC; 25.85 25.86 // For a CMS collection, signal that a concurrent collection should 25.87 // be started. 
25.88 @@ -370,20 +378,16 @@ 25.89 25.90 static uint _shrink_factor; 25.91 25.92 - static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; } 25.93 - 25.94 static size_t shrink_factor() { return _shrink_factor; } 25.95 void set_shrink_factor(uint v) { _shrink_factor = v; } 25.96 25.97 public: 25.98 25.99 - static size_t capacity_until_GC() { return _capacity_until_GC; } 25.100 - static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } 25.101 - static void dec_capacity_until_GC(size_t v) { 25.102 - _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0; 25.103 - } 25.104 - static bool expand_after_GC() { return _expand_after_GC; } 25.105 - static void set_expand_after_GC(bool v) { _expand_after_GC = v; } 25.106 + static void initialize() { _capacity_until_GC = MetaspaceSize; } 25.107 + 25.108 + static size_t capacity_until_GC(); 25.109 + static size_t inc_capacity_until_GC(size_t v); 25.110 + static size_t dec_capacity_until_GC(size_t v); 25.111 25.112 static bool should_concurrent_collect() { return _should_concurrent_collect; } 25.113 static void set_should_concurrent_collect(bool v) { 25.114 @@ -391,11 +395,14 @@ 25.115 } 25.116 25.117 // The amount to increase the high-water-mark (_capacity_until_GC) 25.118 - static size_t delta_capacity_until_GC(size_t word_size); 25.119 + static size_t delta_capacity_until_GC(size_t bytes); 25.120 25.121 - // It is expected that this will be called when the current capacity 25.122 - // has been used and a GC should be considered. 25.123 - static bool should_expand(VirtualSpaceList* vsl, size_t word_size); 25.124 + // Tells if we have can expand metaspace without hitting set limits. 25.125 + static bool can_expand(size_t words, bool is_class); 25.126 + 25.127 + // Returns amount that we can expand without hitting a GC, 25.128 + // measured in words. 25.129 + static size_t allowed_expansion(); 25.130 25.131 // Calculate the new high-water mark at which to induce 25.132 // a GC.
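The metaspace.hpp changes above turn _capacity_until_GC into a volatile counter updated only through inc_capacity_until_GC()/dec_capacity_until_GC(), which return the value after the update so a caller such as expand_and_allocate() can derive the "before" value without a separate racy read. A sketch of that intended semantics, using std::atomic purely as a stand-in for the VM's own atomic primitives (names here are illustrative, not the HotSpot API):

    #include <atomic>
    #include <cstddef>

    // Stand-in for the shared high-water mark; MetaspaceSize would seed it in initialize().
    static std::atomic<size_t> capacity_until_gc{0};

    // Bump the threshold and return the value after the increment, so the caller
    // can compute the old value as (after - delta), as expand_and_allocate() does.
    static size_t inc_capacity_until_gc(size_t bytes) {
      return capacity_until_gc.fetch_add(bytes, std::memory_order_relaxed) + bytes;
    }

    static size_t current_capacity_until_gc() {
      return capacity_until_gc.load(std::memory_order_relaxed);
    }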
26.1 --- a/src/share/vm/opto/graphKit.cpp Thu Oct 10 13:25:51 2013 -0700 26.2 +++ b/src/share/vm/opto/graphKit.cpp Fri Oct 11 08:27:21 2013 -0700 26.3 @@ -3713,7 +3713,8 @@ 26.4 Node* no_base = __ top(); 26.5 float likely = PROB_LIKELY(0.999); 26.6 float unlikely = PROB_UNLIKELY(0.999); 26.7 - Node* zero = __ ConI(0); 26.8 + Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val()); 26.9 + Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val()); 26.10 Node* zeroX = __ ConX(0); 26.11 26.12 // Get the alias_index for raw card-mark memory 26.13 @@ -3769,8 +3770,16 @@ 26.14 // load the original value of the card 26.15 Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw); 26.16 26.17 - __ if_then(card_val, BoolTest::ne, zero); { 26.18 - g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf); 26.19 + __ if_then(card_val, BoolTest::ne, young_card); { 26.20 + sync_kit(ideal); 26.21 + // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier. 26.22 + insert_mem_bar(Op_MemBarVolatile, oop_store); 26.23 + __ sync_kit(this); 26.24 + 26.25 + Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw); 26.26 + __ if_then(card_val_reload, BoolTest::ne, dirty_card); { 26.27 + g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf); 26.28 + } __ end_if(); 26.29 } __ end_if(); 26.30 } __ end_if(); 26.31 } __ end_if();
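The graphKit.cpp hunk above changes the C2 G1 post-barrier: a card already marked as young is skipped, and for any other card a MemBarVolatile (StoreLoad effect) is inserted and the card is re-read before it is dirtied and enqueued. Rendered as plain C++ over an assumed card table (the card values and helper functions below are illustrative stand-ins, not the real G1SATBCardTableModRefBS constants or compiler IR):

    #include <atomic>

    typedef unsigned char CardValue;
    static const CardValue dirty_card    = 0;  // assumed stand-in for dirty_card_val()
    static const CardValue g1_young_card = 1;  // assumed stand-in for g1_young_card_val()

    static void storeload_fence() {            // stands in for the Op_MemBarVolatile barrier
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    static void enqueue_card(volatile CardValue* /*card*/) {
      // Hand the card to the dirty-card queue for refinement (elided in this sketch).
    }

    void g1_post_barrier(volatile CardValue* card) {
      if (*card != g1_young_card) {   // stores into young regions need no refinement
        storeload_fence();            // order the reference store before re-reading the card
        if (*card != dirty_card) {    // another thread may already have dirtied it
          *card = dirty_card;
          enqueue_card(card);
        }
      }
    }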
27.1 --- a/src/share/vm/runtime/arguments.cpp Thu Oct 10 13:25:51 2013 -0700 27.2 +++ b/src/share/vm/runtime/arguments.cpp Fri Oct 11 08:27:21 2013 -0700 27.3 @@ -2657,16 +2657,16 @@ 27.4 FLAG_SET_CMDLINE(bool, BackgroundCompilation, false); 27.5 // -Xmn for compatibility with other JVM vendors 27.6 } else if (match_option(option, "-Xmn", &tail)) { 27.7 - julong long_initial_eden_size = 0; 27.8 - ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1); 27.9 + julong long_initial_young_size = 0; 27.10 + ArgsRange errcode = parse_memory_size(tail, &long_initial_young_size, 1); 27.11 if (errcode != arg_in_range) { 27.12 jio_fprintf(defaultStream::error_stream(), 27.13 - "Invalid initial eden size: %s\n", option->optionString); 27.14 + "Invalid initial young generation size: %s\n", option->optionString); 27.15 describe_range_error(errcode); 27.16 return JNI_EINVAL; 27.17 } 27.18 - FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size); 27.19 - FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size); 27.20 + FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size); 27.21 + FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size); 27.22 // -Xms 27.23 } else if (match_option(option, "-Xms", &tail)) { 27.24 julong long_initial_heap_size = 0; 27.25 @@ -3666,6 +3666,9 @@ 27.26 assert(verify_serial_gc_flags(), "SerialGC unset"); 27.27 #endif // INCLUDE_ALL_GCS 27.28 27.29 + // Initialize Metaspace flags and alignments. 27.30 + Metaspace::ergo_initialize(); 27.31 + 27.32 // Set bytecode rewriting flags 27.33 set_bytecode_flags(); 27.34
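The arguments.cpp hunk above reports -Xmn as an initial young generation size (rather than an eden size) and pins both NewSize and MaxNewSize to the parsed value. A rough illustration of that flow; parse_size() below is a deliberate simplification and not the real parse_memory_size():

    #include <cstdio>
    #include <cstdlib>

    // Simplified memory-size parser: a decimal number with an optional k/m/g suffix.
    static unsigned long long parse_size(const char* s) {
      char* end = NULL;
      unsigned long long v = std::strtoull(s, &end, 10);
      switch (end != NULL ? *end : '\0') {
        case 'k': case 'K': return v << 10;
        case 'm': case 'M': return v << 20;
        case 'g': case 'G': return v << 30;
        default:            return v;
      }
    }

    int main() {
      unsigned long long young = parse_size("256m");          // e.g. -Xmn256m
      unsigned long long NewSize = young, MaxNewSize = young;  // both flags set to the same value
      std::printf("NewSize=%llu MaxNewSize=%llu\n", NewSize, MaxNewSize);
      return 0;
    }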
28.1 --- a/src/share/vm/runtime/globals.hpp Thu Oct 10 13:25:51 2013 -0700 28.2 +++ b/src/share/vm/runtime/globals.hpp Fri Oct 11 08:27:21 2013 -0700 28.3 @@ -481,21 +481,21 @@ 28.4 #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \ 28.5 \ 28.6 lp64_product(bool, UseCompressedOops, false, \ 28.7 - "Use 32-bit object references in 64-bit VM " \ 28.8 - "lp64_product means flag is always constant in 32 bit VM") \ 28.9 + "Use 32-bit object references in 64-bit VM. " \ 28.10 + "lp64_product means flag is always constant in 32 bit VM") \ 28.11 \ 28.12 lp64_product(bool, UseCompressedClassPointers, false, \ 28.13 - "Use 32-bit class pointers in 64-bit VM " \ 28.14 - "lp64_product means flag is always constant in 32 bit VM") \ 28.15 + "Use 32-bit class pointers in 64-bit VM. " \ 28.16 + "lp64_product means flag is always constant in 32 bit VM") \ 28.17 \ 28.18 notproduct(bool, CheckCompressedOops, true, \ 28.19 - "generate checks in encoding/decoding code in debug VM") \ 28.20 + "Generate checks in encoding/decoding code in debug VM") \ 28.21 \ 28.22 product_pd(uintx, HeapBaseMinAddress, \ 28.23 - "OS specific low limit for heap base address") \ 28.24 + "OS specific low limit for heap base address") \ 28.25 \ 28.26 diagnostic(bool, PrintCompressedOopsMode, false, \ 28.27 - "Print compressed oops base address and encoding mode") \ 28.28 + "Print compressed oops base address and encoding mode") \ 28.29 \ 28.30 lp64_product(intx, ObjectAlignmentInBytes, 8, \ 28.31 "Default object alignment in bytes, 8 is minimum") \ 28.32 @@ -517,7 +517,7 @@ 28.33 "Use lwsync instruction if true, else use slower sync") \ 28.34 \ 28.35 develop(bool, CleanChunkPoolAsync, falseInEmbedded, \ 28.36 - "Whether to clean the chunk pool asynchronously") \ 28.37 + "Clean the chunk pool asynchronously") \ 28.38 \ 28.39 /* Temporary: See 6948537 */ \ 28.40 experimental(bool, UseMemSetInBOT, true, \ 28.41 @@ -527,10 +527,12 @@ 28.42 "Enable normal processing of flags relating to field diagnostics")\ 28.43 \ 28.44 experimental(bool, UnlockExperimentalVMOptions, false, \ 28.45 - "Enable normal processing of flags relating to experimental features")\ 28.46 + "Enable normal processing of flags relating to experimental " \ 28.47 + "features") \ 28.48 \ 28.49 product(bool, JavaMonitorsInStackTrace, true, \ 28.50 - "Print info. about Java monitor locks when the stacks are dumped")\ 28.51 + "Print information about Java monitor locks when the stacks are" \ 28.52 + "dumped") \ 28.53 \ 28.54 product_pd(bool, UseLargePages, \ 28.55 "Use large page memory") \ 28.56 @@ -541,8 +543,12 @@ 28.57 develop(bool, LargePagesIndividualAllocationInjectError, false, \ 28.58 "Fail large pages individual allocation") \ 28.59 \ 28.60 + product(bool, UseLargePagesInMetaspace, false, \ 28.61 + "Use large page memory in metaspace. 
" \ 28.62 + "Only used if UseLargePages is enabled.") \ 28.63 + \ 28.64 develop(bool, TracePageSizes, false, \ 28.65 - "Trace page size selection and usage.") \ 28.66 + "Trace page size selection and usage") \ 28.67 \ 28.68 product(bool, UseNUMA, false, \ 28.69 "Use NUMA if available") \ 28.70 @@ -557,12 +563,12 @@ 28.71 "Force NUMA optimizations on single-node/UMA systems") \ 28.72 \ 28.73 product(uintx, NUMAChunkResizeWeight, 20, \ 28.74 - "Percentage (0-100) used to weigh the current sample when " \ 28.75 + "Percentage (0-100) used to weigh the current sample when " \ 28.76 "computing exponentially decaying average for " \ 28.77 "AdaptiveNUMAChunkSizing") \ 28.78 \ 28.79 product(uintx, NUMASpaceResizeRate, 1*G, \ 28.80 - "Do not reallocate more that this amount per collection") \ 28.81 + "Do not reallocate more than this amount per collection") \ 28.82 \ 28.83 product(bool, UseAdaptiveNUMAChunkSizing, true, \ 28.84 "Enable adaptive chunk sizing for NUMA") \ 28.85 @@ -579,17 +585,17 @@ 28.86 product(intx, UseSSE, 99, \ 28.87 "Highest supported SSE instructions set on x86/x64") \ 28.88 \ 28.89 - product(bool, UseAES, false, \ 28.90 + product(bool, UseAES, false, \ 28.91 "Control whether AES instructions can be used on x86/x64") \ 28.92 \ 28.93 product(uintx, LargePageSizeInBytes, 0, \ 28.94 - "Large page size (0 to let VM choose the page size") \ 28.95 + "Large page size (0 to let VM choose the page size)") \ 28.96 \ 28.97 product(uintx, LargePageHeapSizeThreshold, 128*M, \ 28.98 - "Use large pages if max heap is at least this big") \ 28.99 + "Use large pages if maximum heap is at least this big") \ 28.100 \ 28.101 product(bool, ForceTimeHighResolution, false, \ 28.102 - "Using high time resolution(For Win32 only)") \ 28.103 + "Using high time resolution (for Win32 only)") \ 28.104 \ 28.105 develop(bool, TraceItables, false, \ 28.106 "Trace initialization and use of itables") \ 28.107 @@ -605,10 +611,10 @@ 28.108 \ 28.109 develop(bool, TraceLongCompiles, false, \ 28.110 "Print out every time compilation is longer than " \ 28.111 - "a given threashold") \ 28.112 + "a given threshold") \ 28.113 \ 28.114 develop(bool, SafepointALot, false, \ 28.115 - "Generates a lot of safepoints. Works with " \ 28.116 + "Generate a lot of safepoints. 
This works with " \ 28.117 "GuaranteedSafepointInterval") \ 28.118 \ 28.119 product_pd(bool, BackgroundCompilation, \ 28.120 @@ -616,13 +622,13 @@ 28.121 "compilation") \ 28.122 \ 28.123 product(bool, PrintVMQWaitTime, false, \ 28.124 - "Prints out the waiting time in VM operation queue") \ 28.125 + "Print out the waiting time in VM operation queue") \ 28.126 \ 28.127 develop(bool, NoYieldsInMicrolock, false, \ 28.128 "Disable yields in microlock") \ 28.129 \ 28.130 develop(bool, TraceOopMapGeneration, false, \ 28.131 - "Shows oopmap generation") \ 28.132 + "Show OopMapGeneration") \ 28.133 \ 28.134 product(bool, MethodFlushing, true, \ 28.135 "Reclamation of zombie and not-entrant methods") \ 28.136 @@ -631,10 +637,11 @@ 28.137 "Verify stack of each thread when it is entering a runtime call") \ 28.138 \ 28.139 diagnostic(bool, ForceUnreachable, false, \ 28.140 - "Make all non code cache addresses to be unreachable with forcing use of 64bit literal fixups") \ 28.141 + "Make all non code cache addresses to be unreachable by " \ 28.142 + "forcing use of 64bit literal fixups") \ 28.143 \ 28.144 notproduct(bool, StressDerivedPointers, false, \ 28.145 - "Force scavenge when a derived pointers is detected on stack " \ 28.146 + "Force scavenge when a derived pointer is detected on stack " \ 28.147 "after rtm call") \ 28.148 \ 28.149 develop(bool, TraceDerivedPointers, false, \ 28.150 @@ -653,86 +660,86 @@ 28.151 "Use Inline Caches for virtual calls ") \ 28.152 \ 28.153 develop(bool, InlineArrayCopy, true, \ 28.154 - "inline arraycopy native that is known to be part of " \ 28.155 + "Inline arraycopy native that is known to be part of " \ 28.156 "base library DLL") \ 28.157 \ 28.158 develop(bool, InlineObjectHash, true, \ 28.159 - "inline Object::hashCode() native that is known to be part " \ 28.160 + "Inline Object::hashCode() native that is known to be part " \ 28.161 "of base library DLL") \ 28.162 \ 28.163 develop(bool, InlineNatives, true, \ 28.164 - "inline natives that are known to be part of base library DLL") \ 28.165 + "Inline natives that are known to be part of base library DLL") \ 28.166 \ 28.167 develop(bool, InlineMathNatives, true, \ 28.168 - "inline SinD, CosD, etc.") \ 28.169 + "Inline SinD, CosD, etc.") \ 28.170 \ 28.171 develop(bool, InlineClassNatives, true, \ 28.172 - "inline Class.isInstance, etc") \ 28.173 + "Inline Class.isInstance, etc") \ 28.174 \ 28.175 develop(bool, InlineThreadNatives, true, \ 28.176 - "inline Thread.currentThread, etc") \ 28.177 + "Inline Thread.currentThread, etc") \ 28.178 \ 28.179 develop(bool, InlineUnsafeOps, true, \ 28.180 - "inline memory ops (native methods) from sun.misc.Unsafe") \ 28.181 + "Inline memory ops (native methods) from sun.misc.Unsafe") \ 28.182 \ 28.183 product(bool, CriticalJNINatives, true, \ 28.184 - "check for critical JNI entry points") \ 28.185 + "Check for critical JNI entry points") \ 28.186 \ 28.187 notproduct(bool, StressCriticalJNINatives, false, \ 28.188 - "Exercise register saving code in critical natives") \ 28.189 + "Exercise register saving code in critical natives") \ 28.190 \ 28.191 product(bool, UseSSE42Intrinsics, false, \ 28.192 "SSE4.2 versions of intrinsics") \ 28.193 \ 28.194 product(bool, UseAESIntrinsics, false, \ 28.195 - "use intrinsics for AES versions of crypto") \ 28.196 + "Use intrinsics for AES versions of crypto") \ 28.197 \ 28.198 product(bool, UseCRC32Intrinsics, false, \ 28.199 "use intrinsics for java.util.zip.CRC32") \ 28.200 \ 28.201 develop(bool, TraceCallFixup, false, \ 28.202 - "traces 
all call fixups") \ 28.203 + "Trace all call fixups") \ 28.204 \ 28.205 develop(bool, DeoptimizeALot, false, \ 28.206 - "deoptimize at every exit from the runtime system") \ 28.207 + "Deoptimize at every exit from the runtime system") \ 28.208 \ 28.209 notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ 28.210 - "a comma separated list of bcis to deoptimize at") \ 28.211 + "A comma separated list of bcis to deoptimize at") \ 28.212 \ 28.213 product(bool, DeoptimizeRandom, false, \ 28.214 - "deoptimize random frames on random exit from the runtime system")\ 28.215 + "Deoptimize random frames on random exit from the runtime system")\ 28.216 \ 28.217 notproduct(bool, ZombieALot, false, \ 28.218 - "creates zombies (non-entrant) at exit from the runt. system") \ 28.219 + "Create zombies (non-entrant) at exit from the runtime system") \ 28.220 \ 28.221 product(bool, UnlinkSymbolsALot, false, \ 28.222 - "unlink unreferenced symbols from the symbol table at safepoints")\ 28.223 + "Unlink unreferenced symbols from the symbol table at safepoints")\ 28.224 \ 28.225 notproduct(bool, WalkStackALot, false, \ 28.226 - "trace stack (no print) at every exit from the runtime system") \ 28.227 + "Trace stack (no print) at every exit from the runtime system") \ 28.228 \ 28.229 product(bool, Debugging, false, \ 28.230 - "set when executing debug methods in debug.ccp " \ 28.231 + "Set when executing debug methods in debug.cpp " \ 28.232 "(to prevent triggering assertions)") \ 28.233 \ 28.234 notproduct(bool, StrictSafepointChecks, trueInDebug, \ 28.235 "Enable strict checks that safepoints cannot happen for threads " \ 28.236 - "that used No_Safepoint_Verifier") \ 28.237 + "that use No_Safepoint_Verifier") \ 28.238 \ 28.239 notproduct(bool, VerifyLastFrame, false, \ 28.240 "Verify oops on last frame on entry to VM") \ 28.241 \ 28.242 develop(bool, TraceHandleAllocation, false, \ 28.243 - "Prints out warnings when suspicious many handles are allocated") \ 28.244 + "Print out warnings when suspiciously many handles are allocated")\ 28.245 \ 28.246 product(bool, UseCompilerSafepoints, true, \ 28.247 "Stop at safepoints in compiled code") \ 28.248 \ 28.249 product(bool, FailOverToOldVerifier, true, \ 28.250 - "fail over to old verifier when split verifier fails") \ 28.251 + "Fail over to old verifier when split verifier fails") \ 28.252 \ 28.253 develop(bool, ShowSafepointMsgs, false, \ 28.254 - "Show msg. 
about safepoint synch.") \ 28.255 + "Show message about safepoint synchronization") \ 28.256 \ 28.257 product(bool, SafepointTimeout, false, \ 28.258 "Time out and warn or fail after SafepointTimeoutDelay " \ 28.259 @@ -756,19 +763,19 @@ 28.260 "Trace external suspend wait failures") \ 28.261 \ 28.262 product(bool, MaxFDLimit, true, \ 28.263 - "Bump the number of file descriptors to max in solaris.") \ 28.264 + "Bump the number of file descriptors to maximum in Solaris") \ 28.265 \ 28.266 diagnostic(bool, LogEvents, true, \ 28.267 - "Enable the various ring buffer event logs") \ 28.268 + "Enable the various ring buffer event logs") \ 28.269 \ 28.270 diagnostic(uintx, LogEventsBufferEntries, 10, \ 28.271 - "Enable the various ring buffer event logs") \ 28.272 + "Number of ring buffer event logs") \ 28.273 \ 28.274 product(bool, BytecodeVerificationRemote, true, \ 28.275 - "Enables the Java bytecode verifier for remote classes") \ 28.276 + "Enable the Java bytecode verifier for remote classes") \ 28.277 \ 28.278 product(bool, BytecodeVerificationLocal, false, \ 28.279 - "Enables the Java bytecode verifier for local classes") \ 28.280 + "Enable the Java bytecode verifier for local classes") \ 28.281 \ 28.282 develop(bool, ForceFloatExceptions, trueInDebug, \ 28.283 "Force exceptions on FP stack under/overflow") \ 28.284 @@ -780,7 +787,7 @@ 28.285 "Trace java language assertions") \ 28.286 \ 28.287 notproduct(bool, CheckAssertionStatusDirectives, false, \ 28.288 - "temporary - see javaClasses.cpp") \ 28.289 + "Temporary - see javaClasses.cpp") \ 28.290 \ 28.291 notproduct(bool, PrintMallocFree, false, \ 28.292 "Trace calls to C heap malloc/free allocation") \ 28.293 @@ -799,16 +806,16 @@ 28.294 "entering the VM") \ 28.295 \ 28.296 notproduct(bool, CheckOopishValues, false, \ 28.297 - "Warn if value contains oop ( requires ZapDeadLocals)") \ 28.298 + "Warn if value contains oop (requires ZapDeadLocals)") \ 28.299 \ 28.300 develop(bool, UseMallocOnly, false, \ 28.301 - "use only malloc/free for allocation (no resource area/arena)") \ 28.302 + "Use only malloc/free for allocation (no resource area/arena)") \ 28.303 \ 28.304 develop(bool, PrintMalloc, false, \ 28.305 - "print all malloc/free calls") \ 28.306 + "Print all malloc/free calls") \ 28.307 \ 28.308 develop(bool, PrintMallocStatistics, false, \ 28.309 - "print malloc/free statistics") \ 28.310 + "Print malloc/free statistics") \ 28.311 \ 28.312 develop(bool, ZapResourceArea, trueInDebug, \ 28.313 "Zap freed resource/arena space with 0xABABABAB") \ 28.314 @@ -820,7 +827,7 @@ 28.315 "Zap freed JNI handle space with 0xFEFEFEFE") \ 28.316 \ 28.317 notproduct(bool, ZapStackSegments, trueInDebug, \ 28.318 - "Zap allocated/freed Stack segments with 0xFADFADED") \ 28.319 + "Zap allocated/freed stack segments with 0xFADFADED") \ 28.320 \ 28.321 develop(bool, ZapUnusedHeapArea, trueInDebug, \ 28.322 "Zap unused heap space with 0xBAADBABE") \ 28.323 @@ -835,7 +842,7 @@ 28.324 "Zap filler objects with 0xDEAFBABE") \ 28.325 \ 28.326 develop(bool, PrintVMMessages, true, \ 28.327 - "Print vm messages on console") \ 28.328 + "Print VM messages on console") \ 28.329 \ 28.330 product(bool, PrintGCApplicationConcurrentTime, false, \ 28.331 "Print the time the application has been running") \ 28.332 @@ -844,21 +851,21 @@ 28.333 "Print the time the application has been stopped") \ 28.334 \ 28.335 diagnostic(bool, VerboseVerification, false, \ 28.336 - "Display detailed verification details") \ 28.337 + "Display detailed verification details") \ 28.338 \ 
28.339 notproduct(uintx, ErrorHandlerTest, 0, \ 28.340 - "If > 0, provokes an error after VM initialization; the value" \ 28.341 - "determines which error to provoke. See test_error_handler()" \ 28.342 + "If > 0, provokes an error after VM initialization; the value " \ 28.343 + "determines which error to provoke. See test_error_handler() " \ 28.344 "in debug.cpp.") \ 28.345 \ 28.346 develop(bool, Verbose, false, \ 28.347 - "Prints additional debugging information from other modes") \ 28.348 + "Print additional debugging information from other modes") \ 28.349 \ 28.350 develop(bool, PrintMiscellaneous, false, \ 28.351 - "Prints uncategorized debugging information (requires +Verbose)") \ 28.352 + "Print uncategorized debugging information (requires +Verbose)") \ 28.353 \ 28.354 develop(bool, WizardMode, false, \ 28.355 - "Prints much more debugging information") \ 28.356 + "Print much more debugging information") \ 28.357 \ 28.358 product(bool, ShowMessageBoxOnError, false, \ 28.359 "Keep process alive on VM fatal error") \ 28.360 @@ -870,7 +877,7 @@ 28.361 "Let VM fatal error propagate to the OS (ie. WER on Windows)") \ 28.362 \ 28.363 product(bool, SuppressFatalErrorMessage, false, \ 28.364 - "Do NO Fatal Error report [Avoid deadlock]") \ 28.365 + "Report NO fatal error message (avoid deadlock)") \ 28.366 \ 28.367 product(ccstrlist, OnError, "", \ 28.368 "Run user-defined commands on fatal error; see VMError.cpp " \ 28.369 @@ -880,17 +887,17 @@ 28.370 "Run user-defined commands on first java.lang.OutOfMemoryError") \ 28.371 \ 28.372 manageable(bool, HeapDumpBeforeFullGC, false, \ 28.373 - "Dump heap to file before any major stop-world GC") \ 28.374 + "Dump heap to file before any major stop-the-world GC") \ 28.375 \ 28.376 manageable(bool, HeapDumpAfterFullGC, false, \ 28.377 - "Dump heap to file after any major stop-world GC") \ 28.378 + "Dump heap to file after any major stop-the-world GC") \ 28.379 \ 28.380 manageable(bool, HeapDumpOnOutOfMemoryError, false, \ 28.381 "Dump heap to file when java.lang.OutOfMemoryError is thrown") \ 28.382 \ 28.383 manageable(ccstr, HeapDumpPath, NULL, \ 28.384 - "When HeapDumpOnOutOfMemoryError is on, the path (filename or" \ 28.385 - "directory) of the dump file (defaults to java_pid<pid>.hprof" \ 28.386 + "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ 28.387 + "directory) of the dump file (defaults to java_pid<pid>.hprof " \ 28.388 "in the working directory)") \ 28.389 \ 28.390 develop(uintx, SegmentedHeapDumpThreshold, 2*G, \ 28.391 @@ -904,10 +911,10 @@ 28.392 "Execute breakpoint upon encountering VM warning") \ 28.393 \ 28.394 develop(bool, TraceVMOperation, false, \ 28.395 - "Trace vm operations") \ 28.396 + "Trace VM operations") \ 28.397 \ 28.398 develop(bool, UseFakeTimers, false, \ 28.399 - "Tells whether the VM should use system time or a fake timer") \ 28.400 + "Tell whether the VM should use system time or a fake timer") \ 28.401 \ 28.402 product(ccstr, NativeMemoryTracking, "off", \ 28.403 "Native memory tracking options") \ 28.404 @@ -917,7 +924,7 @@ 28.405 \ 28.406 diagnostic(bool, AutoShutdownNMT, true, \ 28.407 "Automatically shutdown native memory tracking under stress " \ 28.408 - "situation. When set to false, native memory tracking tries to " \ 28.409 + "situations. 
When set to false, native memory tracking tries to " \ 28.410 "stay alive at the expense of JVM performance") \ 28.411 \ 28.412 diagnostic(bool, LogCompilation, false, \ 28.413 @@ -927,12 +934,12 @@ 28.414 "Print compilations") \ 28.415 \ 28.416 diagnostic(bool, TraceNMethodInstalls, false, \ 28.417 - "Trace nmethod intallation") \ 28.418 + "Trace nmethod installation") \ 28.419 \ 28.420 diagnostic(intx, ScavengeRootsInCode, 2, \ 28.421 - "0: do not allow scavengable oops in the code cache; " \ 28.422 - "1: allow scavenging from the code cache; " \ 28.423 - "2: emit as many constants as the compiler can see") \ 28.424 + "0: do not allow scavengable oops in the code cache; " \ 28.425 + "1: allow scavenging from the code cache; " \ 28.426 + "2: emit as many constants as the compiler can see") \ 28.427 \ 28.428 product(bool, AlwaysRestoreFPU, false, \ 28.429 "Restore the FPU control word after every JNI call (expensive)") \ 28.430 @@ -953,7 +960,7 @@ 28.431 "Print assembly code (using external disassembler.so)") \ 28.432 \ 28.433 diagnostic(ccstr, PrintAssemblyOptions, NULL, \ 28.434 - "Options string passed to disassembler.so") \ 28.435 + "Print options string passed to disassembler.so") \ 28.436 \ 28.437 diagnostic(bool, PrintNMethods, false, \ 28.438 "Print assembly code for nmethods when generated") \ 28.439 @@ -974,20 +981,21 @@ 28.440 "Print exception handler tables for all nmethods when generated") \ 28.441 \ 28.442 develop(bool, StressCompiledExceptionHandlers, false, \ 28.443 - "Exercise compiled exception handlers") \ 28.444 + "Exercise compiled exception handlers") \ 28.445 \ 28.446 develop(bool, InterceptOSException, false, \ 28.447 - "Starts debugger when an implicit OS (e.g., NULL) " \ 28.448 + "Start debugger when an implicit OS (e.g. NULL) " \ 28.449 "exception happens") \ 28.450 \ 28.451 product(bool, PrintCodeCache, false, \ 28.452 "Print the code cache memory usage when exiting") \ 28.453 \ 28.454 develop(bool, PrintCodeCache2, false, \ 28.455 - "Print detailed usage info on the code cache when exiting") \ 28.456 + "Print detailed usage information on the code cache when exiting")\ 28.457 \ 28.458 product(bool, PrintCodeCacheOnCompilation, false, \ 28.459 - "Print the code cache memory usage each time a method is compiled") \ 28.460 + "Print the code cache memory usage each time a method is " \ 28.461 + "compiled") \ 28.462 \ 28.463 diagnostic(bool, PrintStubCode, false, \ 28.464 "Print generated stub code") \ 28.465 @@ -999,40 +1007,40 @@ 28.466 "Omit backtraces for some 'hot' exceptions in optimized code") \ 28.467 \ 28.468 product(bool, ProfilerPrintByteCodeStatistics, false, \ 28.469 - "Prints byte code statictics when dumping profiler output") \ 28.470 + "Print bytecode statistics when dumping profiler output") \ 28.471 \ 28.472 product(bool, ProfilerRecordPC, false, \ 28.473 - "Collects tick for each 16 byte interval of compiled code") \ 28.474 + "Collect ticks for each 16 byte interval of compiled code") \ 28.475 \ 28.476 product(bool, ProfileVM, false, \ 28.477 - "Profiles ticks that fall within VM (either in the VM Thread " \ 28.478 + "Profile ticks that fall within VM (either in the VM Thread " \ 28.479 "or VM code called through stubs)") \ 28.480 \ 28.481 product(bool, ProfileIntervals, false, \ 28.482 - "Prints profiles for each interval (see ProfileIntervalsTicks)") \ 28.483 + "Print profiles for each interval (see ProfileIntervalsTicks)") \ 28.484 \ 28.485 notproduct(bool, ProfilerCheckIntervals, false, \ 28.486 - "Collect and print info on spacing of profiler 
ticks") \ 28.487 + "Collect and print information on spacing of profiler ticks") \ 28.488 \ 28.489 develop(bool, PrintJVMWarnings, false, \ 28.490 - "Prints warnings for unimplemented JVM functions") \ 28.491 + "Print warnings for unimplemented JVM functions") \ 28.492 \ 28.493 product(bool, PrintWarnings, true, \ 28.494 - "Prints JVM warnings to output stream") \ 28.495 + "Print JVM warnings to output stream") \ 28.496 \ 28.497 notproduct(uintx, WarnOnStalledSpinLock, 0, \ 28.498 - "Prints warnings for stalled SpinLocks") \ 28.499 + "Print warnings for stalled SpinLocks") \ 28.500 \ 28.501 product(bool, RegisterFinalizersAtInit, true, \ 28.502 "Register finalizable objects at end of Object.<init> or " \ 28.503 "after allocation") \ 28.504 \ 28.505 develop(bool, RegisterReferences, true, \ 28.506 - "Tells whether the VM should register soft/weak/final/phantom " \ 28.507 + "Tell whether the VM should register soft/weak/final/phantom " \ 28.508 "references") \ 28.509 \ 28.510 develop(bool, IgnoreRewrites, false, \ 28.511 - "Supress rewrites of bytecodes in the oopmap generator. " \ 28.512 + "Suppress rewrites of bytecodes in the oopmap generator. " \ 28.513 "This is unsafe!") \ 28.514 \ 28.515 develop(bool, PrintCodeCacheExtension, false, \ 28.516 @@ -1042,8 +1050,7 @@ 28.517 "Enable the security JVM functions") \ 28.518 \ 28.519 develop(bool, ProtectionDomainVerification, true, \ 28.520 - "Verifies protection domain before resolution in system " \ 28.521 - "dictionary") \ 28.522 + "Verify protection domain before resolution in system dictionary")\ 28.523 \ 28.524 product(bool, ClassUnloading, true, \ 28.525 "Do unloading of classes") \ 28.526 @@ -1056,14 +1063,14 @@ 28.527 "Write memory usage profiling to log file") \ 28.528 \ 28.529 notproduct(bool, PrintSystemDictionaryAtExit, false, \ 28.530 - "Prints the system dictionary at exit") \ 28.531 + "Print the system dictionary at exit") \ 28.532 \ 28.533 experimental(intx, PredictedLoadedClassCount, 0, \ 28.534 - "Experimental: Tune loaded class cache starting size.") \ 28.535 + "Experimental: Tune loaded class cache starting size") \ 28.536 \ 28.537 diagnostic(bool, UnsyncloadClass, false, \ 28.538 "Unstable: VM calls loadClass unsynchronized. 
Custom " \ 28.539 - "class loader must call VM synchronized for findClass " \ 28.540 + "class loader must call VM synchronized for findClass " \ 28.541 "and defineClass.") \ 28.542 \ 28.543 product(bool, AlwaysLockClassLoader, false, \ 28.544 @@ -1079,22 +1086,22 @@ 28.545 "Call loadClassInternal() rather than loadClass()") \ 28.546 \ 28.547 product_pd(bool, DontYieldALot, \ 28.548 - "Throw away obvious excess yield calls (for SOLARIS only)") \ 28.549 + "Throw away obvious excess yield calls (for Solaris only)") \ 28.550 \ 28.551 product_pd(bool, ConvertSleepToYield, \ 28.552 - "Converts sleep(0) to thread yield " \ 28.553 - "(may be off for SOLARIS to improve GUI)") \ 28.554 + "Convert sleep(0) to thread yield " \ 28.555 + "(may be off for Solaris to improve GUI)") \ 28.556 \ 28.557 product(bool, ConvertYieldToSleep, false, \ 28.558 - "Converts yield to a sleep of MinSleepInterval to simulate Win32 "\ 28.559 - "behavior (SOLARIS only)") \ 28.560 + "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \ 28.561 + "behavior (Solaris only)") \ 28.562 \ 28.563 product(bool, UseBoundThreads, true, \ 28.564 - "Bind user level threads to kernel threads (for SOLARIS only)") \ 28.565 + "Bind user level threads to kernel threads (for Solaris only)") \ 28.566 \ 28.567 develop(bool, UseDetachedThreads, true, \ 28.568 "Use detached threads that are recycled upon termination " \ 28.569 - "(for SOLARIS only)") \ 28.570 + "(for Solaris only)") \ 28.571 \ 28.572 product(bool, UseLWPSynchronization, true, \ 28.573 "Use LWP-based instead of libthread-based synchronization " \ 28.574 @@ -1104,41 +1111,43 @@ 28.575 "(Unstable) Various monitor synchronization tunables") \ 28.576 \ 28.577 product(intx, EmitSync, 0, \ 28.578 - "(Unsafe,Unstable) " \ 28.579 - " Controls emission of inline sync fast-path code") \ 28.580 + "(Unsafe, Unstable) " \ 28.581 + "Control emission of inline sync fast-path code") \ 28.582 \ 28.583 product(intx, MonitorBound, 0, "Bound Monitor population") \ 28.584 \ 28.585 product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \ 28.586 \ 28.587 - product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \ 28.588 - \ 28.589 - product(intx, SyncVerbose, 0, "(Unstable)" ) \ 28.590 - \ 28.591 - product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" ) \ 28.592 + product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \ 28.593 + \ 28.594 + product(intx, SyncVerbose, 0, "(Unstable)") \ 28.595 + \ 28.596 + product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \ 28.597 \ 28.598 product(intx, hashCode, 5, \ 28.599 - "(Unstable) select hashCode generation algorithm" ) \ 28.600 + "(Unstable) select hashCode generation algorithm") \ 28.601 \ 28.602 product(intx, WorkAroundNPTLTimedWaitHang, 1, \ 28.603 - "(Unstable, Linux-specific)" \ 28.604 - " avoid NPTL-FUTEX hang pthread_cond_timedwait" ) \ 28.605 + "(Unstable, Linux-specific) " \ 28.606 + "avoid NPTL-FUTEX hang pthread_cond_timedwait") \ 28.607 \ 28.608 product(bool, FilterSpuriousWakeups, true, \ 28.609 "Prevent spurious or premature wakeups from object.wait " \ 28.610 "(Solaris only)") \ 28.611 \ 28.612 - product(intx, NativeMonitorTimeout, -1, "(Unstable)" ) \ 28.613 - product(intx, NativeMonitorFlags, 0, "(Unstable)" ) \ 28.614 - product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" ) \ 28.615 + product(intx, NativeMonitorTimeout, -1, "(Unstable)") \ 28.616 + \ 28.617 + product(intx, NativeMonitorFlags, 0, "(Unstable)") \ 28.618 + \ 28.619 + product(intx, NativeMonitorSpinLimit, 20, 
"(Unstable)") \ 28.620 \ 28.621 develop(bool, UsePthreads, false, \ 28.622 "Use pthread-based instead of libthread-based synchronization " \ 28.623 "(SPARC only)") \ 28.624 \ 28.625 product(bool, AdjustConcurrency, false, \ 28.626 - "call thr_setconcurrency at thread create time to avoid " \ 28.627 - "LWP starvation on MP systems (For Solaris Only)") \ 28.628 + "Call thr_setconcurrency at thread creation time to avoid " \ 28.629 + "LWP starvation on MP systems (for Solaris Only)") \ 28.630 \ 28.631 product(bool, ReduceSignalUsage, false, \ 28.632 "Reduce the use of OS signals in Java and/or the VM") \ 28.633 @@ -1147,13 +1156,14 @@ 28.634 "Share vtable stubs (smaller code but worse branch prediction") \ 28.635 \ 28.636 develop(bool, LoadLineNumberTables, true, \ 28.637 - "Tells whether the class file parser loads line number tables") \ 28.638 + "Tell whether the class file parser loads line number tables") \ 28.639 \ 28.640 develop(bool, LoadLocalVariableTables, true, \ 28.641 - "Tells whether the class file parser loads local variable tables")\ 28.642 + "Tell whether the class file parser loads local variable tables") \ 28.643 \ 28.644 develop(bool, LoadLocalVariableTypeTables, true, \ 28.645 - "Tells whether the class file parser loads local variable type tables")\ 28.646 + "Tell whether the class file parser loads local variable type" \ 28.647 + "tables") \ 28.648 \ 28.649 product(bool, AllowUserSignalHandlers, false, \ 28.650 "Do not complain if the application installs signal handlers " \ 28.651 @@ -1184,10 +1194,12 @@ 28.652 \ 28.653 product(bool, EagerXrunInit, false, \ 28.654 "Eagerly initialize -Xrun libraries; allows startup profiling, " \ 28.655 - " but not all -Xrun libraries may support the state of the VM at this time") \ 28.656 + "but not all -Xrun libraries may support the state of the VM " \ 28.657 + "at this time") \ 28.658 \ 28.659 product(bool, PreserveAllAnnotations, false, \ 28.660 - "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \ 28.661 + "Preserve RuntimeInvisibleAnnotations as well " \ 28.662 + "as RuntimeVisibleAnnotations") \ 28.663 \ 28.664 develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ 28.665 "Number of OutOfMemoryErrors preallocated with backtrace") \ 28.666 @@ -1262,7 +1274,7 @@ 28.667 "Trace level for JVMTI RedefineClasses") \ 28.668 \ 28.669 develop(bool, StressMethodComparator, false, \ 28.670 - "run the MethodComparator on all loaded methods") \ 28.671 + "Run the MethodComparator on all loaded methods") \ 28.672 \ 28.673 /* change to false by default sometime after Mustang */ \ 28.674 product(bool, VerifyMergedCPBytecodes, true, \ 28.675 @@ -1296,7 +1308,7 @@ 28.676 "Trace dependencies") \ 28.677 \ 28.678 develop(bool, VerifyDependencies, trueInDebug, \ 28.679 - "Exercise and verify the compilation dependency mechanism") \ 28.680 + "Exercise and verify the compilation dependency mechanism") \ 28.681 \ 28.682 develop(bool, TraceNewOopMapGeneration, false, \ 28.683 "Trace OopMapGeneration") \ 28.684 @@ -1314,7 +1326,7 @@ 28.685 "Trace monitor matching failures during OopMapGeneration") \ 28.686 \ 28.687 develop(bool, TraceOopMapRewrites, false, \ 28.688 - "Trace rewritting of method oops during oop map generation") \ 28.689 + "Trace rewriting of method oops during oop map generation") \ 28.690 \ 28.691 develop(bool, TraceSafepoint, false, \ 28.692 "Trace safepoint operations") \ 28.693 @@ -1332,10 +1344,10 @@ 28.694 "Trace setup time") \ 28.695 \ 28.696 develop(bool, TraceProtectionDomainVerification, false, \ 28.697 - 
"Trace protection domain verifcation") \ 28.698 + "Trace protection domain verification") \ 28.699 \ 28.700 develop(bool, TraceClearedExceptions, false, \ 28.701 - "Prints when an exception is forcibly cleared") \ 28.702 + "Print when an exception is forcibly cleared") \ 28.703 \ 28.704 product(bool, TraceClassResolution, false, \ 28.705 "Trace all constant pool resolutions (for debugging)") \ 28.706 @@ -1349,7 +1361,7 @@ 28.707 /* gc */ \ 28.708 \ 28.709 product(bool, UseSerialGC, false, \ 28.710 - "Use the serial garbage collector") \ 28.711 + "Use the Serial garbage collector") \ 28.712 \ 28.713 product(bool, UseG1GC, false, \ 28.714 "Use the Garbage-First garbage collector") \ 28.715 @@ -1368,16 +1380,16 @@ 28.716 "The collection count for the first maximum compaction") \ 28.717 \ 28.718 product(bool, UseMaximumCompactionOnSystemGC, true, \ 28.719 - "In the Parallel Old garbage collector maximum compaction for " \ 28.720 - "a system GC") \ 28.721 + "Use maximum compaction in the Parallel Old garbage collector " \ 28.722 + "for a system GC") \ 28.723 \ 28.724 product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ 28.725 - "The mean used by the par compact dead wood" \ 28.726 - "limiter (a number between 0-100).") \ 28.727 + "The mean used by the parallel compact dead wood " \ 28.728 + "limiter (a number between 0-100)") \ 28.729 \ 28.730 product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ 28.731 - "The standard deviation used by the par compact dead wood" \ 28.732 - "limiter (a number between 0-100).") \ 28.733 + "The standard deviation used by the parallel compact dead wood " \ 28.734 + "limiter (a number between 0-100)") \ 28.735 \ 28.736 product(uintx, ParallelGCThreads, 0, \ 28.737 "Number of parallel threads parallel gc will use") \ 28.738 @@ -1387,7 +1399,7 @@ 28.739 "parallel gc will use") \ 28.740 \ 28.741 diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ 28.742 - "Force dynamic selection of the number of" \ 28.743 + "Force dynamic selection of the number of " \ 28.744 "parallel threads parallel gc will use to aid debugging") \ 28.745 \ 28.746 product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M), \ 28.747 @@ -1398,7 +1410,7 @@ 28.748 "Trace the dynamic GC thread usage") \ 28.749 \ 28.750 develop(bool, ParallelOldGCSplitALot, false, \ 28.751 - "Provoke splitting (copying data from a young gen space to" \ 28.752 + "Provoke splitting (copying data from a young gen space to " \ 28.753 "multiple destination spaces)") \ 28.754 \ 28.755 develop(uintx, ParallelOldGCSplitInterval, 3, \ 28.756 @@ -1408,19 +1420,19 @@ 28.757 "Number of threads concurrent gc will use") \ 28.758 \ 28.759 product(uintx, YoungPLABSize, 4096, \ 28.760 - "Size of young gen promotion labs (in HeapWords)") \ 28.761 + "Size of young gen promotion LAB's (in HeapWords)") \ 28.762 \ 28.763 product(uintx, OldPLABSize, 1024, \ 28.764 - "Size of old gen promotion labs (in HeapWords)") \ 28.765 + "Size of old gen promotion LAB's (in HeapWords)") \ 28.766 \ 28.767 product(uintx, GCTaskTimeStampEntries, 200, \ 28.768 "Number of time stamp entries per gc worker thread") \ 28.769 \ 28.770 product(bool, AlwaysTenure, false, \ 28.771 - "Always tenure objects in eden. 
(ParallelGC only)") \ 28.772 + "Always tenure objects in eden (ParallelGC only)") \ 28.773 \ 28.774 product(bool, NeverTenure, false, \ 28.775 - "Never tenure objects in eden, May tenure on overflow " \ 28.776 + "Never tenure objects in eden, may tenure on overflow " \ 28.777 "(ParallelGC only)") \ 28.778 \ 28.779 product(bool, ScavengeBeforeFullGC, true, \ 28.780 @@ -1428,14 +1440,14 @@ 28.781 "used with UseParallelGC") \ 28.782 \ 28.783 develop(bool, ScavengeWithObjectsInToSpace, false, \ 28.784 - "Allow scavenges to occur when to_space contains objects.") \ 28.785 + "Allow scavenges to occur when to-space contains objects") \ 28.786 \ 28.787 product(bool, UseConcMarkSweepGC, false, \ 28.788 "Use Concurrent Mark-Sweep GC in the old generation") \ 28.789 \ 28.790 product(bool, ExplicitGCInvokesConcurrent, false, \ 28.791 - "A System.gc() request invokes a concurrent collection;" \ 28.792 - " (effective only when UseConcMarkSweepGC)") \ 28.793 + "A System.gc() request invokes a concurrent collection; " \ 28.794 + "(effective only when UseConcMarkSweepGC)") \ 28.795 \ 28.796 product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ 28.797 "A System.gc() request invokes a concurrent collection and " \ 28.798 @@ -1443,19 +1455,19 @@ 28.799 "(effective only when UseConcMarkSweepGC)") \ 28.800 \ 28.801 product(bool, GCLockerInvokesConcurrent, false, \ 28.802 - "The exit of a JNI CS necessitating a scavenge also" \ 28.803 - " kicks off a bkgrd concurrent collection") \ 28.804 + "The exit of a JNI critical section necessitating a scavenge, " \ 28.805 + "also kicks off a background concurrent collection") \ 28.806 \ 28.807 product(uintx, GCLockerEdenExpansionPercent, 5, \ 28.808 - "How much the GC can expand the eden by while the GC locker " \ 28.809 + "How much the GC can expand the eden by while the GC locker " \ 28.810 "is active (as a percentage)") \ 28.811 \ 28.812 diagnostic(intx, GCLockerRetryAllocationCount, 2, \ 28.813 - "Number of times to retry allocations when" \ 28.814 - " blocked by the GC locker") \ 28.815 + "Number of times to retry allocations when " \ 28.816 + "blocked by the GC locker") \ 28.817 \ 28.818 develop(bool, UseCMSAdaptiveFreeLists, true, \ 28.819 - "Use Adaptive Free Lists in the CMS generation") \ 28.820 + "Use adaptive free lists in the CMS generation") \ 28.821 \ 28.822 develop(bool, UseAsyncConcMarkSweepGC, true, \ 28.823 "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\ 28.824 @@ -1470,44 +1482,46 @@ 28.825 "Use passing of collection from background to foreground") \ 28.826 \ 28.827 product(bool, UseParNewGC, false, \ 28.828 - "Use parallel threads in the new generation.") \ 28.829 + "Use parallel threads in the new generation") \ 28.830 \ 28.831 product(bool, ParallelGCVerbose, false, \ 28.832 - "Verbose output for parallel GC.") \ 28.833 + "Verbose output for parallel gc") \ 28.834 \ 28.835 product(uintx, ParallelGCBufferWastePct, 10, \ 28.836 - "Wasted fraction of parallel allocation buffer.") \ 28.837 + "Wasted fraction of parallel allocation buffer") \ 28.838 \ 28.839 diagnostic(bool, ParallelGCRetainPLAB, false, \ 28.840 - "Retain parallel allocation buffers across scavenges; " \ 28.841 - " -- disabled because this currently conflicts with " \ 28.842 - " parallel card scanning under certain conditions ") \ 28.843 + "Retain parallel allocation buffers across scavenges; " \ 28.844 + "it is disabled because this currently conflicts with " \ 28.845 + "parallel card scanning under certain conditions.") \ 28.846 \ 28.847 
product(uintx, TargetPLABWastePct, 10, \ 28.848 "Target wasted space in last buffer as percent of overall " \ 28.849 "allocation") \ 28.850 \ 28.851 product(uintx, PLABWeight, 75, \ 28.852 - "Percentage (0-100) used to weight the current sample when" \ 28.853 - "computing exponentially decaying average for ResizePLAB.") \ 28.854 + "Percentage (0-100) used to weigh the current sample when " \ 28.855 + "computing exponentially decaying average for ResizePLAB") \ 28.856 \ 28.857 product(bool, ResizePLAB, true, \ 28.858 - "Dynamically resize (survivor space) promotion labs") \ 28.859 + "Dynamically resize (survivor space) promotion LAB's") \ 28.860 \ 28.861 product(bool, PrintPLAB, false, \ 28.862 - "Print (survivor space) promotion labs sizing decisions") \ 28.863 + "Print (survivor space) promotion LAB's sizing decisions") \ 28.864 \ 28.865 product(intx, ParGCArrayScanChunk, 50, \ 28.866 - "Scan a subset and push remainder, if array is bigger than this") \ 28.867 + "Scan a subset of object array and push remainder, if array is " \ 28.868 + "bigger than this") \ 28.869 \ 28.870 product(bool, ParGCUseLocalOverflow, false, \ 28.871 "Instead of a global overflow list, use local overflow stacks") \ 28.872 \ 28.873 product(bool, ParGCTrimOverflow, true, \ 28.874 - "Eagerly trim the local overflow lists (when ParGCUseLocalOverflow") \ 28.875 + "Eagerly trim the local overflow lists " \ 28.876 + "(when ParGCUseLocalOverflow)") \ 28.877 \ 28.878 notproduct(bool, ParGCWorkQueueOverflowALot, false, \ 28.879 - "Whether we should simulate work queue overflow in ParNew") \ 28.880 + "Simulate work queue overflow in ParNew") \ 28.881 \ 28.882 notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ 28.883 "An `interval' counter that determines how frequently " \ 28.884 @@ -1525,43 +1539,46 @@ 28.885 "during card table scanning") \ 28.886 \ 28.887 product(uintx, CMSParPromoteBlocksToClaim, 16, \ 28.888 - "Number of blocks to attempt to claim when refilling CMS LAB for "\ 28.889 - "parallel GC.") \ 28.890 + "Number of blocks to attempt to claim when refilling CMS LAB's " \ 28.891 + "for parallel GC") \ 28.892 \ 28.893 product(uintx, OldPLABWeight, 50, \ 28.894 - "Percentage (0-100) used to weight the current sample when" \ 28.895 - "computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \ 28.896 + "Percentage (0-100) used to weight the current sample when " \ 28.897 + "computing exponentially decaying average for resizing " \ 28.898 + "CMSParPromoteBlocksToClaim") \ 28.899 \ 28.900 product(bool, ResizeOldPLAB, true, \ 28.901 - "Dynamically resize (old gen) promotion labs") \ 28.902 + "Dynamically resize (old gen) promotion LAB's") \ 28.903 \ 28.904 product(bool, PrintOldPLAB, false, \ 28.905 - "Print (old gen) promotion labs sizing decisions") \ 28.906 + "Print (old gen) promotion LAB's sizing decisions") \ 28.907 \ 28.908 product(uintx, CMSOldPLABMin, 16, \ 28.909 - "Min size of CMS gen promotion lab caches per worker per blksize")\ 28.910 + "Minimum size of CMS gen promotion LAB caches per worker " \ 28.911 + "per block size") \ 28.912 \ 28.913 product(uintx, CMSOldPLABMax, 1024, \ 28.914 - "Max size of CMS gen promotion lab caches per worker per blksize")\ 28.915 + "Maximum size of CMS gen promotion LAB caches per worker " \ 28.916 + "per block size") \ 28.917 \ 28.918 product(uintx, CMSOldPLABNumRefills, 4, \ 28.919 - "Nominal number of refills of CMS gen promotion lab cache" \ 28.920 - " per worker per block size") \ 28.921 + "Nominal number of refills of CMS gen promotion 
LAB cache " \ 28.922 + "per worker per block size") \ 28.923 \ 28.924 product(bool, CMSOldPLABResizeQuicker, false, \ 28.925 - "Whether to react on-the-fly during a scavenge to a sudden" \ 28.926 - " change in block demand rate") \ 28.927 + "React on-the-fly during a scavenge to a sudden " \ 28.928 + "change in block demand rate") \ 28.929 \ 28.930 product(uintx, CMSOldPLABToleranceFactor, 4, \ 28.931 - "The tolerance of the phase-change detector for on-the-fly" \ 28.932 - " PLAB resizing during a scavenge") \ 28.933 + "The tolerance of the phase-change detector for on-the-fly " \ 28.934 + "PLAB resizing during a scavenge") \ 28.935 \ 28.936 product(uintx, CMSOldPLABReactivityFactor, 2, \ 28.937 - "The gain in the feedback loop for on-the-fly PLAB resizing" \ 28.938 - " during a scavenge") \ 28.939 + "The gain in the feedback loop for on-the-fly PLAB resizing " \ 28.940 + "during a scavenge") \ 28.941 \ 28.942 product(bool, AlwaysPreTouch, false, \ 28.943 - "It forces all freshly committed pages to be pre-touched.") \ 28.944 + "Force all freshly committed pages to be pre-touched") \ 28.945 \ 28.946 product_pd(uintx, CMSYoungGenPerWorker, \ 28.947 "The maximum size of young gen chosen by default per GC worker " \ 28.948 @@ -1571,64 +1588,67 @@ 28.949 "Whether CMS GC should operate in \"incremental\" mode") \ 28.950 \ 28.951 product(uintx, CMSIncrementalDutyCycle, 10, \ 28.952 - "CMS incremental mode duty cycle (a percentage, 0-100). If" \ 28.953 - "CMSIncrementalPacing is enabled, then this is just the initial" \ 28.954 - "value") \ 28.955 + "Percentage (0-100) of CMS incremental mode duty cycle. If " \ 28.956 + "CMSIncrementalPacing is enabled, then this is just the initial " \ 28.957 + "value.") \ 28.958 \ 28.959 product(bool, CMSIncrementalPacing, true, \ 28.960 "Whether the CMS incremental mode duty cycle should be " \ 28.961 "automatically adjusted") \ 28.962 \ 28.963 product(uintx, CMSIncrementalDutyCycleMin, 0, \ 28.964 - "Lower bound on the duty cycle when CMSIncrementalPacing is " \ 28.965 - "enabled (a percentage, 0-100)") \ 28.966 + "Minimum percentage (0-100) of the CMS incremental duty cycle " \ 28.967 + "used when CMSIncrementalPacing is enabled") \ 28.968 \ 28.969 product(uintx, CMSIncrementalSafetyFactor, 10, \ 28.970 "Percentage (0-100) used to add conservatism when computing the " \ 28.971 "duty cycle") \ 28.972 \ 28.973 product(uintx, CMSIncrementalOffset, 0, \ 28.974 - "Percentage (0-100) by which the CMS incremental mode duty cycle" \ 28.975 - " is shifted to the right within the period between young GCs") \ 28.976 + "Percentage (0-100) by which the CMS incremental mode duty cycle "\ 28.977 + "is shifted to the right within the period between young GCs") \ 28.978 \ 28.979 product(uintx, CMSExpAvgFactor, 50, \ 28.980 - "Percentage (0-100) used to weight the current sample when" \ 28.981 - "computing exponential averages for CMS statistics.") \ 28.982 + "Percentage (0-100) used to weigh the current sample when " \ 28.983 + "computing exponential averages for CMS statistics") \ 28.984 \ 28.985 product(uintx, CMS_FLSWeight, 75, \ 28.986 - "Percentage (0-100) used to weight the current sample when" \ 28.987 - "computing exponentially decating averages for CMS FLS statistics.") \ 28.988 + "Percentage (0-100) used to weigh the current sample when " \ 28.989 + "computing exponentially decaying averages for CMS FLS " \ 28.990 + "statistics") \ 28.991 \ 28.992 product(uintx, CMS_FLSPadding, 1, \ 28.993 - "The multiple of deviation from mean to use for buffering" \ 28.994 - 
"against volatility in free list demand.") \ 28.995 + "The multiple of deviation from mean to use for buffering " \ 28.996 + "against volatility in free list demand") \ 28.997 \ 28.998 product(uintx, FLSCoalescePolicy, 2, \ 28.999 - "CMS: Aggression level for coalescing, increasing from 0 to 4") \ 28.1000 + "CMS: aggressiveness level for coalescing, increasing " \ 28.1001 + "from 0 to 4") \ 28.1002 \ 28.1003 product(bool, FLSAlwaysCoalesceLarge, false, \ 28.1004 - "CMS: Larger free blocks are always available for coalescing") \ 28.1005 + "CMS: larger free blocks are always available for coalescing") \ 28.1006 \ 28.1007 product(double, FLSLargestBlockCoalesceProximity, 0.99, \ 28.1008 - "CMS: the smaller the percentage the greater the coalition force")\ 28.1009 + "CMS: the smaller the percentage the greater the coalescing " \ 28.1010 + "force") \ 28.1011 \ 28.1012 product(double, CMSSmallCoalSurplusPercent, 1.05, \ 28.1013 - "CMS: the factor by which to inflate estimated demand of small" \ 28.1014 - " block sizes to prevent coalescing with an adjoining block") \ 28.1015 + "CMS: the factor by which to inflate estimated demand of small " \ 28.1016 + "block sizes to prevent coalescing with an adjoining block") \ 28.1017 \ 28.1018 product(double, CMSLargeCoalSurplusPercent, 0.95, \ 28.1019 - "CMS: the factor by which to inflate estimated demand of large" \ 28.1020 - " block sizes to prevent coalescing with an adjoining block") \ 28.1021 + "CMS: the factor by which to inflate estimated demand of large " \ 28.1022 + "block sizes to prevent coalescing with an adjoining block") \ 28.1023 \ 28.1024 product(double, CMSSmallSplitSurplusPercent, 1.10, \ 28.1025 - "CMS: the factor by which to inflate estimated demand of small" \ 28.1026 - " block sizes to prevent splitting to supply demand for smaller" \ 28.1027 - " blocks") \ 28.1028 + "CMS: the factor by which to inflate estimated demand of small " \ 28.1029 + "block sizes to prevent splitting to supply demand for smaller " \ 28.1030 + "blocks") \ 28.1031 \ 28.1032 product(double, CMSLargeSplitSurplusPercent, 1.00, \ 28.1033 - "CMS: the factor by which to inflate estimated demand of large" \ 28.1034 - " block sizes to prevent splitting to supply demand for smaller" \ 28.1035 - " blocks") \ 28.1036 + "CMS: the factor by which to inflate estimated demand of large " \ 28.1037 + "block sizes to prevent splitting to supply demand for smaller " \ 28.1038 + "blocks") \ 28.1039 \ 28.1040 product(bool, CMSExtrapolateSweep, false, \ 28.1041 "CMS: cushion for block demand during sweep") \ 28.1042 @@ -1640,11 +1660,11 @@ 28.1043 \ 28.1044 product(uintx, CMS_SweepPadding, 1, \ 28.1045 "The multiple of deviation from mean to use for buffering " \ 28.1046 - "against volatility in inter-sweep duration.") \ 28.1047 + "against volatility in inter-sweep duration") \ 28.1048 \ 28.1049 product(uintx, CMS_SweepTimerThresholdMillis, 10, \ 28.1050 "Skip block flux-rate sampling for an epoch unless inter-sweep " \ 28.1051 - "duration exceeds this threhold in milliseconds") \ 28.1052 + "duration exceeds this threshold in milliseconds") \ 28.1053 \ 28.1054 develop(bool, CMSTraceIncrementalMode, false, \ 28.1055 "Trace CMS incremental mode") \ 28.1056 @@ -1659,14 +1679,15 @@ 28.1057 "Whether class unloading enabled when using CMS GC") \ 28.1058 \ 28.1059 product(uintx, CMSClassUnloadingMaxInterval, 0, \ 28.1060 - "When CMS class unloading is enabled, the maximum CMS cycle count"\ 28.1061 - " for which classes may not be unloaded") \ 28.1062 + "When CMS class unloading is 
enabled, the maximum CMS cycle " \ 28.1063 + "count for which classes may not be unloaded") \ 28.1064 \ 28.1065 product(bool, CMSCompactWhenClearAllSoftRefs, true, \ 28.1066 - "Compact when asked to collect CMS gen with clear_all_soft_refs") \ 28.1067 + "Compact when asked to collect CMS gen with " \ 28.1068 + "clear_all_soft_refs()") \ 28.1069 \ 28.1070 product(bool, UseCMSCompactAtFullCollection, true, \ 28.1071 - "Use mark sweep compact at full collections") \ 28.1072 + "Use Mark-Sweep-Compact algorithm at full collections") \ 28.1073 \ 28.1074 product(uintx, CMSFullGCsBeforeCompaction, 0, \ 28.1075 "Number of CMS full collection done before compaction if > 0") \ 28.1076 @@ -1688,38 +1709,37 @@ 28.1077 "Warn in case of excessive CMS looping") \ 28.1078 \ 28.1079 develop(bool, CMSOverflowEarlyRestoration, false, \ 28.1080 - "Whether preserved marks should be restored early") \ 28.1081 + "Restore preserved marks early") \ 28.1082 \ 28.1083 product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ 28.1084 "Size of marking stack") \ 28.1085 \ 28.1086 product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ 28.1087 - "Max size of marking stack") \ 28.1088 + "Maximum size of marking stack") \ 28.1089 \ 28.1090 notproduct(bool, CMSMarkStackOverflowALot, false, \ 28.1091 - "Whether we should simulate frequent marking stack / work queue" \ 28.1092 - " overflow") \ 28.1093 + "Simulate frequent marking stack / work queue overflow") \ 28.1094 \ 28.1095 notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ 28.1096 - "An `interval' counter that determines how frequently" \ 28.1097 - " we simulate overflow; a smaller number increases frequency") \ 28.1098 + "An \"interval\" counter that determines how frequently " \ 28.1099 + "to simulate overflow; a smaller number increases frequency") \ 28.1100 \ 28.1101 product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ 28.1102 - "(Temporary, subject to experimentation)" \ 28.1103 + "(Temporary, subject to experimentation) " \ 28.1104 "Maximum number of abortable preclean iterations, if > 0") \ 28.1105 \ 28.1106 product(intx, CMSMaxAbortablePrecleanTime, 5000, \ 28.1107 - "(Temporary, subject to experimentation)" \ 28.1108 - "Maximum time in abortable preclean in ms") \ 28.1109 + "(Temporary, subject to experimentation) " \ 28.1110 + "Maximum time in abortable preclean (in milliseconds)") \ 28.1111 \ 28.1112 product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ 28.1113 - "(Temporary, subject to experimentation)" \ 28.1114 + "(Temporary, subject to experimentation) " \ 28.1115 "Nominal minimum work per abortable preclean iteration") \ 28.1116 \ 28.1117 manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ 28.1118 - "(Temporary, subject to experimentation)" \ 28.1119 - " Time that we sleep between iterations when not given" \ 28.1120 - " enough work per iteration") \ 28.1121 + "(Temporary, subject to experimentation) " \ 28.1122 + "Time that we sleep between iterations when not given " \ 28.1123 + "enough work per iteration") \ 28.1124 \ 28.1125 product(uintx, CMSRescanMultiple, 32, \ 28.1126 "Size (in cards) of CMS parallel rescan task") \ 28.1127 @@ -1737,23 +1757,24 @@ 28.1128 "Whether parallel remark enabled (only if ParNewGC)") \ 28.1129 \ 28.1130 product(bool, CMSParallelSurvivorRemarkEnabled, true, \ 28.1131 - "Whether parallel remark of survivor space" \ 28.1132 - " enabled (effective only if CMSParallelRemarkEnabled)") \ 28.1133 + "Whether parallel remark of survivor space " \ 28.1134 + "enabled (effective only if 
CMSParallelRemarkEnabled)") \ 28.1135 \ 28.1136 product(bool, CMSPLABRecordAlways, true, \ 28.1137 - "Whether to always record survivor space PLAB bdries" \ 28.1138 - " (effective only if CMSParallelSurvivorRemarkEnabled)") \ 28.1139 + "Always record survivor space PLAB boundaries (effective only " \ 28.1140 + "if CMSParallelSurvivorRemarkEnabled)") \ 28.1141 \ 28.1142 product(bool, CMSEdenChunksRecordAlways, true, \ 28.1143 - "Whether to always record eden chunks used for " \ 28.1144 - "the parallel initial mark or remark of eden" ) \ 28.1145 + "Always record eden chunks used for the parallel initial mark " \ 28.1146 + "or remark of eden") \ 28.1147 \ 28.1148 product(bool, CMSPrintEdenSurvivorChunks, false, \ 28.1149 "Print the eden and the survivor chunks used for the parallel " \ 28.1150 "initial mark or remark of the eden/survivor spaces") \ 28.1151 \ 28.1152 product(bool, CMSConcurrentMTEnabled, true, \ 28.1153 - "Whether multi-threaded concurrent work enabled (if ParNewGC)") \ 28.1154 + "Whether multi-threaded concurrent work enabled " \ 28.1155 + "(effective only if ParNewGC)") \ 28.1156 \ 28.1157 product(bool, CMSPrecleaningEnabled, true, \ 28.1158 "Whether concurrent precleaning enabled") \ 28.1159 @@ -1762,12 +1783,12 @@ 28.1160 "Maximum number of precleaning iteration passes") \ 28.1161 \ 28.1162 product(uintx, CMSPrecleanNumerator, 2, \ 28.1163 - "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ 28.1164 - " ratio") \ 28.1165 + "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 28.1166 + "ratio") \ 28.1167 \ 28.1168 product(uintx, CMSPrecleanDenominator, 3, \ 28.1169 - "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ 28.1170 - " ratio") \ 28.1171 + "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \ 28.1172 + "ratio") \ 28.1173 \ 28.1174 product(bool, CMSPrecleanRefLists1, true, \ 28.1175 "Preclean ref lists during (initial) preclean phase") \ 28.1176 @@ -1782,7 +1803,7 @@ 28.1177 "Preclean survivors during abortable preclean phase") \ 28.1178 \ 28.1179 product(uintx, CMSPrecleanThreshold, 1000, \ 28.1180 - "Don't re-iterate if #dirty cards less than this") \ 28.1181 + "Do not iterate again if number of dirty cards is less than this")\ 28.1182 \ 28.1183 product(bool, CMSCleanOnEnter, true, \ 28.1184 "Clean-on-enter optimization for reducing number of dirty cards") \ 28.1185 @@ -1791,14 +1812,16 @@ 28.1186 "Choose variant (1,2) of verification following remark") \ 28.1187 \ 28.1188 product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ 28.1189 - "If Eden used is below this value, don't try to schedule remark") \ 28.1190 + "If Eden size is below this, do not try to schedule remark") \ 28.1191 \ 28.1192 product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ 28.1193 - "The Eden occupancy % at which to try and schedule remark pause") \ 28.1194 + "The Eden occupancy percentage (0-100) at which " \ 28.1195 + "to try and schedule remark pause") \ 28.1196 \ 28.1197 product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ 28.1198 - "Start sampling Eden top at least before yg occupancy reaches" \ 28.1199 - " 1/<ratio> of the size at which we plan to schedule remark") \ 28.1200 + "Start sampling eden top at least before young gen " \ 28.1201 + "occupancy reaches 1/<ratio> of the size at which " \ 28.1202 + "we plan to schedule remark") \ 28.1203 \ 28.1204 product(uintx, CMSSamplingGrain, 16*K, \ 28.1205 "The minimum distance between eden samples for CMS (see above)") \ 28.1206 @@ -1820,27 +1843,27 @@ 28.1207 "should start a 
collection cycle") \ 28.1208 \ 28.1209 product(bool, CMSYield, true, \ 28.1210 - "Yield between steps of concurrent mark & sweep") \ 28.1211 + "Yield between steps of CMS") \ 28.1212 \ 28.1213 product(uintx, CMSBitMapYieldQuantum, 10*M, \ 28.1214 - "Bitmap operations should process at most this many bits" \ 28.1215 + "Bitmap operations should process at most this many bits " \ 28.1216 "between yields") \ 28.1217 \ 28.1218 product(bool, CMSDumpAtPromotionFailure, false, \ 28.1219 "Dump useful information about the state of the CMS old " \ 28.1220 - " generation upon a promotion failure.") \ 28.1221 + "generation upon a promotion failure") \ 28.1222 \ 28.1223 product(bool, CMSPrintChunksInDump, false, \ 28.1224 "In a dump enabled by CMSDumpAtPromotionFailure, include " \ 28.1225 - " more detailed information about the free chunks.") \ 28.1226 + "more detailed information about the free chunks") \ 28.1227 \ 28.1228 product(bool, CMSPrintObjectsInDump, false, \ 28.1229 "In a dump enabled by CMSDumpAtPromotionFailure, include " \ 28.1230 - " more detailed information about the allocated objects.") \ 28.1231 + "more detailed information about the allocated objects") \ 28.1232 \ 28.1233 diagnostic(bool, FLSVerifyAllHeapReferences, false, \ 28.1234 - "Verify that all refs across the FLS boundary " \ 28.1235 - " are to valid objects") \ 28.1236 + "Verify that all references across the FLS boundary " \ 28.1237 + "are to valid objects") \ 28.1238 \ 28.1239 diagnostic(bool, FLSVerifyLists, false, \ 28.1240 "Do lots of (expensive) FreeListSpace verification") \ 28.1241 @@ -1852,17 +1875,18 @@ 28.1242 "Do lots of (expensive) FLS dictionary verification") \ 28.1243 \ 28.1244 develop(bool, VerifyBlockOffsetArray, false, \ 28.1245 - "Do (expensive!) block offset array verification") \ 28.1246 + "Do (expensive) block offset array verification") \ 28.1247 \ 28.1248 diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ 28.1249 - "Maintain _unallocated_block in BlockOffsetArray" \ 28.1250 - " (currently applicable only to CMS collector)") \ 28.1251 + "Maintain _unallocated_block in BlockOffsetArray " \ 28.1252 + "(currently applicable only to CMS collector)") \ 28.1253 \ 28.1254 develop(bool, TraceCMSState, false, \ 28.1255 "Trace the state of the CMS collection") \ 28.1256 \ 28.1257 product(intx, RefDiscoveryPolicy, 0, \ 28.1258 - "Whether reference-based(0) or referent-based(1)") \ 28.1259 + "Select type of reference discovery policy: " \ 28.1260 + "reference-based(0) or referent-based(1)") \ 28.1261 \ 28.1262 product(bool, ParallelRefProcEnabled, false, \ 28.1263 "Enable parallel reference processing whenever possible") \ 28.1264 @@ -1890,7 +1914,7 @@ 28.1265 "denotes 'do constant GC cycles'.") \ 28.1266 \ 28.1267 product(bool, UseCMSInitiatingOccupancyOnly, false, \ 28.1268 - "Only use occupancy as a crierion for starting a CMS collection") \ 28.1269 + "Only use occupancy as a criterion for starting a CMS collection")\ 28.1270 \ 28.1271 product(uintx, CMSIsTooFullPercentage, 98, \ 28.1272 "An absolute ceiling above which CMS will always consider the " \ 28.1273 @@ -1902,7 +1926,7 @@ 28.1274 \ 28.1275 notproduct(bool, CMSVerifyReturnedBytes, false, \ 28.1276 "Check that all the garbage collected was returned to the " \ 28.1277 - "free lists.") \ 28.1278 + "free lists") \ 28.1279 \ 28.1280 notproduct(bool, ScavengeALot, false, \ 28.1281 "Force scavenge at every Nth exit from the runtime system " \ 28.1282 @@ -1917,16 +1941,16 @@ 28.1283 \ 28.1284 product(bool, PrintPromotionFailure, false, \ 28.1285 
"Print additional diagnostic information following " \ 28.1286 - " promotion failure") \ 28.1287 + "promotion failure") \ 28.1288 \ 28.1289 notproduct(bool, PromotionFailureALot, false, \ 28.1290 "Use promotion failure handling on every youngest generation " \ 28.1291 "collection") \ 28.1292 \ 28.1293 develop(uintx, PromotionFailureALotCount, 1000, \ 28.1294 - "Number of promotion failures occurring at ParGCAllocBuffer" \ 28.1295 + "Number of promotion failures occurring at ParGCAllocBuffer " \ 28.1296 "refill attempts (ParNew) or promotion attempts " \ 28.1297 - "(other young collectors) ") \ 28.1298 + "(other young collectors)") \ 28.1299 \ 28.1300 develop(uintx, PromotionFailureALotInterval, 5, \ 28.1301 "Total collections between promotion failures alot") \ 28.1302 @@ -1945,7 +1969,7 @@ 28.1303 "Ratio of hard spins to calls to yield") \ 28.1304 \ 28.1305 develop(uintx, ObjArrayMarkingStride, 512, \ 28.1306 - "Number of ObjArray elements to push onto the marking stack" \ 28.1307 + "Number of object array elements to push onto the marking stack " \ 28.1308 "before pushing a continuation entry") \ 28.1309 \ 28.1310 develop(bool, MetadataAllocationFailALot, false, \ 28.1311 @@ -1953,7 +1977,7 @@ 28.1312 "MetadataAllocationFailALotInterval") \ 28.1313 \ 28.1314 develop(uintx, MetadataAllocationFailALotInterval, 1000, \ 28.1315 - "metadata allocation failure alot interval") \ 28.1316 + "Metadata allocation failure a lot interval") \ 28.1317 \ 28.1318 develop(bool, MetaDataDeallocateALot, false, \ 28.1319 "Deallocation bunches of metadata at intervals controlled by " \ 28.1320 @@ -1972,7 +1996,7 @@ 28.1321 "Trace virtual space metadata allocations") \ 28.1322 \ 28.1323 notproduct(bool, ExecuteInternalVMTests, false, \ 28.1324 - "Enable execution of internal VM tests.") \ 28.1325 + "Enable execution of internal VM tests") \ 28.1326 \ 28.1327 notproduct(bool, VerboseInternalVMTests, false, \ 28.1328 "Turn on logging for internal VM tests.") \ 28.1329 @@ -1980,7 +2004,7 @@ 28.1330 product_pd(bool, UseTLAB, "Use thread-local object allocation") \ 28.1331 \ 28.1332 product_pd(bool, ResizeTLAB, \ 28.1333 - "Dynamically resize tlab size for threads") \ 28.1334 + "Dynamically resize TLAB size for threads") \ 28.1335 \ 28.1336 product(bool, ZeroTLAB, false, \ 28.1337 "Zero out the newly created TLAB") \ 28.1338 @@ -1992,7 +2016,8 @@ 28.1339 "Print various TLAB related information") \ 28.1340 \ 28.1341 product(bool, TLABStats, true, \ 28.1342 - "Print various TLAB related information") \ 28.1343 + "Provide more detailed and expensive TLAB statistics " \ 28.1344 + "(with PrintTLAB)") \ 28.1345 \ 28.1346 EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \ 28.1347 "Enable LowMemoryProtection")) \ 28.1348 @@ -2026,14 +2051,14 @@ 28.1349 "Fraction (1/n) of real memory used for initial heap size") \ 28.1350 \ 28.1351 develop(uintx, MaxVirtMemFraction, 2, \ 28.1352 - "Maximum fraction (1/n) of virtual memory used for ergonomically" \ 28.1353 + "Maximum fraction (1/n) of virtual memory used for ergonomically "\ 28.1354 "determining maximum heap size") \ 28.1355 \ 28.1356 product(bool, UseAutoGCSelectPolicy, false, \ 28.1357 "Use automatic collection selection policy") \ 28.1358 \ 28.1359 product(uintx, AutoGCSelectPauseMillis, 5000, \ 28.1360 - "Automatic GC selection pause threshhold in ms") \ 28.1361 + "Automatic GC selection pause threshold in milliseconds") \ 28.1362 \ 28.1363 product(bool, UseAdaptiveSizePolicy, true, \ 28.1364 "Use adaptive generation sizing policies") \ 28.1365 @@ -2048,7 +2073,7 
@@ 28.1366 "Use adaptive young-old sizing policies at major collections") \ 28.1367 \ 28.1368 product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ 28.1369 - "Use statistics from System.GC for adaptive size policy") \ 28.1370 + "Include statistics from System.gc() for adaptive size policy") \ 28.1371 \ 28.1372 product(bool, UseAdaptiveGCBoundary, false, \ 28.1373 "Allow young-old boundary to move") \ 28.1374 @@ -2060,16 +2085,16 @@ 28.1375 "Resize the virtual spaces of the young or old generations") \ 28.1376 \ 28.1377 product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ 28.1378 - "Policy for changeing generation size for throughput goals") \ 28.1379 + "Policy for changing generation size for throughput goals") \ 28.1380 \ 28.1381 product(uintx, AdaptiveSizePausePolicy, 0, \ 28.1382 "Policy for changing generation size for pause goals") \ 28.1383 \ 28.1384 develop(bool, PSAdjustTenuredGenForMinorPause, false, \ 28.1385 - "Adjust tenured generation to achive a minor pause goal") \ 28.1386 + "Adjust tenured generation to achieve a minor pause goal") \ 28.1387 \ 28.1388 develop(bool, PSAdjustYoungGenForMajorPause, false, \ 28.1389 - "Adjust young generation to achive a major pause goal") \ 28.1390 + "Adjust young generation to achieve a major pause goal") \ 28.1391 \ 28.1392 product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ 28.1393 "Number of steps where heuristics is used before data is used") \ 28.1394 @@ -2124,14 +2149,15 @@ 28.1395 "Decay factor to TenuredGenerationSizeIncrement") \ 28.1396 \ 28.1397 product(uintx, MaxGCPauseMillis, max_uintx, \ 28.1398 - "Adaptive size policy maximum GC pause time goal in msec, " \ 28.1399 - "or (G1 Only) the max. GC time per MMU time slice") \ 28.1400 + "Adaptive size policy maximum GC pause time goal in millisecond, "\ 28.1401 + "or (G1 Only) the maximum GC time per MMU time slice") \ 28.1402 \ 28.1403 product(uintx, GCPauseIntervalMillis, 0, \ 28.1404 "Time slice for MMU specification") \ 28.1405 \ 28.1406 product(uintx, MaxGCMinorPauseMillis, max_uintx, \ 28.1407 - "Adaptive size policy maximum GC minor pause time goal in msec") \ 28.1408 + "Adaptive size policy maximum GC minor pause time goal " \ 28.1409 + "in millisecond") \ 28.1410 \ 28.1411 product(uintx, GCTimeRatio, 99, \ 28.1412 "Adaptive size policy application time to GC time ratio") \ 28.1413 @@ -2159,8 +2185,8 @@ 28.1414 "before an OutOfMemory error is thrown") \ 28.1415 \ 28.1416 product(uintx, GCTimeLimit, 98, \ 28.1417 - "Limit of proportion of time spent in GC before an OutOfMemory" \ 28.1418 - "error is thrown (used with GCHeapFreeLimit)") \ 28.1419 + "Limit of the proportion of time spent in GC before " \ 28.1420 + "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \ 28.1421 \ 28.1422 product(uintx, GCHeapFreeLimit, 2, \ 28.1423 "Minimum percentage of free space after a full GC before an " \ 28.1424 @@ -2182,7 +2208,7 @@ 28.1425 "How many fields ahead to prefetch in oop scan (<= 0 means off)") \ 28.1426 \ 28.1427 diagnostic(bool, VerifySilently, false, \ 28.1428 - "Don't print print the verification progress") \ 28.1429 + "Do not print the verification progress") \ 28.1430 \ 28.1431 diagnostic(bool, VerifyDuringStartup, false, \ 28.1432 "Verify memory system before executing any Java code " \ 28.1433 @@ -2205,7 +2231,7 @@ 28.1434 \ 28.1435 diagnostic(bool, DeferInitialCardMark, false, \ 28.1436 "When +ReduceInitialCardMarks, explicitly defer any that " \ 28.1437 - "may arise from new_pre_store_barrier") \ 28.1438 + "may arise from new_pre_store_barrier") \ 28.1439 
\ 28.1440 diagnostic(bool, VerifyRememberedSets, false, \ 28.1441 "Verify GC remembered sets") \ 28.1442 @@ -2214,10 +2240,10 @@ 28.1443 "Verify GC object start array if verify before/after") \ 28.1444 \ 28.1445 product(bool, DisableExplicitGC, false, \ 28.1446 - "Tells whether calling System.gc() does a full GC") \ 28.1447 + "Ignore calls to System.gc()") \ 28.1448 \ 28.1449 notproduct(bool, CheckMemoryInitialization, false, \ 28.1450 - "Checks memory initialization") \ 28.1451 + "Check memory initialization") \ 28.1452 \ 28.1453 product(bool, CollectGen0First, false, \ 28.1454 "Collect youngest generation before each full GC") \ 28.1455 @@ -2238,44 +2264,45 @@ 28.1456 "Stride through processors when distributing processes") \ 28.1457 \ 28.1458 product(uintx, CMSCoordinatorYieldSleepCount, 10, \ 28.1459 - "number of times the coordinator GC thread will sleep while " \ 28.1460 + "Number of times the coordinator GC thread will sleep while " \ 28.1461 "yielding before giving up and resuming GC") \ 28.1462 \ 28.1463 product(uintx, CMSYieldSleepCount, 0, \ 28.1464 - "number of times a GC thread (minus the coordinator) " \ 28.1465 + "Number of times a GC thread (minus the coordinator) " \ 28.1466 "will sleep while yielding before giving up and resuming GC") \ 28.1467 \ 28.1468 /* gc tracing */ \ 28.1469 manageable(bool, PrintGC, false, \ 28.1470 - "Print message at garbage collect") \ 28.1471 + "Print message at garbage collection") \ 28.1472 \ 28.1473 manageable(bool, PrintGCDetails, false, \ 28.1474 - "Print more details at garbage collect") \ 28.1475 + "Print more details at garbage collection") \ 28.1476 \ 28.1477 manageable(bool, PrintGCDateStamps, false, \ 28.1478 - "Print date stamps at garbage collect") \ 28.1479 + "Print date stamps at garbage collection") \ 28.1480 \ 28.1481 manageable(bool, PrintGCTimeStamps, false, \ 28.1482 - "Print timestamps at garbage collect") \ 28.1483 + "Print timestamps at garbage collection") \ 28.1484 \ 28.1485 product(bool, PrintGCTaskTimeStamps, false, \ 28.1486 "Print timestamps for individual gc worker thread tasks") \ 28.1487 \ 28.1488 develop(intx, ConcGCYieldTimeout, 0, \ 28.1489 - "If non-zero, assert that GC threads yield within this # of ms.") \ 28.1490 + "If non-zero, assert that GC threads yield within this " \ 28.1491 + "number of milliseconds") \ 28.1492 \ 28.1493 notproduct(bool, TraceMarkSweep, false, \ 28.1494 "Trace mark sweep") \ 28.1495 \ 28.1496 product(bool, PrintReferenceGC, false, \ 28.1497 "Print times spent handling reference objects during GC " \ 28.1498 - " (enabled only when PrintGCDetails)") \ 28.1499 + "(enabled only when PrintGCDetails)") \ 28.1500 \ 28.1501 develop(bool, TraceReferenceGC, false, \ 28.1502 "Trace handling of soft/weak/final/phantom references") \ 28.1503 \ 28.1504 develop(bool, TraceFinalizerRegistration, false, \ 28.1505 - "Trace registration of final references") \ 28.1506 + "Trace registration of final references") \ 28.1507 \ 28.1508 notproduct(bool, TraceScavenge, false, \ 28.1509 "Trace scavenge") \ 28.1510 @@ -2312,7 +2339,7 @@ 28.1511 "Print heap layout before and after each GC") \ 28.1512 \ 28.1513 product_rw(bool, PrintHeapAtGCExtended, false, \ 28.1514 - "Prints extended information about the layout of the heap " \ 28.1515 + "Print extended information about the layout of the heap " \ 28.1516 "when -XX:+PrintHeapAtGC is set") \ 28.1517 \ 28.1518 product(bool, PrintHeapAtSIGBREAK, true, \ 28.1519 @@ -2349,45 +2376,45 @@ 28.1520 "Trace actions of the GC task threads") \ 28.1521 \ 28.1522 
product(bool, PrintParallelOldGCPhaseTimes, false, \ 28.1523 - "Print the time taken by each parallel old gc phase." \ 28.1524 - "PrintGCDetails must also be enabled.") \ 28.1525 + "Print the time taken by each phase in ParallelOldGC " \ 28.1526 + "(PrintGCDetails must also be enabled)") \ 28.1527 \ 28.1528 develop(bool, TraceParallelOldGCMarkingPhase, false, \ 28.1529 - "Trace parallel old gc marking phase") \ 28.1530 + "Trace marking phase in ParallelOldGC") \ 28.1531 \ 28.1532 develop(bool, TraceParallelOldGCSummaryPhase, false, \ 28.1533 - "Trace parallel old gc summary phase") \ 28.1534 + "Trace summary phase in ParallelOldGC") \ 28.1535 \ 28.1536 develop(bool, TraceParallelOldGCCompactionPhase, false, \ 28.1537 - "Trace parallel old gc compaction phase") \ 28.1538 + "Trace compaction phase in ParallelOldGC") \ 28.1539 \ 28.1540 develop(bool, TraceParallelOldGCDensePrefix, false, \ 28.1541 - "Trace parallel old gc dense prefix computation") \ 28.1542 + "Trace dense prefix computation for ParallelOldGC") \ 28.1543 \ 28.1544 develop(bool, IgnoreLibthreadGPFault, false, \ 28.1545 "Suppress workaround for libthread GP fault") \ 28.1546 \ 28.1547 product(bool, PrintJNIGCStalls, false, \ 28.1548 - "Print diagnostic message when GC is stalled" \ 28.1549 + "Print diagnostic message when GC is stalled " \ 28.1550 "by JNI critical section") \ 28.1551 \ 28.1552 experimental(double, ObjectCountCutOffPercent, 0.5, \ 28.1553 "The percentage of the used heap that the instances of a class " \ 28.1554 - "must occupy for the class to generate a trace event.") \ 28.1555 + "must occupy for the class to generate a trace event") \ 28.1556 \ 28.1557 /* GC log rotation setting */ \ 28.1558 \ 28.1559 product(bool, UseGCLogFileRotation, false, \ 28.1560 - "Prevent large gclog file for long running app. " \ 28.1561 - "Requires -Xloggc:<filename>") \ 28.1562 + "Rotate gclog files (for long running applications). It requires "\ 28.1563 + "-Xloggc:<filename>") \ 28.1564 \ 28.1565 product(uintx, NumberOfGCLogFiles, 0, \ 28.1566 - "Number of gclog files in rotation, " \ 28.1567 - "Default: 0, no rotation") \ 28.1568 + "Number of gclog files in rotation " \ 28.1569 + "(default: 0, no rotation)") \ 28.1570 \ 28.1571 product(uintx, GCLogFileSize, 0, \ 28.1572 - "GC log file size, Default: 0 bytes, no rotation " \ 28.1573 - "Only valid with UseGCLogFileRotation") \ 28.1574 + "GC log file size (default: 0 bytes, no rotation). 
" \ 28.1575 + "It requires UseGCLogFileRotation") \ 28.1576 \ 28.1577 /* JVMTI heap profiling */ \ 28.1578 \ 28.1579 @@ -2464,40 +2491,40 @@ 28.1580 "Generate range checks for array accesses") \ 28.1581 \ 28.1582 develop_pd(bool, ImplicitNullChecks, \ 28.1583 - "generate code for implicit null checks") \ 28.1584 + "Generate code for implicit null checks") \ 28.1585 \ 28.1586 product(bool, PrintSafepointStatistics, false, \ 28.1587 - "print statistics about safepoint synchronization") \ 28.1588 + "Print statistics about safepoint synchronization") \ 28.1589 \ 28.1590 product(intx, PrintSafepointStatisticsCount, 300, \ 28.1591 - "total number of safepoint statistics collected " \ 28.1592 + "Total number of safepoint statistics collected " \ 28.1593 "before printing them out") \ 28.1594 \ 28.1595 product(intx, PrintSafepointStatisticsTimeout, -1, \ 28.1596 - "print safepoint statistics only when safepoint takes" \ 28.1597 - " more than PrintSafepointSatisticsTimeout in millis") \ 28.1598 + "Print safepoint statistics only when safepoint takes " \ 28.1599 + "more than PrintSafepointSatisticsTimeout in millis") \ 28.1600 \ 28.1601 product(bool, TraceSafepointCleanupTime, false, \ 28.1602 - "print the break down of clean up tasks performed during" \ 28.1603 - " safepoint") \ 28.1604 + "Print the break down of clean up tasks performed during " \ 28.1605 + "safepoint") \ 28.1606 \ 28.1607 product(bool, Inline, true, \ 28.1608 - "enable inlining") \ 28.1609 + "Enable inlining") \ 28.1610 \ 28.1611 product(bool, ClipInlining, true, \ 28.1612 - "clip inlining if aggregate method exceeds DesiredMethodLimit") \ 28.1613 + "Clip inlining if aggregate method exceeds DesiredMethodLimit") \ 28.1614 \ 28.1615 develop(bool, UseCHA, true, \ 28.1616 - "enable CHA") \ 28.1617 + "Enable CHA") \ 28.1618 \ 28.1619 product(bool, UseTypeProfile, true, \ 28.1620 "Check interpreter profile for historically monomorphic calls") \ 28.1621 \ 28.1622 notproduct(bool, TimeCompiler, false, \ 28.1623 - "time the compiler") \ 28.1624 + "Time the compiler") \ 28.1625 \ 28.1626 diagnostic(bool, PrintInlining, false, \ 28.1627 - "prints inlining optimizations") \ 28.1628 + "Print inlining optimizations") \ 28.1629 \ 28.1630 product(bool, UsePopCountInstruction, false, \ 28.1631 "Use population count instruction") \ 28.1632 @@ -2509,57 +2536,59 @@ 28.1633 "Print when methods are replaced do to recompilation") \ 28.1634 \ 28.1635 develop(bool, PrintMethodFlushing, false, \ 28.1636 - "print the nmethods being flushed") \ 28.1637 + "Print the nmethods being flushed") \ 28.1638 \ 28.1639 develop(bool, UseRelocIndex, false, \ 28.1640 - "use an index to speed random access to relocations") \ 28.1641 + "Use an index to speed random access to relocations") \ 28.1642 \ 28.1643 develop(bool, StressCodeBuffers, false, \ 28.1644 - "Exercise code buffer expansion and other rare state changes") \ 28.1645 + "Exercise code buffer expansion and other rare state changes") \ 28.1646 \ 28.1647 diagnostic(bool, DebugNonSafepoints, trueInDebug, \ 28.1648 - "Generate extra debugging info for non-safepoints in nmethods") \ 28.1649 + "Generate extra debugging information for non-safepoints in " \ 28.1650 + "nmethods") \ 28.1651 \ 28.1652 product(bool, PrintVMOptions, false, \ 28.1653 - "Print flags that appeared on the command line") \ 28.1654 + "Print flags that appeared on the command line") \ 28.1655 \ 28.1656 product(bool, IgnoreUnrecognizedVMOptions, false, \ 28.1657 - "Ignore unrecognized VM options") \ 28.1658 + "Ignore unrecognized VM options") \ 
28.1659 \ 28.1660 product(bool, PrintCommandLineFlags, false, \ 28.1661 - "Print flags specified on command line or set by ergonomics") \ 28.1662 + "Print flags specified on command line or set by ergonomics") \ 28.1663 \ 28.1664 product(bool, PrintFlagsInitial, false, \ 28.1665 - "Print all VM flags before argument processing and exit VM") \ 28.1666 + "Print all VM flags before argument processing and exit VM") \ 28.1667 \ 28.1668 product(bool, PrintFlagsFinal, false, \ 28.1669 - "Print all VM flags after argument and ergonomic processing") \ 28.1670 + "Print all VM flags after argument and ergonomic processing") \ 28.1671 \ 28.1672 notproduct(bool, PrintFlagsWithComments, false, \ 28.1673 - "Print all VM flags with default values and descriptions and exit")\ 28.1674 + "Print all VM flags with default values and descriptions and " \ 28.1675 + "exit") \ 28.1676 \ 28.1677 diagnostic(bool, SerializeVMOutput, true, \ 28.1678 - "Use a mutex to serialize output to tty and LogFile") \ 28.1679 + "Use a mutex to serialize output to tty and LogFile") \ 28.1680 \ 28.1681 diagnostic(bool, DisplayVMOutput, true, \ 28.1682 - "Display all VM output on the tty, independently of LogVMOutput") \ 28.1683 + "Display all VM output on the tty, independently of LogVMOutput") \ 28.1684 \ 28.1685 diagnostic(bool, LogVMOutput, false, \ 28.1686 - "Save VM output to LogFile") \ 28.1687 + "Save VM output to LogFile") \ 28.1688 \ 28.1689 diagnostic(ccstr, LogFile, NULL, \ 28.1690 - "If LogVMOutput or LogCompilation is on, save VM output to " \ 28.1691 - "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \ 28.1692 + "If LogVMOutput or LogCompilation is on, save VM output to " \ 28.1693 + "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ 28.1694 \ 28.1695 product(ccstr, ErrorFile, NULL, \ 28.1696 - "If an error occurs, save the error data to this file " \ 28.1697 - "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 28.1698 + "If an error occurs, save the error data to this file " \ 28.1699 + "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ 28.1700 \ 28.1701 product(bool, DisplayVMOutputToStderr, false, \ 28.1702 - "If DisplayVMOutput is true, display all VM output to stderr") \ 28.1703 + "If DisplayVMOutput is true, display all VM output to stderr") \ 28.1704 \ 28.1705 product(bool, DisplayVMOutputToStdout, false, \ 28.1706 - "If DisplayVMOutput is true, display all VM output to stdout") \ 28.1707 + "If DisplayVMOutput is true, display all VM output to stdout") \ 28.1708 \ 28.1709 product(bool, UseHeavyMonitors, false, \ 28.1710 "use heavyweight instead of lightweight Java monitors") \ 28.1711 @@ -2583,7 +2612,7 @@ 28.1712 \ 28.1713 notproduct(ccstr, AbortVMOnExceptionMessage, NULL, \ 28.1714 "Call fatal if the exception pointed by AbortVMOnException " \ 28.1715 - "has this message.") \ 28.1716 + "has this message") \ 28.1717 \ 28.1718 develop(bool, DebugVtables, false, \ 28.1719 "add debugging code to vtable dispatch") \ 28.1720 @@ -2650,29 +2679,29 @@ 28.1721 \ 28.1722 /* statistics */ \ 28.1723 develop(bool, CountCompiledCalls, false, \ 28.1724 - "counts method invocations") \ 28.1725 + "Count method invocations") \ 28.1726 \ 28.1727 notproduct(bool, CountRuntimeCalls, false, \ 28.1728 - "counts VM runtime calls") \ 28.1729 + "Count VM runtime calls") \ 28.1730 \ 28.1731 develop(bool, CountJNICalls, false, \ 28.1732 - "counts jni method invocations") \ 28.1733 + "Count jni method invocations") \ 28.1734 \ 28.1735 notproduct(bool, CountJVMCalls, false, \ 28.1736 - 
"counts jvm method invocations") \ 28.1737 + "Count jvm method invocations") \ 28.1738 \ 28.1739 notproduct(bool, CountRemovableExceptions, false, \ 28.1740 - "count exceptions that could be replaced by branches due to " \ 28.1741 + "Count exceptions that could be replaced by branches due to " \ 28.1742 "inlining") \ 28.1743 \ 28.1744 notproduct(bool, ICMissHistogram, false, \ 28.1745 - "produce histogram of IC misses") \ 28.1746 + "Produce histogram of IC misses") \ 28.1747 \ 28.1748 notproduct(bool, PrintClassStatistics, false, \ 28.1749 - "prints class statistics at end of run") \ 28.1750 + "Print class statistics at end of run") \ 28.1751 \ 28.1752 notproduct(bool, PrintMethodStatistics, false, \ 28.1753 - "prints method statistics at end of run") \ 28.1754 + "Print method statistics at end of run") \ 28.1755 \ 28.1756 /* interpreter */ \ 28.1757 develop(bool, ClearInterpreterLocals, false, \ 28.1758 @@ -2686,7 +2715,7 @@ 28.1759 "Rewrite frequently used bytecode pairs into a single bytecode") \ 28.1760 \ 28.1761 diagnostic(bool, PrintInterpreter, false, \ 28.1762 - "Prints the generated interpreter code") \ 28.1763 + "Print the generated interpreter code") \ 28.1764 \ 28.1765 product(bool, UseInterpreter, true, \ 28.1766 "Use interpreter for non-compiled methods") \ 28.1767 @@ -2704,8 +2733,8 @@ 28.1768 "Use fast method entry code for accessor methods") \ 28.1769 \ 28.1770 product_pd(bool, UseOnStackReplacement, \ 28.1771 - "Use on stack replacement, calls runtime if invoc. counter " \ 28.1772 - "overflows in loop") \ 28.1773 + "Use on stack replacement, calls runtime if invoc. counter " \ 28.1774 + "overflows in loop") \ 28.1775 \ 28.1776 notproduct(bool, TraceOnStackReplacement, false, \ 28.1777 "Trace on stack replacement") \ 28.1778 @@ -2753,10 +2782,10 @@ 28.1779 "Trace frequency based inlining") \ 28.1780 \ 28.1781 develop_pd(bool, InlineIntrinsics, \ 28.1782 - "Inline intrinsics that can be statically resolved") \ 28.1783 + "Inline intrinsics that can be statically resolved") \ 28.1784 \ 28.1785 product_pd(bool, ProfileInterpreter, \ 28.1786 - "Profile at the bytecode level during interpretation") \ 28.1787 + "Profile at the bytecode level during interpretation") \ 28.1788 \ 28.1789 develop_pd(bool, ProfileTraps, \ 28.1790 "Profile deoptimization traps at the bytecode level") \ 28.1791 @@ -2766,7 +2795,7 @@ 28.1792 "CompileThreshold) before using the method's profile") \ 28.1793 \ 28.1794 develop(bool, PrintMethodData, false, \ 28.1795 - "Print the results of +ProfileInterpreter at end of run") \ 28.1796 + "Print the results of +ProfileInterpreter at end of run") \ 28.1797 \ 28.1798 develop(bool, VerifyDataPointer, trueInDebug, \ 28.1799 "Verify the method data pointer during interpreter profiling") \ 28.1800 @@ -2781,7 +2810,7 @@ 28.1801 \ 28.1802 /* compilation */ \ 28.1803 product(bool, UseCompiler, true, \ 28.1804 - "use compilation") \ 28.1805 + "Use Just-In-Time compilation") \ 28.1806 \ 28.1807 develop(bool, TraceCompilationPolicy, false, \ 28.1808 "Trace compilation policy") \ 28.1809 @@ -2790,20 +2819,21 @@ 28.1810 "Time the compilation policy") \ 28.1811 \ 28.1812 product(bool, UseCounterDecay, true, \ 28.1813 - "adjust recompilation counters") \ 28.1814 + "Adjust recompilation counters") \ 28.1815 \ 28.1816 develop(intx, CounterHalfLifeTime, 30, \ 28.1817 - "half-life time of invocation counters (in secs)") \ 28.1818 + "Half-life time of invocation counters (in seconds)") \ 28.1819 \ 28.1820 develop(intx, CounterDecayMinIntervalLength, 500, \ 28.1821 - "Min. ms. 
between invocation of CounterDecay") \ 28.1822 + "The minimum interval (in milliseconds) between invocation of " \ 28.1823 + "CounterDecay") \ 28.1824 \ 28.1825 product(bool, AlwaysCompileLoopMethods, false, \ 28.1826 - "when using recompilation, never interpret methods " \ 28.1827 + "When using recompilation, never interpret methods " \ 28.1828 "containing loops") \ 28.1829 \ 28.1830 product(bool, DontCompileHugeMethods, true, \ 28.1831 - "don't compile methods > HugeMethodLimit") \ 28.1832 + "Do not compile methods > HugeMethodLimit") \ 28.1833 \ 28.1834 /* Bytecode escape analysis estimation. */ \ 28.1835 product(bool, EstimateArgEscape, true, \ 28.1836 @@ -2813,10 +2843,10 @@ 28.1837 "How much tracing to do of bytecode escape analysis estimates") \ 28.1838 \ 28.1839 product(intx, MaxBCEAEstimateLevel, 5, \ 28.1840 - "Maximum number of nested calls that are analyzed by BC EA.") \ 28.1841 + "Maximum number of nested calls that are analyzed by BC EA") \ 28.1842 \ 28.1843 product(intx, MaxBCEAEstimateSize, 150, \ 28.1844 - "Maximum bytecode size of a method to be analyzed by BC EA.") \ 28.1845 + "Maximum bytecode size of a method to be analyzed by BC EA") \ 28.1846 \ 28.1847 product(intx, AllocatePrefetchStyle, 1, \ 28.1848 "0 = no prefetch, " \ 28.1849 @@ -2831,7 +2861,8 @@ 28.1850 "Number of lines to prefetch ahead of array allocation pointer") \ 28.1851 \ 28.1852 product(intx, AllocateInstancePrefetchLines, 1, \ 28.1853 - "Number of lines to prefetch ahead of instance allocation pointer") \ 28.1854 + "Number of lines to prefetch ahead of instance allocation " \ 28.1855 + "pointer") \ 28.1856 \ 28.1857 product(intx, AllocatePrefetchStepSize, 16, \ 28.1858 "Step size in bytes of sequential prefetch instructions") \ 28.1859 @@ -2851,8 +2882,8 @@ 28.1860 "(0 means off)") \ 28.1861 \ 28.1862 product(intx, MaxJavaStackTraceDepth, 1024, \ 28.1863 - "Max. no. of lines in the stack trace for Java exceptions " \ 28.1864 - "(0 means all)") \ 28.1865 + "The maximum number of lines in the stack trace for Java " \ 28.1866 + "exceptions (0 means all)") \ 28.1867 \ 28.1868 NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \ 28.1869 "Guarantee a safepoint (at least) every so many milliseconds " \ 28.1870 @@ -2876,10 +2907,10 @@ 28.1871 "result in more aggressive sweeping") \ 28.1872 \ 28.1873 notproduct(bool, LogSweeper, false, \ 28.1874 - "Keep a ring buffer of sweeper activity") \ 28.1875 + "Keep a ring buffer of sweeper activity") \ 28.1876 \ 28.1877 notproduct(intx, SweeperLogEntries, 1024, \ 28.1878 - "Number of records in the ring buffer of sweeper activity") \ 28.1879 + "Number of records in the ring buffer of sweeper activity") \ 28.1880 \ 28.1881 notproduct(intx, MemProfilingInterval, 500, \ 28.1882 "Time between each invocation of the MemProfiler") \ 28.1883 @@ -2922,34 +2953,35 @@ 28.1884 "less than this") \ 28.1885 \ 28.1886 product(intx, MaxInlineSize, 35, \ 28.1887 - "maximum bytecode size of a method to be inlined") \ 28.1888 + "The maximum bytecode size of a method to be inlined") \ 28.1889 \ 28.1890 product_pd(intx, FreqInlineSize, \ 28.1891 - "maximum bytecode size of a frequent method to be inlined") \ 28.1892 + "The maximum bytecode size of a frequent method to be inlined") \ 28.1893 \ 28.1894 product(intx, MaxTrivialSize, 6, \ 28.1895 - "maximum bytecode size of a trivial method to be inlined") \ 28.1896 + "The maximum bytecode size of a trivial method to be inlined") \ 28.1897 \ 28.1898 product(intx, MinInliningThreshold, 250, \ 28.1899 - "min. 
invocation count a method needs to have to be inlined") \ 28.1900 + "The minimum invocation count a method needs to have to be " \ 28.1901 + "inlined") \ 28.1902 \ 28.1903 develop(intx, MethodHistogramCutoff, 100, \ 28.1904 - "cutoff value for method invoc. histogram (+CountCalls)") \ 28.1905 + "The cutoff value for method invocation histogram (+CountCalls)") \ 28.1906 \ 28.1907 develop(intx, ProfilerNumberOfInterpretedMethods, 25, \ 28.1908 - "# of interpreted methods to show in profile") \ 28.1909 + "Number of interpreted methods to show in profile") \ 28.1910 \ 28.1911 develop(intx, ProfilerNumberOfCompiledMethods, 25, \ 28.1912 - "# of compiled methods to show in profile") \ 28.1913 + "Number of compiled methods to show in profile") \ 28.1914 \ 28.1915 develop(intx, ProfilerNumberOfStubMethods, 25, \ 28.1916 - "# of stub methods to show in profile") \ 28.1917 + "Number of stub methods to show in profile") \ 28.1918 \ 28.1919 develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ 28.1920 - "# of runtime stub nodes to show in profile") \ 28.1921 + "Number of runtime stub nodes to show in profile") \ 28.1922 \ 28.1923 product(intx, ProfileIntervalsTicks, 100, \ 28.1924 - "# of ticks between printing of interval profile " \ 28.1925 + "Number of ticks between printing of interval profile " \ 28.1926 "(+ProfileIntervals)") \ 28.1927 \ 28.1928 notproduct(intx, ScavengeALotInterval, 1, \ 28.1929 @@ -2970,7 +3002,7 @@ 28.1930 \ 28.1931 develop(intx, MinSleepInterval, 1, \ 28.1932 "Minimum sleep() interval (milliseconds) when " \ 28.1933 - "ConvertSleepToYield is off (used for SOLARIS)") \ 28.1934 + "ConvertSleepToYield is off (used for Solaris)") \ 28.1935 \ 28.1936 develop(intx, ProfilerPCTickThreshold, 15, \ 28.1937 "Number of ticks in a PC buckets to be a hotspot") \ 28.1938 @@ -2985,22 +3017,22 @@ 28.1939 "Mark nmethods non-entrant at registration") \ 28.1940 \ 28.1941 diagnostic(intx, MallocVerifyInterval, 0, \ 28.1942 - "if non-zero, verify C heap after every N calls to " \ 28.1943 + "If non-zero, verify C heap after every N calls to " \ 28.1944 "malloc/realloc/free") \ 28.1945 \ 28.1946 diagnostic(intx, MallocVerifyStart, 0, \ 28.1947 - "if non-zero, start verifying C heap after Nth call to " \ 28.1948 + "If non-zero, start verifying C heap after Nth call to " \ 28.1949 "malloc/realloc/free") \ 28.1950 \ 28.1951 diagnostic(uintx, MallocMaxTestWords, 0, \ 28.1952 - "if non-zero, max # of Words that malloc/realloc can allocate " \ 28.1953 - "(for testing only)") \ 28.1954 + "If non-zero, maximum number of words that malloc/realloc can " \ 28.1955 + "allocate (for testing only)") \ 28.1956 \ 28.1957 product(intx, TypeProfileWidth, 2, \ 28.1958 - "number of receiver types to record in call/cast profile") \ 28.1959 + "Number of receiver types to record in call/cast profile") \ 28.1960 \ 28.1961 develop(intx, BciProfileWidth, 2, \ 28.1962 - "number of return bci's to record in ret profile") \ 28.1963 + "Number of return bci's to record in ret profile") \ 28.1964 \ 28.1965 product(intx, PerMethodRecompilationCutoff, 400, \ 28.1966 "After recompiling N times, stay in the interpreter (-1=>'Inf')") \ 28.1967 @@ -3067,7 +3099,7 @@ 28.1968 "Percentage of Eden that can be wasted") \ 28.1969 \ 28.1970 product(uintx, TLABRefillWasteFraction, 64, \ 28.1971 - "Max TLAB waste at a refill (internal fragmentation)") \ 28.1972 + "Maximum TLAB waste at a refill (internal fragmentation)") \ 28.1973 \ 28.1974 product(uintx, TLABWasteIncrement, 4, \ 28.1975 "Increment allowed waste at slow allocation") \ 28.1976 
@@ -3076,7 +3108,7 @@ 28.1977 "Ratio of eden/survivor space size") \ 28.1978 \ 28.1979 product(uintx, NewRatio, 2, \ 28.1980 - "Ratio of new/old generation sizes") \ 28.1981 + "Ratio of old/new generation sizes") \ 28.1982 \ 28.1983 product_pd(uintx, NewSizeThreadIncrease, \ 28.1984 "Additional size added to desired new generation size per " \ 28.1985 @@ -3093,28 +3125,30 @@ 28.1986 "class pointers are used") \ 28.1987 \ 28.1988 product(uintx, MinHeapFreeRatio, 40, \ 28.1989 - "Min percentage of heap free after GC to avoid expansion") \ 28.1990 + "The minimum percentage of heap free after GC to avoid expansion")\ 28.1991 \ 28.1992 product(uintx, MaxHeapFreeRatio, 70, \ 28.1993 - "Max percentage of heap free after GC to avoid shrinking") \ 28.1994 + "The maximum percentage of heap free after GC to avoid shrinking")\ 28.1995 \ 28.1996 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ 28.1997 "Number of milliseconds per MB of free space in the heap") \ 28.1998 \ 28.1999 product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ 28.2000 - "Min change in heap space due to GC (in bytes)") \ 28.2001 + "The minimum change in heap space due to GC (in bytes)") \ 28.2002 \ 28.2003 product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ 28.2004 - "Min expansion of Metaspace (in bytes)") \ 28.2005 + "The minimum expansion of Metaspace (in bytes)") \ 28.2006 \ 28.2007 product(uintx, MinMetaspaceFreeRatio, 40, \ 28.2008 - "Min percentage of Metaspace free after GC to avoid expansion") \ 28.2009 + "The minimum percentage of Metaspace free after GC to avoid " \ 28.2010 + "expansion") \ 28.2011 \ 28.2012 product(uintx, MaxMetaspaceFreeRatio, 70, \ 28.2013 - "Max percentage of Metaspace free after GC to avoid shrinking") \ 28.2014 + "The maximum percentage of Metaspace free after GC to avoid " \ 28.2015 + "shrinking") \ 28.2016 \ 28.2017 product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ 28.2018 - "Max expansion of Metaspace without full GC (in bytes)") \ 28.2019 + "The maximum expansion of Metaspace without full GC (in bytes)") \ 28.2020 \ 28.2021 product(uintx, QueuedAllocationWarningCount, 0, \ 28.2022 "Number of times an allocation that queues behind a GC " \ 28.2023 @@ -3136,13 +3170,14 @@ 28.2024 "Desired percentage of survivor space used after scavenge") \ 28.2025 \ 28.2026 product(uintx, MarkSweepDeadRatio, 5, \ 28.2027 - "Percentage (0-100) of the old gen allowed as dead wood." \ 28.2028 - "Serial mark sweep treats this as both the min and max value." \ 28.2029 - "CMS uses this value only if it falls back to mark sweep." \ 28.2030 - "Par compact uses a variable scale based on the density of the" \ 28.2031 - "generation and treats this as the max value when the heap is" \ 28.2032 - "either completely full or completely empty. Par compact also" \ 28.2033 - "has a smaller default value; see arguments.cpp.") \ 28.2034 + "Percentage (0-100) of the old gen allowed as dead wood. " \ 28.2035 + "Serial mark sweep treats this as both the minimum and maximum " \ 28.2036 + "value. " \ 28.2037 + "CMS uses this value only if it falls back to mark sweep. " \ 28.2038 + "Par compact uses a variable scale based on the density of the " \ 28.2039 + "generation and treats this as the maximum value when the heap " \ 28.2040 + "is either completely full or completely empty. 
Par compact " \ 28.2041 + "also has a smaller default value; see arguments.cpp.") \ 28.2042 \ 28.2043 product(uintx, MarkSweepAlwaysCompactCount, 4, \ 28.2044 "How often should we fully compact the heap (ignoring the dead " \ 28.2045 @@ -3161,27 +3196,27 @@ 28.2046 "Census for CMS' FreeListSpace") \ 28.2047 \ 28.2048 develop(uintx, GCExpandToAllocateDelayMillis, 0, \ 28.2049 - "Delay in ms between expansion and allocation") \ 28.2050 + "Delay between expansion and allocation (in milliseconds)") \ 28.2051 \ 28.2052 develop(uintx, GCWorkerDelayMillis, 0, \ 28.2053 - "Delay in ms in scheduling GC workers") \ 28.2054 + "Delay in scheduling GC workers (in milliseconds)") \ 28.2055 \ 28.2056 product(intx, DeferThrSuspendLoopCount, 4000, \ 28.2057 "(Unstable) Number of times to iterate in safepoint loop " \ 28.2058 - " before blocking VM threads ") \ 28.2059 + "before blocking VM threads ") \ 28.2060 \ 28.2061 product(intx, DeferPollingPageLoopCount, -1, \ 28.2062 "(Unsafe,Unstable) Number of iterations in safepoint loop " \ 28.2063 "before changing safepoint polling page to RO ") \ 28.2064 \ 28.2065 - product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 28.2066 + product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ 28.2067 \ 28.2068 product(bool, PSChunkLargeArrays, true, \ 28.2069 - "true: process large arrays in chunks") \ 28.2070 + "Process large arrays in chunks") \ 28.2071 \ 28.2072 product(uintx, GCDrainStackTargetSize, 64, \ 28.2073 - "how many entries we'll try to leave on the stack during " \ 28.2074 - "parallel GC") \ 28.2075 + "Number of entries we will try to leave on the stack " \ 28.2076 + "during parallel gc") \ 28.2077 \ 28.2078 /* stack parameters */ \ 28.2079 product_pd(intx, StackYellowPages, \ 28.2080 @@ -3191,8 +3226,8 @@ 28.2081 "Number of red zone (unrecoverable overflows) pages") \ 28.2082 \ 28.2083 product_pd(intx, StackShadowPages, \ 28.2084 - "Number of shadow zone (for overflow checking) pages" \ 28.2085 - " this should exceed the depth of the VM and native call stack") \ 28.2086 + "Number of shadow zone (for overflow checking) pages " \ 28.2087 + "this should exceed the depth of the VM and native call stack") \ 28.2088 \ 28.2089 product_pd(intx, ThreadStackSize, \ 28.2090 "Thread Stack Size (in Kbytes)") \ 28.2091 @@ -3232,16 +3267,16 @@ 28.2092 "Reserved code cache size (in bytes) - maximum code cache size") \ 28.2093 \ 28.2094 product(uintx, CodeCacheMinimumFreeSpace, 500*K, \ 28.2095 - "When less than X space left, we stop compiling.") \ 28.2096 + "When less than X space left, we stop compiling") \ 28.2097 \ 28.2098 product_pd(uintx, CodeCacheExpansionSize, \ 28.2099 "Code cache expansion size (in bytes)") \ 28.2100 \ 28.2101 develop_pd(uintx, CodeCacheMinBlockLength, \ 28.2102 - "Minimum number of segments in a code cache block.") \ 28.2103 + "Minimum number of segments in a code cache block") \ 28.2104 \ 28.2105 notproduct(bool, ExitOnFullCodeCache, false, \ 28.2106 - "Exit the VM if we fill the code cache.") \ 28.2107 + "Exit the VM if we fill the code cache") \ 28.2108 \ 28.2109 product(bool, UseCodeCacheFlushing, true, \ 28.2110 "Attempt to clean the code cache before shutting off compiler") \ 28.2111 @@ -3252,31 +3287,31 @@ 28.2112 "switch") \ 28.2113 \ 28.2114 develop(intx, StopInterpreterAt, 0, \ 28.2115 - "Stops interpreter execution at specified bytecode number") \ 28.2116 + "Stop interpreter execution at specified bytecode number") \ 28.2117 \ 28.2118 develop(intx, TraceBytecodesAt, 0, \ 28.2119 - "Traces bytecodes starting with 
specified bytecode number") \ 28.2120 + "Trace bytecodes starting with specified bytecode number") \ 28.2121 \ 28.2122 /* compiler interface */ \ 28.2123 develop(intx, CIStart, 0, \ 28.2124 - "the id of the first compilation to permit") \ 28.2125 + "The id of the first compilation to permit") \ 28.2126 \ 28.2127 develop(intx, CIStop, -1, \ 28.2128 - "the id of the last compilation to permit") \ 28.2129 + "The id of the last compilation to permit") \ 28.2130 \ 28.2131 develop(intx, CIStartOSR, 0, \ 28.2132 - "the id of the first osr compilation to permit " \ 28.2133 + "The id of the first osr compilation to permit " \ 28.2134 "(CICountOSR must be on)") \ 28.2135 \ 28.2136 develop(intx, CIStopOSR, -1, \ 28.2137 - "the id of the last osr compilation to permit " \ 28.2138 + "The id of the last osr compilation to permit " \ 28.2139 "(CICountOSR must be on)") \ 28.2140 \ 28.2141 develop(intx, CIBreakAtOSR, -1, \ 28.2142 - "id of osr compilation to break at") \ 28.2143 + "The id of osr compilation to break at") \ 28.2144 \ 28.2145 develop(intx, CIBreakAt, -1, \ 28.2146 - "id of compilation to break at") \ 28.2147 + "The id of compilation to break at") \ 28.2148 \ 28.2149 product(ccstrlist, CompileOnly, "", \ 28.2150 "List of methods (pkg/class.name) to restrict compilation to") \ 28.2151 @@ -3295,11 +3330,11 @@ 28.2152 "[default: ./replay_pid%p.log] (%p replaced with pid)") \ 28.2153 \ 28.2154 develop(intx, ReplaySuppressInitializers, 2, \ 28.2155 - "Controls handling of class initialization during replay" \ 28.2156 - "0 - don't do anything special" \ 28.2157 - "1 - treat all class initializers as empty" \ 28.2158 - "2 - treat class initializers for application classes as empty" \ 28.2159 - "3 - allow all class initializers to run during bootstrap but" \ 28.2160 + "Control handling of class initialization during replay: " \ 28.2161 + "0 - don't do anything special; " \ 28.2162 + "1 - treat all class initializers as empty; " \ 28.2163 + "2 - treat class initializers for application classes as empty; " \ 28.2164 + "3 - allow all class initializers to run during bootstrap but " \ 28.2165 " pretend they are empty after starting replay") \ 28.2166 \ 28.2167 develop(bool, ReplayIgnoreInitErrors, false, \ 28.2168 @@ -3328,14 +3363,15 @@ 28.2169 "0 : Normal. "\ 28.2170 " VM chooses priorities that are appropriate for normal "\ 28.2171 " applications. On Solaris NORM_PRIORITY and above are mapped "\ 28.2172 - " to normal native priority. Java priorities below NORM_PRIORITY"\ 28.2173 - " map to lower native priority values. On Windows applications"\ 28.2174 - " are allowed to use higher native priorities. However, with "\ 28.2175 - " ThreadPriorityPolicy=0, VM will not use the highest possible"\ 28.2176 - " native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may "\ 28.2177 - " interfere with system threads. On Linux thread priorities "\ 28.2178 - " are ignored because the OS does not support static priority "\ 28.2179 - " in SCHED_OTHER scheduling class which is the only choice for"\ 28.2180 + " to normal native priority. Java priorities below " \ 28.2181 + " NORM_PRIORITY map to lower native priority values. On "\ 28.2182 + " Windows applications are allowed to use higher native "\ 28.2183 + " priorities. However, with ThreadPriorityPolicy=0, VM will "\ 28.2184 + " not use the highest possible native priority, "\ 28.2185 + " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\ 28.2186 + " system threads. 
On Linux thread priorities are ignored "\ 28.2187 + " because the OS does not support static priority in "\ 28.2188 + " SCHED_OTHER scheduling class which is the only choice for "\ 28.2189 " non-root, non-realtime applications. "\ 28.2190 "1 : Aggressive. "\ 28.2191 " Java thread priorities map over to the entire range of "\ 28.2192 @@ -3366,16 +3402,35 @@ 28.2193 product(bool, VMThreadHintNoPreempt, false, \ 28.2194 "(Solaris only) Give VM thread an extra quanta") \ 28.2195 \ 28.2196 - product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2197 - product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2198 - product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2199 - product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2200 - product(intx, JavaPriority5_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2201 - product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2202 - product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2203 - product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2204 - product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \ 28.2205 - product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \ 28.2206 + product(intx, JavaPriority1_To_OSPriority, -1, \ 28.2207 + "Map Java priorities to OS priorities") \ 28.2208 + \ 28.2209 + product(intx, JavaPriority2_To_OSPriority, -1, \ 28.2210 + "Map Java priorities to OS priorities") \ 28.2211 + \ 28.2212 + product(intx, JavaPriority3_To_OSPriority, -1, \ 28.2213 + "Map Java priorities to OS priorities") \ 28.2214 + \ 28.2215 + product(intx, JavaPriority4_To_OSPriority, -1, \ 28.2216 + "Map Java priorities to OS priorities") \ 28.2217 + \ 28.2218 + product(intx, JavaPriority5_To_OSPriority, -1, \ 28.2219 + "Map Java priorities to OS priorities") \ 28.2220 + \ 28.2221 + product(intx, JavaPriority6_To_OSPriority, -1, \ 28.2222 + "Map Java priorities to OS priorities") \ 28.2223 + \ 28.2224 + product(intx, JavaPriority7_To_OSPriority, -1, \ 28.2225 + "Map Java priorities to OS priorities") \ 28.2226 + \ 28.2227 + product(intx, JavaPriority8_To_OSPriority, -1, \ 28.2228 + "Map Java priorities to OS priorities") \ 28.2229 + \ 28.2230 + product(intx, JavaPriority9_To_OSPriority, -1, \ 28.2231 + "Map Java priorities to OS priorities") \ 28.2232 + \ 28.2233 + product(intx, JavaPriority10_To_OSPriority,-1, \ 28.2234 + "Map Java priorities to OS priorities") \ 28.2235 \ 28.2236 experimental(bool, UseCriticalJavaThreadPriority, false, \ 28.2237 "Java thread priority 10 maps to critical scheduling priority") \ 28.2238 @@ -3406,37 +3461,38 @@ 28.2239 "Used with +TraceLongCompiles") \ 28.2240 \ 28.2241 product(intx, StarvationMonitorInterval, 200, \ 28.2242 - "Pause between each check in ms") \ 28.2243 + "Pause between each check (in milliseconds)") \ 28.2244 \ 28.2245 /* recompilation */ \ 28.2246 product_pd(intx, CompileThreshold, \ 28.2247 "number of interpreted method invocations before (re-)compiling") \ 28.2248 \ 28.2249 product_pd(intx, BackEdgeThreshold, \ 28.2250 - "Interpreter Back edge threshold at which an OSR compilation is invoked")\ 28.2251 + "Interpreter Back edge threshold at which an OSR compilation is " \ 28.2252 + "invoked") \ 28.2253 \ 28.2254 product(intx, Tier0InvokeNotifyFreqLog, 7, \ 
28.2255 - "Interpreter (tier 0) invocation notification frequency.") \ 28.2256 + "Interpreter (tier 0) invocation notification frequency") \ 28.2257 \ 28.2258 product(intx, Tier2InvokeNotifyFreqLog, 11, \ 28.2259 - "C1 without MDO (tier 2) invocation notification frequency.") \ 28.2260 + "C1 without MDO (tier 2) invocation notification frequency") \ 28.2261 \ 28.2262 product(intx, Tier3InvokeNotifyFreqLog, 10, \ 28.2263 "C1 with MDO profiling (tier 3) invocation notification " \ 28.2264 - "frequency.") \ 28.2265 + "frequency") \ 28.2266 \ 28.2267 product(intx, Tier23InlineeNotifyFreqLog, 20, \ 28.2268 "Inlinee invocation (tiers 2 and 3) notification frequency") \ 28.2269 \ 28.2270 product(intx, Tier0BackedgeNotifyFreqLog, 10, \ 28.2271 - "Interpreter (tier 0) invocation notification frequency.") \ 28.2272 + "Interpreter (tier 0) invocation notification frequency") \ 28.2273 \ 28.2274 product(intx, Tier2BackedgeNotifyFreqLog, 14, \ 28.2275 - "C1 without MDO (tier 2) invocation notification frequency.") \ 28.2276 + "C1 without MDO (tier 2) invocation notification frequency") \ 28.2277 \ 28.2278 product(intx, Tier3BackedgeNotifyFreqLog, 13, \ 28.2279 "C1 with MDO profiling (tier 3) invocation notification " \ 28.2280 - "frequency.") \ 28.2281 + "frequency") \ 28.2282 \ 28.2283 product(intx, Tier2CompileThreshold, 0, \ 28.2284 "threshold at which tier 2 compilation is invoked") \ 28.2285 @@ -3453,7 +3509,7 @@ 28.2286 \ 28.2287 product(intx, Tier3CompileThreshold, 2000, \ 28.2288 "Threshold at which tier 3 compilation is invoked (invocation " \ 28.2289 - "minimum must be satisfied.") \ 28.2290 + "minimum must be satisfied") \ 28.2291 \ 28.2292 product(intx, Tier3BackEdgeThreshold, 60000, \ 28.2293 "Back edge threshold at which tier 3 OSR compilation is invoked") \ 28.2294 @@ -3467,7 +3523,7 @@ 28.2295 \ 28.2296 product(intx, Tier4CompileThreshold, 15000, \ 28.2297 "Threshold at which tier 4 compilation is invoked (invocation " \ 28.2298 - "minimum must be satisfied.") \ 28.2299 + "minimum must be satisfied") \ 28.2300 \ 28.2301 product(intx, Tier4BackEdgeThreshold, 40000, \ 28.2302 "Back edge threshold at which tier 4 OSR compilation is invoked") \ 28.2303 @@ -3496,12 +3552,12 @@ 28.2304 "Stop at given compilation level") \ 28.2305 \ 28.2306 product(intx, Tier0ProfilingStartPercentage, 200, \ 28.2307 - "Start profiling in interpreter if the counters exceed tier 3" \ 28.2308 + "Start profiling in interpreter if the counters exceed tier 3 " \ 28.2309 "thresholds by the specified percentage") \ 28.2310 \ 28.2311 product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ 28.2312 - "Increase the compile threshold for C1 compilation if the code" \ 28.2313 - "cache is filled by the specified percentage.") \ 28.2314 + "Increase the compile threshold for C1 compilation if the code " \ 28.2315 + "cache is filled by the specified percentage") \ 28.2316 \ 28.2317 product(intx, TieredRateUpdateMinTime, 1, \ 28.2318 "Minimum rate sampling interval (in milliseconds)") \ 28.2319 @@ -3516,24 +3572,26 @@ 28.2320 "Print tiered events notifications") \ 28.2321 \ 28.2322 product_pd(intx, OnStackReplacePercentage, \ 28.2323 - "NON_TIERED number of method invocations/branches (expressed as %"\ 28.2324 - "of CompileThreshold) before (re-)compiling OSR code") \ 28.2325 + "NON_TIERED number of method invocations/branches (expressed as " \ 28.2326 + "% of CompileThreshold) before (re-)compiling OSR code") \ 28.2327 \ 28.2328 product(intx, InterpreterProfilePercentage, 33, \ 28.2329 - "NON_TIERED number of method 
invocations/branches (expressed as %"\ 28.2330 - "of CompileThreshold) before profiling in the interpreter") \ 28.2331 + "NON_TIERED number of method invocations/branches (expressed as " \ 28.2332 + "% of CompileThreshold) before profiling in the interpreter") \ 28.2333 \ 28.2334 develop(intx, MaxRecompilationSearchLength, 10, \ 28.2335 - "max. # frames to inspect searching for recompilee") \ 28.2336 + "The maximum number of frames to inspect when searching for " \ 28.2337 + "recompilee") \ 28.2338 \ 28.2339 develop(intx, MaxInterpretedSearchLength, 3, \ 28.2340 - "max. # interp. frames to skip when searching for recompilee") \ 28.2341 + "The maximum number of interpreted frames to skip when searching "\ 28.2342 + "for recompilee") \ 28.2343 \ 28.2344 develop(intx, DesiredMethodLimit, 8000, \ 28.2345 - "desired max. method size (in bytecodes) after inlining") \ 28.2346 + "The desired maximum method size (in bytecodes) after inlining") \ 28.2347 \ 28.2348 develop(intx, HugeMethodLimit, 8000, \ 28.2349 - "don't compile methods larger than this if " \ 28.2350 + "Don't compile methods larger than this if " \ 28.2351 "+DontCompileHugeMethods") \ 28.2352 \ 28.2353 /* New JDK 1.4 reflection implementation */ \ 28.2354 @@ -3555,7 +3613,7 @@ 28.2355 "in InvocationTargetException. See 6531596") \ 28.2356 \ 28.2357 develop(bool, VerifyLambdaBytecodes, false, \ 28.2358 - "Force verification of jdk 8 lambda metafactory bytecodes.") \ 28.2359 + "Force verification of jdk 8 lambda metafactory bytecodes") \ 28.2360 \ 28.2361 develop(intx, FastSuperclassLimit, 8, \ 28.2362 "Depth of hardwired instanceof accelerator array") \ 28.2363 @@ -3579,18 +3637,19 @@ 28.2364 /* flags for performance data collection */ \ 28.2365 \ 28.2366 product(bool, UsePerfData, falseInEmbedded, \ 28.2367 - "Flag to disable jvmstat instrumentation for performance testing" \ 28.2368 - "and problem isolation purposes.") \ 28.2369 + "Flag to disable jvmstat instrumentation for performance testing "\ 28.2370 + "and problem isolation purposes") \ 28.2371 \ 28.2372 product(bool, PerfDataSaveToFile, false, \ 28.2373 "Save PerfData memory to hsperfdata_<pid> file on exit") \ 28.2374 \ 28.2375 product(ccstr, PerfDataSaveFile, NULL, \ 28.2376 - "Save PerfData memory to the specified absolute pathname," \ 28.2377 - "%p in the file name if present will be replaced by pid") \ 28.2378 - \ 28.2379 - product(intx, PerfDataSamplingInterval, 50 /*ms*/, \ 28.2380 - "Data sampling interval in milliseconds") \ 28.2381 + "Save PerfData memory to the specified absolute pathname. 
" \ 28.2382 + "The string %p in the file name (if present) " \ 28.2383 + "will be replaced by pid") \ 28.2384 + \ 28.2385 + product(intx, PerfDataSamplingInterval, 50, \ 28.2386 + "Data sampling interval (in milliseconds)") \ 28.2387 \ 28.2388 develop(bool, PerfTraceDataCreation, false, \ 28.2389 "Trace creation of Performance Data Entries") \ 28.2390 @@ -3615,7 +3674,7 @@ 28.2391 "Bypass Win32 file system criteria checks (Windows Only)") \ 28.2392 \ 28.2393 product(intx, UnguardOnExecutionViolation, 0, \ 28.2394 - "Unguard page and retry on no-execute fault (Win32 only)" \ 28.2395 + "Unguard page and retry on no-execute fault (Win32 only) " \ 28.2396 "0=off, 1=conservative, 2=aggressive") \ 28.2397 \ 28.2398 /* Serviceability Support */ \ 28.2399 @@ -3624,7 +3683,7 @@ 28.2400 "Create JMX Management Server") \ 28.2401 \ 28.2402 product(bool, DisableAttachMechanism, false, \ 28.2403 - "Disable mechanism that allows tools to attach to this VM") \ 28.2404 + "Disable mechanism that allows tools to attach to this VM") \ 28.2405 \ 28.2406 product(bool, StartAttachListener, false, \ 28.2407 "Always start Attach Listener at VM startup") \ 28.2408 @@ -3647,9 +3706,9 @@ 28.2409 "Require shared spaces for metadata") \ 28.2410 \ 28.2411 product(bool, DumpSharedSpaces, false, \ 28.2412 - "Special mode: JVM reads a class list, loads classes, builds " \ 28.2413 - "shared spaces, and dumps the shared spaces to a file to be " \ 28.2414 - "used in future JVM runs.") \ 28.2415 + "Special mode: JVM reads a class list, loads classes, builds " \ 28.2416 + "shared spaces, and dumps the shared spaces to a file to be " \ 28.2417 + "used in future JVM runs") \ 28.2418 \ 28.2419 product(bool, PrintSharedSpaces, false, \ 28.2420 "Print usage of shared spaces") \ 28.2421 @@ -3722,7 +3781,7 @@ 28.2422 "Relax the access control checks in the verifier") \ 28.2423 \ 28.2424 diagnostic(bool, PrintDTraceDOF, false, \ 28.2425 - "Print the DTrace DOF passed to the system for JSDT probes") \ 28.2426 + "Print the DTrace DOF passed to the system for JSDT probes") \ 28.2427 \ 28.2428 product(uintx, StringTableSize, defaultStringTableSize, \ 28.2429 "Number of buckets in the interned String table") \ 28.2430 @@ -3738,8 +3797,8 @@ 28.2431 \ 28.2432 product(bool, UseVMInterruptibleIO, false, \ 28.2433 "(Unstable, Solaris-specific) Thread interrupt before or with " \ 28.2434 - "EINTR for I/O operations results in OS_INTRPT. The default value"\ 28.2435 - " of this flag is true for JDK 6 and earlier") \ 28.2436 + "EINTR for I/O operations results in OS_INTRPT. The default " \ 28.2437 + "value of this flag is true for JDK 6 and earlier") \ 28.2438 \ 28.2439 diagnostic(bool, WhiteBoxAPI, false, \ 28.2440 "Enable internal testing APIs") \ 28.2441 @@ -3760,6 +3819,7 @@ 28.2442 \ 28.2443 product(bool, EnableTracing, false, \ 28.2444 "Enable event-based tracing") \ 28.2445 + \ 28.2446 product(bool, UseLockedTracing, false, \ 28.2447 "Use locked-tracing when doing event-based tracing") 28.2448
29.1 --- a/src/share/vm/runtime/virtualspace.cpp Thu Oct 10 13:25:51 2013 -0700 29.2 +++ b/src/share/vm/runtime/virtualspace.cpp Fri Oct 11 08:27:21 2013 -0700 29.3 @@ -368,8 +368,15 @@ 29.4 29.5 29.6 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { 29.7 + const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1); 29.8 + return initialize_with_granularity(rs, committed_size, max_commit_granularity); 29.9 +} 29.10 + 29.11 +bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) { 29.12 if(!rs.is_reserved()) return false; // allocation failed. 29.13 assert(_low_boundary == NULL, "VirtualSpace already initialized"); 29.14 + assert(max_commit_granularity > 0, "Granularity must be non-zero."); 29.15 + 29.16 _low_boundary = rs.base(); 29.17 _high_boundary = low_boundary() + rs.size(); 29.18 29.19 @@ -390,7 +397,7 @@ 29.20 // No attempt is made to force large page alignment at the very top and 29.21 // bottom of the space if they are not aligned so already. 29.22 _lower_alignment = os::vm_page_size(); 29.23 - _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1); 29.24 + _middle_alignment = max_commit_granularity; 29.25 _upper_alignment = os::vm_page_size(); 29.26 29.27 // End of each region 29.28 @@ -966,17 +973,52 @@ 29.29 29.30 29.31 class TestVirtualSpace : AllStatic { 29.32 + enum TestLargePages { 29.33 + Default, 29.34 + Disable, 29.35 + Reserve, 29.36 + Commit 29.37 + }; 29.38 + 29.39 + static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) { 29.40 + switch(mode) { 29.41 + default: 29.42 + case Default: 29.43 + case Reserve: 29.44 + return ReservedSpace(reserve_size_aligned); 29.45 + case Disable: 29.46 + case Commit: 29.47 + return ReservedSpace(reserve_size_aligned, 29.48 + os::vm_allocation_granularity(), 29.49 + /* large */ false, /* exec */ false); 29.50 + } 29.51 + } 29.52 + 29.53 + static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) { 29.54 + switch(mode) { 29.55 + default: 29.56 + case Default: 29.57 + case Reserve: 29.58 + return vs.initialize(rs, 0); 29.59 + case Disable: 29.60 + return vs.initialize_with_granularity(rs, 0, os::vm_page_size()); 29.61 + case Commit: 29.62 + return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1)); 29.63 + } 29.64 + } 29.65 + 29.66 public: 29.67 - static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) { 29.68 + static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size, 29.69 + TestLargePages mode = Default) { 29.70 size_t granularity = os::vm_allocation_granularity(); 29.71 size_t reserve_size_aligned = align_size_up(reserve_size, granularity); 29.72 29.73 - ReservedSpace reserved(reserve_size_aligned); 29.74 + ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode); 29.75 29.76 assert(reserved.is_reserved(), "Must be"); 29.77 29.78 VirtualSpace vs; 29.79 - bool initialized = vs.initialize(reserved, 0); 29.80 + bool initialized = initialize_virtual_space(vs, reserved, mode); 29.81 assert(initialized, "Failed to initialize VirtualSpace"); 29.82 29.83 vs.expand_by(commit_size, false); 29.84 @@ -986,7 +1028,10 @@ 29.85 } else { 29.86 assert_ge(vs.actual_committed_size(), commit_size); 29.87 // Approximate the commit granularity. 29.88 - size_t commit_granularity = UseLargePages ? 
os::large_page_size() : os::vm_page_size(); 29.89 + // Make sure that we don't commit using large pages 29.90 + // if large pages has been disabled for this VirtualSpace. 29.91 + size_t commit_granularity = (mode == Disable || !UseLargePages) ? 29.92 + os::vm_page_size() : os::large_page_size(); 29.93 assert_lt(vs.actual_committed_size(), commit_size + commit_granularity); 29.94 } 29.95 29.96 @@ -1042,9 +1087,40 @@ 29.97 test_virtual_space_actual_committed_space(10 * M, 10 * M); 29.98 } 29.99 29.100 + static void test_virtual_space_disable_large_pages() { 29.101 + if (!UseLargePages) { 29.102 + return; 29.103 + } 29.104 + // These test cases verify that if we force VirtualSpace to disable large pages 29.105 + test_virtual_space_actual_committed_space(10 * M, 0, Disable); 29.106 + test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable); 29.107 + test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable); 29.108 + test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable); 29.109 + test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable); 29.110 + test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable); 29.111 + test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable); 29.112 + 29.113 + test_virtual_space_actual_committed_space(10 * M, 0, Reserve); 29.114 + test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve); 29.115 + test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve); 29.116 + test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve); 29.117 + test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve); 29.118 + test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve); 29.119 + test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve); 29.120 + 29.121 + test_virtual_space_actual_committed_space(10 * M, 0, Commit); 29.122 + test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit); 29.123 + test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit); 29.124 + test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit); 29.125 + test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit); 29.126 + test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit); 29.127 + test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit); 29.128 + } 29.129 + 29.130 static void test_virtual_space() { 29.131 test_virtual_space_actual_committed_space(); 29.132 test_virtual_space_actual_committed_space_one_large_page(); 29.133 + test_virtual_space_disable_large_pages(); 29.134 } 29.135 }; 29.136
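The virtualspace.cpp change above introduces VirtualSpace::initialize_with_granularity(), which lets a caller cap the commit granularity explicitly; the existing initialize() now simply forwards the largest granularity the reservation supports (os::page_size_for_region()), and the new TestVirtualSpace modes (Default, Disable, Reserve, Commit) exercise both paths. A hedged sketch of the calling pattern the Disable test mode relies on, committing with small-page granularity so large pages are never used for the space (reserve_bytes and commit_bytes are placeholders; only the VirtualSpace, ReservedSpace, and os:: calls come from the code above):

  // reserve_bytes is assumed to be aligned to os::vm_allocation_granularity().
  ReservedSpace rs(reserve_bytes);
  VirtualSpace vs;
  // Cap the commit granularity at the small page size.
  bool ok = vs.initialize_with_granularity(rs, 0 /* committed_size */,
                                           os::vm_page_size());
  assert(ok, "VirtualSpace initialization failed");
  // Commits now happen in small-page chunks regardless of UseLargePages.
  vs.expand_by(commit_bytes, false);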
30.1 --- a/src/share/vm/runtime/virtualspace.hpp Thu Oct 10 13:25:51 2013 -0700 30.2 +++ b/src/share/vm/runtime/virtualspace.hpp Fri Oct 11 08:27:21 2013 -0700 30.3 @@ -178,6 +178,7 @@ 30.4 public: 30.5 // Initialization 30.6 VirtualSpace(); 30.7 + bool initialize_with_granularity(ReservedSpace rs, size_t committed_byte_size, size_t max_commit_ganularity); 30.8 bool initialize(ReservedSpace rs, size_t committed_byte_size); 30.9 30.10 // Destruction
31.1 --- a/src/share/vm/runtime/vmStructs.cpp Thu Oct 10 13:25:51 2013 -0700 31.2 +++ b/src/share/vm/runtime/vmStructs.cpp Fri Oct 11 08:27:21 2013 -0700 31.3 @@ -716,11 +716,17 @@ 31.4 nonstatic_field(PlaceholderEntry, _loader_data, ClassLoaderData*) \ 31.5 \ 31.6 /**************************/ \ 31.7 - /* ProctectionDomainEntry */ \ 31.8 + /* ProtectionDomainEntry */ \ 31.9 /**************************/ \ 31.10 \ 31.11 nonstatic_field(ProtectionDomainEntry, _next, ProtectionDomainEntry*) \ 31.12 - nonstatic_field(ProtectionDomainEntry, _protection_domain, oop) \ 31.13 + nonstatic_field(ProtectionDomainEntry, _pd_cache, ProtectionDomainCacheEntry*) \ 31.14 + \ 31.15 + /*******************************/ \ 31.16 + /* ProtectionDomainCacheEntry */ \ 31.17 + /*******************************/ \ 31.18 + \ 31.19 + nonstatic_field(ProtectionDomainCacheEntry, _literal, oop) \ 31.20 \ 31.21 /*************************/ \ 31.22 /* LoaderConstraintEntry */ \ 31.23 @@ -1563,6 +1569,7 @@ 31.24 declare_toplevel_type(SystemDictionary) \ 31.25 declare_toplevel_type(vmSymbols) \ 31.26 declare_toplevel_type(ProtectionDomainEntry) \ 31.27 + declare_toplevel_type(ProtectionDomainCacheEntry) \ 31.28 \ 31.29 declare_toplevel_type(GenericGrowableArray) \ 31.30 declare_toplevel_type(GrowableArray<int>) \
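The vmStructs.cpp hunk fixes the "ProctectionDomainEntry" comment typo and teaches the serviceability agent about the new indirection: ProtectionDomainEntry now exposes a _pd_cache pointer instead of a raw _protection_domain oop, and the new ProtectionDomainCacheEntry type exposes the cached oop as _literal. A rough sketch of the C++ layout these entries describe; only the type and field names come from the diff, the base-class hint is an assumption:

  // Assumed shape of the runtime classes mirrored by the vmStructs entries.
  class ProtectionDomainCacheEntry /* likely a hashtable entry subclass */ {
    oop _literal;                           // cached protection domain oop
  };

  class ProtectionDomainEntry {
    ProtectionDomainEntry*      _next;      // next entry in the list
    ProtectionDomainCacheEntry* _pd_cache;  // shared cache entry
  };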
32.1 --- a/src/share/vm/services/memoryService.hpp Thu Oct 10 13:25:51 2013 -0700 32.2 +++ b/src/share/vm/services/memoryService.hpp Fri Oct 11 08:27:21 2013 -0700 32.3 @@ -148,6 +148,12 @@ 32.4 static void track_code_cache_memory_usage() { 32.5 track_memory_pool_usage(_code_heap_pool); 32.6 } 32.7 + static void track_metaspace_memory_usage() { 32.8 + track_memory_pool_usage(_metaspace_pool); 32.9 + } 32.10 + static void track_compressed_class_memory_usage() { 32.11 + track_memory_pool_usage(_compressed_class_pool); 32.12 + } 32.13 static void track_memory_pool_usage(MemoryPool* pool); 32.14 32.15 static void gc_begin(bool fullGC, bool recordGCBeginTime,
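memoryService.hpp gains two thin wrappers, track_metaspace_memory_usage() and track_compressed_class_memory_usage(), which forward to track_memory_pool_usage() for the metaspace and compressed class space pools so their usage and peak statistics stay current. A hedged sketch of a calling site; the surrounding context is illustrative, only the MemoryService call comes from the header above:

  // Illustrative call site (not part of this patch): after metaspace has
  // grown or shrunk, refresh the pool statistics so the values reported
  // through the java.lang.management MemoryPoolMXBean API stay accurate.
  MemoryService::track_metaspace_memory_usage();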
33.1 --- a/src/share/vm/utilities/globalDefinitions.hpp Thu Oct 10 13:25:51 2013 -0700 33.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp Fri Oct 11 08:27:21 2013 -0700 33.3 @@ -326,12 +326,15 @@ 33.4 33.5 const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 (p.134) 33.6 33.7 +// Default ProtectionDomainCacheSize values 33.8 + 33.9 +const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017); 33.10 33.11 //---------------------------------------------------------------------------------------------------- 33.12 // Default and minimum StringTableSize values 33.13 33.14 const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013); 33.15 -const int minimumStringTableSize=1009; 33.16 +const int minimumStringTableSize = 1009; 33.17 33.18 const int defaultSymbolTableSize = 20011; 33.19 const int minimumSymbolTableSize = 1009;
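globalDefinitions.hpp adds defaultProtectionDomainCacheSize, sized per data model through the NOT_LP64/LP64_ONLY macros (137 entries on 32-bit, 2017 on 64-bit, both prime, as is usual for these table sizes), and normalizes the spacing of minimumStringTableSize. The macro pair keeps exactly one of its arguments depending on the build; a rough equivalent of the expansion, assuming the usual _LP64 definition:

  // Approximate expansion of the LP64 selection idiom used above.
  #ifdef _LP64
    const int defaultProtectionDomainCacheSize = 2017;  // LP64_ONLY(2017)
  #else
    const int defaultProtectionDomainCacheSize = 137;   // NOT_LP64(137)
  #endif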
34.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 34.2 +++ b/test/runtime/memory/LargePages/TestLargePagesFlags.java Fri Oct 11 08:27:21 2013 -0700 34.3 @@ -0,0 +1,389 @@ 34.4 +/* 34.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 34.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 34.7 + * 34.8 + * This code is free software; you can redistribute it and/or modify it 34.9 + * under the terms of the GNU General Public License version 2 only, as 34.10 + * published by the Free Software Foundation. 34.11 + * 34.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 34.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 34.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 34.15 + * version 2 for more details (a copy is included in the LICENSE file that 34.16 + * accompanied this code). 34.17 + * 34.18 + * You should have received a copy of the GNU General Public License version 34.19 + * 2 along with this work; if not, write to the Free Software Foundation, 34.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 34.21 + * 34.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 34.23 + * or visit www.oracle.com if you need additional information or have any 34.24 + * questions. 34.25 + */ 34.26 + 34.27 +/* @test TestLargePagesFlags 34.28 + * @summary Tests how large pages are choosen depending on the given large pages flag combinations. 34.29 + * @library /testlibrary 34.30 + * @run main TestLargePagesFlags 34.31 + */ 34.32 + 34.33 +import com.oracle.java.testlibrary.OutputAnalyzer; 34.34 +import com.oracle.java.testlibrary.Platform; 34.35 +import com.oracle.java.testlibrary.ProcessTools; 34.36 +import java.util.ArrayList; 34.37 + 34.38 +public class TestLargePagesFlags { 34.39 + 34.40 + public static void main(String [] args) throws Exception { 34.41 + if (!Platform.isLinux()) { 34.42 + System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux."); 34.43 + return; 34.44 + } 34.45 + 34.46 + testUseTransparentHugePages(); 34.47 + testUseHugeTLBFS(); 34.48 + testUseSHM(); 34.49 + testCombinations(); 34.50 + } 34.51 + 34.52 + public static void testUseTransparentHugePages() throws Exception { 34.53 + if (!canUse(UseTransparentHugePages(true))) { 34.54 + System.out.println("Skipping testUseTransparentHugePages"); 34.55 + return; 34.56 + } 34.57 + 34.58 + // -XX:-UseLargePages overrides all other flags. 34.59 + new FlagTester() 34.60 + .use(UseLargePages(false), 34.61 + UseTransparentHugePages(true)) 34.62 + .expect( 34.63 + UseLargePages(false), 34.64 + UseTransparentHugePages(false), 34.65 + UseHugeTLBFS(false), 34.66 + UseSHM(false)); 34.67 + 34.68 + // Explicitly turn on UseTransparentHugePages. 34.69 + new FlagTester() 34.70 + .use(UseTransparentHugePages(true)) 34.71 + .expect( 34.72 + UseLargePages(true), 34.73 + UseTransparentHugePages(true), 34.74 + UseHugeTLBFS(false), 34.75 + UseSHM(false)); 34.76 + 34.77 + new FlagTester() 34.78 + .use(UseLargePages(true), 34.79 + UseTransparentHugePages(true)) 34.80 + .expect( 34.81 + UseLargePages(true), 34.82 + UseTransparentHugePages(true), 34.83 + UseHugeTLBFS(false), 34.84 + UseSHM(false)); 34.85 + 34.86 + // Setting a specific large pages flag will turn 34.87 + // off heuristics to choose large pages type. 
34.88 + new FlagTester() 34.89 + .use(UseLargePages(true), 34.90 + UseTransparentHugePages(false)) 34.91 + .expect( 34.92 + UseLargePages(false), 34.93 + UseTransparentHugePages(false), 34.94 + UseHugeTLBFS(false), 34.95 + UseSHM(false)); 34.96 + 34.97 + // Don't turn on UseTransparentHugePages 34.98 + // unless the user explicitly asks for them. 34.99 + new FlagTester() 34.100 + .use(UseLargePages(true)) 34.101 + .expect( 34.102 + UseTransparentHugePages(false)); 34.103 + } 34.104 + 34.105 + public static void testUseHugeTLBFS() throws Exception { 34.106 + if (!canUse(UseHugeTLBFS(true))) { 34.107 + System.out.println("Skipping testUseHugeTLBFS"); 34.108 + return; 34.109 + } 34.110 + 34.111 + // -XX:-UseLargePages overrides all other flags. 34.112 + new FlagTester() 34.113 + .use(UseLargePages(false), 34.114 + UseHugeTLBFS(true)) 34.115 + .expect( 34.116 + UseLargePages(false), 34.117 + UseTransparentHugePages(false), 34.118 + UseHugeTLBFS(false), 34.119 + UseSHM(false)); 34.120 + 34.121 + // Explicitly turn on UseHugeTLBFS. 34.122 + new FlagTester() 34.123 + .use(UseHugeTLBFS(true)) 34.124 + .expect( 34.125 + UseLargePages(true), 34.126 + UseTransparentHugePages(false), 34.127 + UseHugeTLBFS(true), 34.128 + UseSHM(false)); 34.129 + 34.130 + new FlagTester() 34.131 + .use(UseLargePages(true), 34.132 + UseHugeTLBFS(true)) 34.133 + .expect( 34.134 + UseLargePages(true), 34.135 + UseTransparentHugePages(false), 34.136 + UseHugeTLBFS(true), 34.137 + UseSHM(false)); 34.138 + 34.139 + // Setting a specific large pages flag will turn 34.140 + // off heuristics to choose large pages type. 34.141 + new FlagTester() 34.142 + .use(UseLargePages(true), 34.143 + UseHugeTLBFS(false)) 34.144 + .expect( 34.145 + UseLargePages(false), 34.146 + UseTransparentHugePages(false), 34.147 + UseHugeTLBFS(false), 34.148 + UseSHM(false)); 34.149 + 34.150 + // Using UseLargePages will default to UseHugeTLBFS large pages. 34.151 + new FlagTester() 34.152 + .use(UseLargePages(true)) 34.153 + .expect( 34.154 + UseLargePages(true), 34.155 + UseTransparentHugePages(false), 34.156 + UseHugeTLBFS(true), 34.157 + UseSHM(false)); 34.158 + } 34.159 + 34.160 + public static void testUseSHM() throws Exception { 34.161 + if (!canUse(UseSHM(true))) { 34.162 + System.out.println("Skipping testUseSHM"); 34.163 + return; 34.164 + } 34.165 + 34.166 + // -XX:-UseLargePages overrides all other flags. 34.167 + new FlagTester() 34.168 + .use(UseLargePages(false), 34.169 + UseSHM(true)) 34.170 + .expect( 34.171 + UseLargePages(false), 34.172 + UseTransparentHugePages(false), 34.173 + UseHugeTLBFS(false), 34.174 + UseSHM(false)); 34.175 + 34.176 + // Explicitly turn on UseSHM. 34.177 + new FlagTester() 34.178 + .use(UseSHM(true)) 34.179 + .expect( 34.180 + UseLargePages(true), 34.181 + UseTransparentHugePages(false), 34.182 + UseHugeTLBFS(false), 34.183 + UseSHM(true)) ; 34.184 + 34.185 + new FlagTester() 34.186 + .use(UseLargePages(true), 34.187 + UseSHM(true)) 34.188 + .expect( 34.189 + UseLargePages(true), 34.190 + UseTransparentHugePages(false), 34.191 + UseHugeTLBFS(false), 34.192 + UseSHM(true)) ; 34.193 + 34.194 + // Setting a specific large pages flag will turn 34.195 + // off heuristics to choose large pages type. 
34.196 + new FlagTester() 34.197 + .use(UseLargePages(true), 34.198 + UseSHM(false)) 34.199 + .expect( 34.200 + UseLargePages(false), 34.201 + UseTransparentHugePages(false), 34.202 + UseHugeTLBFS(false), 34.203 + UseSHM(false)); 34.204 + 34.205 + // Setting UseLargePages can allow the system to choose 34.206 + // UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages. 34.207 + new FlagTester() 34.208 + .use(UseLargePages(true)) 34.209 + .expect( 34.210 + UseLargePages(true), 34.211 + UseTransparentHugePages(false)); 34.212 + } 34.213 + 34.214 + public static void testCombinations() throws Exception { 34.215 + if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) { 34.216 + System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination"); 34.217 + return; 34.218 + } 34.219 + 34.220 + // UseHugeTLBFS takes precedence over SHM. 34.221 + 34.222 + new FlagTester() 34.223 + .use(UseLargePages(true), 34.224 + UseHugeTLBFS(true), 34.225 + UseSHM(true)) 34.226 + .expect( 34.227 + UseLargePages(true), 34.228 + UseTransparentHugePages(false), 34.229 + UseHugeTLBFS(true), 34.230 + UseSHM(false)); 34.231 + 34.232 + new FlagTester() 34.233 + .use(UseLargePages(true), 34.234 + UseHugeTLBFS(false), 34.235 + UseSHM(true)) 34.236 + .expect( 34.237 + UseLargePages(true), 34.238 + UseTransparentHugePages(false), 34.239 + UseHugeTLBFS(false), 34.240 + UseSHM(true)); 34.241 + 34.242 + new FlagTester() 34.243 + .use(UseLargePages(true), 34.244 + UseHugeTLBFS(true), 34.245 + UseSHM(false)) 34.246 + .expect( 34.247 + UseLargePages(true), 34.248 + UseTransparentHugePages(false), 34.249 + UseHugeTLBFS(true), 34.250 + UseSHM(false)); 34.251 + 34.252 + new FlagTester() 34.253 + .use(UseLargePages(true), 34.254 + UseHugeTLBFS(false), 34.255 + UseSHM(false)) 34.256 + .expect( 34.257 + UseLargePages(false), 34.258 + UseTransparentHugePages(false), 34.259 + UseHugeTLBFS(false), 34.260 + UseSHM(false)); 34.261 + 34.262 + 34.263 + if (!canUse(UseTransparentHugePages(true))) { 34.264 + return; 34.265 + } 34.266 + 34.267 + // UseTransparentHugePages takes precedence. 34.268 + 34.269 + new FlagTester() 34.270 + .use(UseLargePages(true), 34.271 + UseTransparentHugePages(true), 34.272 + UseHugeTLBFS(true), 34.273 + UseSHM(true)) 34.274 + .expect( 34.275 + UseLargePages(true), 34.276 + UseTransparentHugePages(true), 34.277 + UseHugeTLBFS(false), 34.278 + UseSHM(false)); 34.279 + 34.280 + new FlagTester() 34.281 + .use(UseTransparentHugePages(true), 34.282 + UseHugeTLBFS(true), 34.283 + UseSHM(true)) 34.284 + .expect( 34.285 + UseLargePages(true), 34.286 + UseTransparentHugePages(true), 34.287 + UseHugeTLBFS(false), 34.288 + UseSHM(false)); 34.289 + } 34.290 + 34.291 + private static class FlagTester { 34.292 + private Flag [] useFlags; 34.293 + 34.294 + public FlagTester use(Flag... useFlags) { 34.295 + this.useFlags = useFlags; 34.296 + return this; 34.297 + } 34.298 + 34.299 + public void expect(Flag... 
expectedFlags) throws Exception { 34.300 + if (useFlags == null) { 34.301 + throw new IllegalStateException("Must run use() before expect()"); 34.302 + } 34.303 + 34.304 + OutputAnalyzer output = executeNewJVM(useFlags); 34.305 + 34.306 + for (Flag flag : expectedFlags) { 34.307 + System.out.println("Looking for: " + flag.flagString()); 34.308 + String strValue = output.firstMatch(".* " + flag.name() + " .* :?= (\\S+).*", 1); 34.309 + 34.310 + if (strValue == null) { 34.311 + throw new RuntimeException("Flag " + flag.name() + " couldn't be found"); 34.312 + } 34.313 + 34.314 + if (!flag.value().equals(strValue)) { 34.315 + throw new RuntimeException("Wrong value for: " + flag.name() 34.316 + + " expected: " + flag.value() 34.317 + + " got: " + strValue); 34.318 + } 34.319 + } 34.320 + 34.321 + output.shouldHaveExitValue(0); 34.322 + } 34.323 + } 34.324 + 34.325 + private static OutputAnalyzer executeNewJVM(Flag... flags) throws Exception { 34.326 + ArrayList<String> args = new ArrayList<>(); 34.327 + for (Flag flag : flags) { 34.328 + args.add(flag.flagString()); 34.329 + } 34.330 + args.add("-XX:+PrintFlagsFinal"); 34.331 + args.add("-version"); 34.332 + 34.333 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()])); 34.334 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 34.335 + 34.336 + return output; 34.337 + } 34.338 + 34.339 + private static boolean canUse(Flag flag) { 34.340 + try { 34.341 + new FlagTester().use(flag).expect(flag); 34.342 + } catch (Exception e) { 34.343 + return false; 34.344 + } 34.345 + 34.346 + return true; 34.347 + } 34.348 + 34.349 + private static Flag UseLargePages(boolean value) { 34.350 + return new BooleanFlag("UseLargePages", value); 34.351 + } 34.352 + 34.353 + private static Flag UseTransparentHugePages(boolean value) { 34.354 + return new BooleanFlag("UseTransparentHugePages", value); 34.355 + } 34.356 + 34.357 + private static Flag UseHugeTLBFS(boolean value) { 34.358 + return new BooleanFlag("UseHugeTLBFS", value); 34.359 + } 34.360 + 34.361 + private static Flag UseSHM(boolean value) { 34.362 + return new BooleanFlag("UseSHM", value); 34.363 + } 34.364 + 34.365 + private static class BooleanFlag implements Flag { 34.366 + private String name; 34.367 + private boolean value; 34.368 + 34.369 + BooleanFlag(String name, boolean value) { 34.370 + this.name = name; 34.371 + this.value = value; 34.372 + } 34.373 + 34.374 + public String flagString() { 34.375 + return "-XX:" + (value ? "+" : "-") + name; 34.376 + } 34.377 + 34.378 + public String name() { 34.379 + return name; 34.380 + } 34.381 + 34.382 + public String value() { 34.383 + return Boolean.toString(value); 34.384 + } 34.385 + } 34.386 + 34.387 + private static interface Flag { 34.388 + public String flagString(); 34.389 + public String name(); 34.390 + public String value(); 34.391 + } 34.392 +}