Fri, 25 Mar 2011 18:19:22 -0400
Merge
--- a/.hgtags Fri Mar 25 11:29:30 2011 -0700
+++ b/.hgtags Fri Mar 25 18:19:22 2011 -0400
@@ -154,3 +154,5 @@
 0e531ab5ba04967a0e9aa6aef65e6eb3a0dcf632 jdk7-b132
 a8d643a4db47c7b58e0bcb49c77b5c3610de86a8 hs21-b03
 1b3a350709e4325d759bb453ff3fb6a463270488 jdk7-b133
+447e6faab4a8755d4860c2366630729dbaec111c jdk7-b134
+3c76374706ea8a77e15aec8310e831e5734f8775 hs21-b04
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/FieldImpl.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/FieldImpl.java Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
 
     // get the value of static field
     ValueImpl getValue() {
-        return getValue(saField.getFieldHolder());
+        return getValue(saField.getFieldHolder().getJavaMirror());
     }
 
     // get the value of this Field from a specific Oop
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/StringTable.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/StringTable.java Fri Mar 25 18:19:22 2011 -0400
@@ -44,12 +44,10 @@
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("StringTable");
     theTableField = type.getAddressField("_the_table");
-    stringTableSize = db.lookupIntConstant("StringTable::string_table_size").intValue();
   }
 
   // Fields
   private static AddressField theTableField;
-  private static int stringTableSize;
 
   // Accessors
   public static StringTable getTheTable() {
@@ -57,10 +55,6 @@
     return (StringTable) VMObjectFactory.newObject(StringTable.class, tmp);
   }
 
-  public static int getStringTableSize() {
-    return stringTableSize;
-  }
-
   public StringTable(Address addr) {
     super(addr);
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@
     innerClasses = new OopField(type.getOopField("_inner_classes"), Oop.getHeaderSize());
     nonstaticFieldSize = new CIntField(type.getCIntegerField("_nonstatic_field_size"), Oop.getHeaderSize());
     staticFieldSize = new CIntField(type.getCIntegerField("_static_field_size"), Oop.getHeaderSize());
-    staticOopFieldSize = new CIntField(type.getCIntegerField("_static_oop_field_size"), Oop.getHeaderSize());
+    staticOopFieldCount = new CIntField(type.getCIntegerField("_static_oop_field_count"), Oop.getHeaderSize());
     nonstaticOopMapSize = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), Oop.getHeaderSize());
     isMarkedDependent = new CIntField(type.getCIntegerField("_is_marked_dependent"), Oop.getHeaderSize());
     initState = new CIntField(type.getCIntegerField("_init_state"), Oop.getHeaderSize());
@@ -140,7 +140,7 @@
   private static OopField innerClasses;
   private static CIntField nonstaticFieldSize;
   private static CIntField staticFieldSize;
-  private static CIntField staticOopFieldSize;
+  private static CIntField staticOopFieldCount;
   private static CIntField nonstaticOopMapSize;
   private static CIntField isMarkedDependent;
   private static CIntField initState;
@@ -261,8 +261,7 @@
   public Symbol getSourceDebugExtension(){ return getSymbol(sourceDebugExtension); }
   public TypeArray getInnerClasses() { return (TypeArray) innerClasses.getValue(this); }
   public long getNonstaticFieldSize() { return nonstaticFieldSize.getValue(this); }
-  public long getStaticFieldSize() { return staticFieldSize.getValue(this); }
-  public long getStaticOopFieldSize() { return staticOopFieldSize.getValue(this); }
+  public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); }
   public long getNonstaticOopMapSize() { return nonstaticOopMapSize.getValue(this); }
   public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; }
   public long getVtableLen() { return vtableLen.getValue(this); }
@@ -453,7 +452,7 @@
     visitor.doOop(innerClasses, true);
     visitor.doCInt(nonstaticFieldSize, true);
     visitor.doCInt(staticFieldSize, true);
-    visitor.doCInt(staticOopFieldSize, true);
+    visitor.doCInt(staticOopFieldCount, true);
     visitor.doCInt(nonstaticOopMapSize, true);
     visitor.doCInt(isMarkedDependent, true);
     visitor.doCInt(initState, true);
@@ -692,7 +691,7 @@
   public long getObjectSize() {
     long bodySize = alignObjectOffset(getVtableLen() * getHeap().getOopSize())
       + alignObjectOffset(getItableLen() * getHeap().getOopSize())
-      + (getStaticFieldSize() + getNonstaticOopMapSize()) * getHeap().getOopSize();
+      + (getNonstaticOopMapSize()) * getHeap().getOopSize();
     return alignObjectSize(headerSize + bodySize);
   }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/IntField.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/IntField.java Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,12 @@
     super(holder, fieldArrayIndex);
   }
 
-  public int getValue(Oop obj) { return obj.getHandle().getJIntAt(getOffset()); }
+  public int getValue(Oop obj) {
+    if (!isVMField() && !obj.isInstance() && !obj.isArray()) {
+      throw new InternalError(obj.toString());
+    }
+    return obj.getHandle().getJIntAt(getOffset());
+  }
   public void setValue(Oop obj, int value) throws MutationException {
     // Fix this: setJIntAt is missing in Address
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/OopField.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/OopField.java Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,11 +41,17 @@
   }
 
   public Oop getValue(Oop obj) {
+    if (!isVMField() && !obj.isInstance() && !obj.isArray()) {
+      throw new InternalError();
+    }
     return obj.getHeap().newOop(getValueAsOopHandle(obj));
   }
 
   /** Debugging support */
   public OopHandle getValueAsOopHandle(Oop obj) {
+    if (!isVMField() && !obj.isInstance() && !obj.isArray()) {
+      throw new InternalError(obj.toString());
+    }
     return obj.getHandle().getOopHandleAt(getOffset());
   }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -274,13 +274,7 @@
       // hc_klass is a HotSpot magic field and hence we can't
       // find it from InstanceKlass for java.lang.Class.
       TypeDataBase db = VM.getVM().getTypeDataBase();
-      int hcKlassOffset = (int) Instance.getHeaderSize();
-      try {
-        hcKlassOffset += (db.lookupIntConstant("java_lang_Class::hc_klass_offset").intValue() *
-                          VM.getVM().getHeapOopSize());
-      } catch (RuntimeException re) {
-        // ignore, currently java_lang_Class::hc_klass_offset is zero
-      }
+      int hcKlassOffset = (int) db.lookupType("java_lang_Class").getCIntegerField("klass_offset").getValue();
       if (VM.getVM().isCompressedOopsEnabled()) {
         hcKlassField = new NarrowOopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true);
       } else {
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -839,13 +839,13 @@
   }
 
   private void readSystemProperties() {
-    InstanceKlass systemKls = getSystemDictionary().getSystemKlass();
+    final InstanceKlass systemKls = getSystemDictionary().getSystemKlass();
     systemKls.iterate(new DefaultOopVisitor() {
       ObjectReader objReader = new ObjectReader();
       public void doOop(sun.jvm.hotspot.oops.OopField field, boolean isVMField) {
         if (field.getID().getName().equals("props")) {
          try {
-            sysProps = (Properties) objReader.readObject(field.getValue(getObj()));
+            sysProps = (Properties) objReader.readObject(field.getValue(systemKls.getJavaMirror()));
          } catch (Exception e) {
            if (Assert.ASSERTS_ENABLED) {
              e.printStackTrace();
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -746,7 +746,7 @@
       out.writeByte((byte)kind);
       if (ik != null) {
         // static field
-        writeField(field, ik);
+        writeField(field, ik.getJavaMirror());
       }
     }
   }
--- a/agent/test/jdi/sasanity.sh Fri Mar 25 11:29:30 2011 -0700
+++ b/agent/test/jdi/sasanity.sh Fri Mar 25 18:19:22 2011 -0400
@@ -43,6 +43,7 @@
 fi
 
 jdk=$1
+shift
 OS=`uname`
 
 if [ "$OS" != "Linux" ]; then
@@ -68,7 +69,7 @@
 
 tmp=/tmp/sagsetup
 rm -f $tmp
-$jdk/bin/java sagtarg > $tmp &
+$jdk/bin/java $* sagtarg > $tmp &
 pid=$!
 while [ ! -s $tmp ] ; do
   # Kludge alert!
--- a/make/hotspot_version Fri Mar 25 11:29:30 2011 -0700
+++ b/make/hotspot_version Fri Mar 25 18:19:22 2011 -0400
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=21
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=04
+HS_BUILD_NUMBER=05
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -301,7 +301,8 @@
   // thread.
   assert(_obj != noreg, "must be a valid register");
   assert(_oop_index >= 0, "must have oop index");
-  __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
+  __ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
+  __ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
   __ cmp(G2_thread, G3);
   __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
   __ delayed()->nop();
--- a/src/cpu/sparc/vm/dump_sparc.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/sparc/vm/dump_sparc.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -80,13 +80,19 @@
     for (int j = 0; j < num_virtuals; ++j) {
       dummy_vtable[num_virtuals * i + j] = (void*)masm->pc();
       __ save(SP, -256, SP);
+      int offset = (i << 8) + j;
+      Register src = G0;
+      if (!Assembler::is_simm13(offset)) {
+        __ sethi(offset, L0);
+        src = L0;
+        offset = offset & ((1 << 10) - 1);
+      }
       __ brx(Assembler::always, false, Assembler::pt, common_code);
 
       // Load L0 with a value indicating vtable/offset pair.
       //   -- bits[ 7..0] (8 bits) which virtual method in table?
-      //   -- bits[12..8] (5 bits) which virtual method table?
-      //   -- must fit in 13-bit instruction immediate field.
-      __ delayed()->set((i << 8) + j, L0);
+      //   -- bits[13..8] (6 bits) which virtual method table?
+      __ delayed()->or3(src, offset, L0);
     }
   }
 
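The dump_sparc.cpp change covers vtable/offset encodings that no longer fit SPARC's 13-bit signed immediate: when that happens the high part is materialised into L0 with sethi and only the low 10 bits remain in the delayed or3. The following is a standalone sketch of just the split arithmetic that change relies on; it is illustrative only (the vtable number and method index are made-up values, and the exact sethi semantics are left to the assembler).

#include <cassert>

// simm13 test as used above: the value must fit in a 13-bit signed immediate.
static bool is_simm13(int x) { return -4096 <= x && x <= 4095; }

int main() {
  int i = 40, j = 5;               // illustrative vtable number / method index
  int offset = (i << 8) + j;       // same packed encoding as the dummy vtable stubs
  if (!is_simm13(offset)) {
    int hi = offset & ~((1 << 10) - 1);  // upper part, left for sethi to materialise
    int lo = offset &  ((1 << 10) - 1);  // low 10 bits, kept in the or3 immediate
    assert((hi | lo) == offset);         // the two halves recombine losslessly
  }
  return 0;
}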
--- a/src/cpu/sparc/vm/globals_sparc.hpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/sparc/vm/globals_sparc.hpp Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@
 define_pd_global(intx, OptoLoopAlignment,     16);  // = 4*wordSize
 define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode,       1500);
+
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, ThreadStackSize,       1024);
@@ -71,4 +72,6 @@
 
 define_pd_global(bool, UseMembar,            false);
 
+// GC Ergo Flags
+define_pd_global(intx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
 #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -52,6 +52,22 @@
   ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
 }
 
+void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
+  ResourceMark rm;
+  unsigned char buffer[10 * BytesPerInstWord];
+  CodeBuffer buf(buffer, 10 * BytesPerInstWord);
+  MacroAssembler masm(&buf);
+
+  Register destreg = inv_rd(*(unsigned int *)instaddr);
+  // Generate the proper sequence into a temporary buffer and compare
+  // it with the original sequence.
+  masm.patchable_sethi(x, destreg);
+  int len = buffer - masm.pc();
+  for (int i = 0; i < len; i++) {
+    assert(instaddr[i] == buffer[i], "instructions must match");
+  }
+}
+
 void NativeInstruction::verify() {
   // make sure code pattern is actually an instruction address
   address addr = addr_at(0);
--- a/src/cpu/sparc/vm/nativeInst_sparc.hpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp Fri Mar 25 18:19:22 2011 -0400
@@ -254,6 +254,7 @@
   // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
   // must be handled separately.
   static void set_data64_sethi(address instaddr, intptr_t x);
+  static void verify_data64_sethi(address instaddr, intptr_t x);
 
   // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
   static int data32(int sethi_insn, int arith_insn) {
--- a/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -30,7 +30,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
 
-void Relocation::pd_set_data_value(address x, intptr_t o) {
+void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
   NativeInstruction* ip = nativeInstruction_at(addr());
   jint inst = ip->long_at(0);
   assert(inst != NativeInstruction::illegal_instruction(), "no breakpoint");
@@ -83,7 +83,11 @@
     guarantee(Assembler::is_simm13(simm13), "offset can't overflow simm13");
     inst &= ~Assembler::simm( -1, 13);
     inst |=  Assembler::simm(simm13, 13);
-    ip->set_long_at(0, inst);
+    if (verify_only) {
+      assert(ip->long_at(0) == inst, "instructions must match");
+    } else {
+      ip->set_long_at(0, inst);
+    }
     }
     break;
 
@@ -97,19 +101,36 @@
       jint np = oopDesc::encode_heap_oop((oop)x);
       inst &= ~Assembler::hi22(-1);
       inst |=  Assembler::hi22((intptr_t)np);
-      ip->set_long_at(0, inst);
+      if (verify_only) {
+        assert(ip->long_at(0) == inst, "instructions must match");
+      } else {
+        ip->set_long_at(0, inst);
+      }
       inst2 = ip->long_at( NativeInstruction::nop_instruction_size );
       guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op");
-      ip->set_long_at(NativeInstruction::nop_instruction_size, ip->set_data32_simm13( inst2, (intptr_t)np));
+      if (verify_only) {
+        assert(ip->long_at(NativeInstruction::nop_instruction_size) == NativeInstruction::set_data32_simm13( inst2, (intptr_t)np),
+               "instructions must match");
+      } else {
+        ip->set_long_at(NativeInstruction::nop_instruction_size, NativeInstruction::set_data32_simm13( inst2, (intptr_t)np));
+      }
       break;
     }
-    ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
+    if (verify_only) {
+      ip->verify_data64_sethi( ip->addr_at(0), (intptr_t)x );
+    } else {
+      ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
+    }
 #else
     guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
     inst &= ~Assembler::hi22( -1);
     inst |=  Assembler::hi22((intptr_t)x);
     // (ignore offset; it doesn't play into the sethi)
-    ip->set_long_at(0, inst);
+    if (verify_only) {
+      assert(ip->long_at(0) == inst, "instructions must match");
+    } else {
+      ip->set_long_at(0, inst);
+    }
 #endif
   }
   break;
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -313,10 +313,13 @@
   }
   assert(_obj != noreg, "must be a valid register");
   Register tmp = rax;
-  if (_obj == tmp) tmp = rbx;
+  Register tmp2 = rbx;
   __ push(tmp);
+  __ push(tmp2);
+  __ load_heap_oop(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
   __ get_thread(tmp);
-  __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
+  __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
+  __ pop(tmp2);
   __ pop(tmp);
   __ jcc(Assembler::notEqual, call_patch);
 
--- a/src/cpu/x86/vm/globals_x86.hpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,4 +72,6 @@
 
 define_pd_global(bool, UseMembar,            false);
 
+// GC Ergo Flags
+define_pd_global(intx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
 #endif // CPU_X86_VM_GLOBALS_X86_HPP
--- a/src/cpu/x86/vm/relocInfo_x86.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/x86/vm/relocInfo_x86.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -31,7 +31,7 @@
 #include "runtime/safepoint.hpp"
 
 
-void Relocation::pd_set_data_value(address x, intptr_t o) {
+void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
 #ifdef AMD64
   x += o;
   typedef Assembler::WhichOperand WhichOperand;
@@ -40,19 +40,35 @@
          which == Assembler::narrow_oop_operand ||
          which == Assembler::imm_operand, "format unpacks ok");
   if (which == Assembler::imm_operand) {
-    *pd_address_in_code() = x;
+    if (verify_only) {
+      assert(*pd_address_in_code() == x, "instructions must match");
+    } else {
+      *pd_address_in_code() = x;
+    }
   } else if (which == Assembler::narrow_oop_operand) {
     address disp = Assembler::locate_operand(addr(), which);
-    *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
+    if (verify_only) {
+      assert(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
+    } else {
+      *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
+    }
   } else {
     // Note:  Use runtime_call_type relocations for call32_operand.
     address ip = addr();
     address disp = Assembler::locate_operand(ip, which);
     address next_ip = Assembler::locate_next_instruction(ip);
-    *(int32_t*) disp = x - next_ip;
+    if (verify_only) {
+      assert(*(int32_t*) disp == (x - next_ip), "instructions must match");
+    } else {
+      *(int32_t*) disp = x - next_ip;
+    }
   }
 #else
-  *pd_address_in_code() = x + o;
+  if (verify_only) {
+    assert(*pd_address_in_code() == (x + o), "instructions must match");
+  } else {
+    *pd_address_in_code() = x + o;
+  }
 #endif // AMD64
 }
 
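Both the SPARC and x86 relocation changes above add a verify_only mode to Relocation::pd_set_data_value: on a verification pass the instruction stream is compared against the expected value instead of being rewritten. A minimal standalone sketch of that verify-or-patch pattern follows; the names (set_or_verify_operand, operand slot) are illustrative, not HotSpot APIs.

#include <cassert>
#include <cstdint>

// Either patch a 32-bit operand in a (simulated) instruction stream, or, in a
// verification pass, assert that it already holds the expected value.
static void set_or_verify_operand(int32_t* operand, int32_t expected, bool verify_only) {
  if (verify_only) {
    assert(*operand == expected && "instructions must match");
  } else {
    *operand = expected;
  }
}

int main() {
  int32_t slot = 0;
  set_or_verify_operand(&slot, 7, /*verify_only=*/false);  // normal relocation: rewrite
  set_or_verify_operand(&slot, 7, /*verify_only=*/true);   // verification pass: must match
  return 0;
}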
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -281,7 +281,7 @@
 
   if (method->is_static()) {
     istate->set_oop_temp(
-      method->constants()->pool_holder()->klass_part()->java_mirror());
+      method->constants()->pool_holder()->java_mirror());
     mirror = istate->oop_temp_addr();
     *(dst++) = &mirror;
   }
@@ -667,7 +667,7 @@
     (BasicObjectLock *) stack->alloc(monitor_words * wordSize);
   oop object;
   if (method->is_static())
-    object = method->constants()->pool_holder()->klass_part()->java_mirror();
+    object = method->constants()->pool_holder()->java_mirror();
   else
     object = (oop) locals[0];
   monitor->set_obj(object);
--- a/src/os/windows/vm/os_windows.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/os/windows/vm/os_windows.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -3297,9 +3297,14 @@
          "possibility of dangling Thread pointer");
 
   OSThread* osthread = thread->osthread();
-  bool interrupted;
-  interrupted = osthread->interrupted();
-  if (clear_interrupted == true) {
+  bool interrupted = osthread->interrupted();
+  // There is no synchronization between the setting of the interrupt
+  // and it being cleared here. It is critical - see 6535709 - that
+  // we only clear the interrupt state, and reset the interrupt event,
+  // if we are going to report that we were indeed interrupted - else
+  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
+  // depending on the timing
+  if (interrupted && clear_interrupted) {
     osthread->set_interrupted(false);
     ResetEvent(osthread->interrupt_event());
   } // Otherwise leave the interrupted state alone
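The os_windows.cpp change only clears the interrupt state (and resets the Windows interrupt event) when the caller is actually going to see interrupted == true, so a concurrently posted interrupt cannot be consumed without being reported. A simplified, platform-neutral sketch of that rule, assuming a plain atomic flag in place of OSThread's interrupt state (this is not the HotSpot code itself):

#include <atomic>

static std::atomic<bool> interrupt_flag{false};

// Report the interrupt state; clear it only when it is both set and the caller
// asked for it to be cleared, mirroring the guarded clear in os_windows.cpp.
static bool is_interrupted(bool clear_interrupted) {
  bool interrupted = interrupt_flag.load();
  if (interrupted && clear_interrupted) {
    interrupt_flag.store(false);  // safe: we are about to report "interrupted"
  }
  return interrupted;
}

int main() {
  interrupt_flag.store(true);
  bool first  = is_interrupted(/*clear_interrupted=*/true);   // true, state cleared
  bool second = is_interrupted(/*clear_interrupted=*/true);   // false, nothing lost
  return (first && !second) ? 0 : 1;
}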
--- a/src/share/vm/c1/c1_GraphBuilder.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -1471,9 +1471,9 @@
   if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
     if (state_before != NULL) {
       // build a patching constant
-      obj = new Constant(new ClassConstant(holder), state_before);
+      obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
     } else {
-      obj = new Constant(new ClassConstant(holder));
+      obj = new Constant(new InstanceConstant(holder->java_mirror()));
     }
   }
 
--- a/src/share/vm/c1/c1_Runtime1.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/c1/c1_Runtime1.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -808,7 +808,7 @@
       { klassOop klass = resolve_field_return_klass(caller_method, bci, CHECK);
         // Save a reference to the class that has to be checked for initialization
         init_klass = KlassHandle(THREAD, klass);
-        k = klass;
+        k = klass->java_mirror();
       }
       break;
     case Bytecodes::_new:
--- a/src/share/vm/ci/ciCPCache.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/ci/ciCPCache.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,8 +46,7 @@
 // ciCPCache::is_f1_null_at
 bool ciCPCache::is_f1_null_at(int index) {
   VM_ENTRY_MARK;
-  oop f1 = entry_at(index)->f1();
-  return (f1 == NULL);
+  return entry_at(index)->is_f1_null();
 }
 
 
--- a/src/share/vm/ci/ciField.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/ci/ciField.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -213,7 +213,7 @@
   // may change.  The three examples are java.lang.System.in,
   // java.lang.System.out, and java.lang.System.err.
 
-  Handle k = _holder->get_klassOop();
+  KlassHandle k = _holder->get_klassOop();
   assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
   if( k() == SystemDictionary::System_klass() ) {
     // Check offsets for case 2: System.in, System.out, or System.err
@@ -225,36 +225,38 @@
     }
   }
 
+  Handle mirror = k->java_mirror();
+
   _is_constant = true;
   switch(type()->basic_type()) {
   case T_BYTE:
-    _constant_value = ciConstant(type()->basic_type(), k->byte_field(_offset));
+    _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
     break;
   case T_CHAR:
-    _constant_value = ciConstant(type()->basic_type(), k->char_field(_offset));
+    _constant_value = ciConstant(type()->basic_type(), mirror->char_field(_offset));
     break;
   case T_SHORT:
-    _constant_value = ciConstant(type()->basic_type(), k->short_field(_offset));
+    _constant_value = ciConstant(type()->basic_type(), mirror->short_field(_offset));
     break;
   case T_BOOLEAN:
-    _constant_value = ciConstant(type()->basic_type(), k->bool_field(_offset));
+    _constant_value = ciConstant(type()->basic_type(), mirror->bool_field(_offset));
    break;
   case T_INT:
-    _constant_value = ciConstant(type()->basic_type(), k->int_field(_offset));
+    _constant_value = ciConstant(type()->basic_type(), mirror->int_field(_offset));
     break;
   case T_FLOAT:
-    _constant_value = ciConstant(k->float_field(_offset));
+    _constant_value = ciConstant(mirror->float_field(_offset));
     break;
   case T_DOUBLE:
-    _constant_value = ciConstant(k->double_field(_offset));
+    _constant_value = ciConstant(mirror->double_field(_offset));
     break;
   case T_LONG:
-    _constant_value = ciConstant(k->long_field(_offset));
+    _constant_value = ciConstant(mirror->long_field(_offset));
     break;
   case T_OBJECT:
   case T_ARRAY:
     {
-      oop o = k->obj_field(_offset);
+      oop o = mirror->obj_field(_offset);
 
       // A field will be "constant" if it is known always to be
       // a non-null reference to an instance of a particular class,
--- a/src/share/vm/ci/ciInstance.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/ci/ciInstance.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -138,3 +138,9 @@
   st->print(" type=");
   klass()->print(st);
 }
+
+
+ciKlass* ciInstance::java_lang_Class_klass() {
+  VM_ENTRY_MARK;
+  return CURRENT_ENV->get_object(java_lang_Class::as_klassOop(get_oop()))->as_klass();
+}
--- a/src/share/vm/ci/ciInstance.hpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/ci/ciInstance.hpp Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,8 @@
 
   // Constant value of a field at the specified offset.
   ciConstant field_value_by_offset(int field_offset);
+
+  ciKlass* java_lang_Class_klass();
 };
 
 #endif // SHARE_VM_CI_CIINSTANCE_HPP
--- a/src/share/vm/ci/ciInstanceKlass.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/ci/ciInstanceKlass.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,6 @@
   if (h_k() != SystemDictionary::Object_klass()) {
     super();
   }
-  java_mirror();
   //compute_nonstatic_fields();  // done outside of constructor
 }
 
@@ -320,6 +319,9 @@
 // Get the instance of java.lang.Class corresponding to this klass.
 // Cache it on this->_java_mirror.
 ciInstance* ciInstanceKlass::java_mirror() {
+  if (is_shared()) {
+    return ciKlass::java_mirror();
+  }
   if (_java_mirror == NULL) {
     _java_mirror = ciKlass::java_mirror();
   }
--- a/src/share/vm/ci/ciObjectFactory.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/ci/ciObjectFactory.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -663,7 +663,7 @@
   if (key->is_perm() && _non_perm_count == 0) {
     return emptyBucket;
   } else if (key->is_instance()) {
-    if (key->klass() == SystemDictionary::Class_klass()) {
+    if (key->klass() == SystemDictionary::Class_klass() && JavaObjectsInPerm) {
       // class mirror instances are always perm
       return emptyBucket;
     }
--- a/src/share/vm/classfile/classFileParser.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -37,6 +37,7 @@
 #include "memory/universe.inline.hpp"
 #include "oops/constantPoolOop.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/klassOop.hpp"
 #include "oops/klassVtable.hpp"
@@ -2606,54 +2607,6 @@
 }
 
 
-static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
-  KlassHandle h_k (THREAD, fd->field_holder());
-  assert(h_k.not_null() && fd->is_static(), "just checking");
-  if (fd->has_initial_value()) {
-    BasicType t = fd->field_type();
-    switch (t) {
-      case T_BYTE:
-        h_k()->byte_field_put(fd->offset(), fd->int_initial_value());
-        break;
-      case T_BOOLEAN:
-        h_k()->bool_field_put(fd->offset(), fd->int_initial_value());
-        break;
-      case T_CHAR:
-        h_k()->char_field_put(fd->offset(), fd->int_initial_value());
-        break;
-      case T_SHORT:
-        h_k()->short_field_put(fd->offset(), fd->int_initial_value());
-        break;
-      case T_INT:
-        h_k()->int_field_put(fd->offset(), fd->int_initial_value());
-        break;
-      case T_FLOAT:
-        h_k()->float_field_put(fd->offset(), fd->float_initial_value());
-        break;
-      case T_DOUBLE:
-        h_k()->double_field_put(fd->offset(), fd->double_initial_value());
-        break;
-      case T_LONG:
-        h_k()->long_field_put(fd->offset(), fd->long_initial_value());
-        break;
-      case T_OBJECT:
-        {
-          #ifdef ASSERT
-          TempNewSymbol sym = SymbolTable::new_symbol("Ljava/lang/String;", CHECK);
-          assert(fd->signature() == sym, "just checking");
-          #endif
-          oop string = fd->string_initial_value(CHECK);
-          h_k()->obj_field_put(fd->offset(), string);
-        }
-        break;
-      default:
-        THROW_MSG(vmSymbols::java_lang_ClassFormatError(),
-                  "Illegal ConstantValue attribute in class file");
-    }
-  }
-}
-
-
 void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
   constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS) {
   // This code is for compatibility with earlier jdk's that do not
@@ -2769,8 +2722,8 @@
 }
 
 
-void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
-  FieldAllocationCount *fac_ptr, TRAPS) {
+void ClassFileParser::java_lang_Class_fix_pre(int* nonstatic_field_size,
+  FieldAllocationCount *fac_ptr) {
   // Add fake fields for java.lang.Class instances
   //
   // This is not particularly nice. We should consider adding a
@@ -2787,10 +2740,13 @@
   // versions because when the offsets are computed at bootstrap
   // time we don't know yet which version of the JDK we're running in.
 
-  // The values below are fake but will force two non-static oop fields and
+  // The values below are fake but will force three non-static oop fields and
   // a corresponding non-static oop map block to be allocated.
   const int extra = java_lang_Class::number_of_fake_oop_fields;
   fac_ptr->nonstatic_oop_count += extra;
+
+  // Reserve some leading space for fake ints
+  *nonstatic_field_size += align_size_up(java_lang_Class::hc_number_of_fake_int_fields * BytesPerInt, heapOopSize) / heapOopSize;
 }
 
 
@@ -3205,9 +3161,7 @@
     int next_nonstatic_field_offset;
 
     // Calculate the starting byte offsets
-    next_static_oop_offset      = (instanceKlass::header_size() +
-                                  align_object_offset(vtable_size) +
-                                  align_object_offset(itable_size)) * wordSize;
+    next_static_oop_offset      = instanceMirrorKlass::offset_of_static_fields();
     next_static_double_offset   = next_static_oop_offset +
                                   (fac.static_oop_count * heapOopSize);
     if ( fac.static_double_count &&
@@ -3226,15 +3180,16 @@
                                           fac.static_byte_count ), wordSize );
     static_field_size           = (next_static_type_offset -
                                   next_static_oop_offset) / wordSize;
+
+    // Add fake fields for java.lang.Class instances (also see below)
+    if (class_name == vmSymbols::java_lang_Class() && class_loader.is_null()) {
+      java_lang_Class_fix_pre(&nonstatic_field_size, &fac);
+    }
+
     first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
                                    nonstatic_field_size * heapOopSize;
     next_nonstatic_field_offset = first_nonstatic_field_offset;
 
-    // Add fake fields for java.lang.Class instances (also see below)
-    if (class_name == vmSymbols::java_lang_Class() && class_loader.is_null()) {
-      java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle));
-    }
-
     // adjust the vmentry field declaration in java.lang.invoke.MethodHandle
     if (EnableMethodHandles && class_name == vmSymbols::java_lang_invoke_MethodHandle() && class_loader.is_null()) {
       java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
@@ -3566,7 +3521,7 @@
     }
 
     // We can now create the basic klassOop for this klass
-    klassOop ik = oopFactory::new_instanceKlass(vtable_size, itable_size,
+    klassOop ik = oopFactory::new_instanceKlass(name, vtable_size, itable_size,
                                                 static_field_size,
                                                 total_oop_map_count,
                                                 rt, CHECK_(nullHandle));
@@ -3588,7 +3543,7 @@
     this_klass->set_class_loader(class_loader());
    this_klass->set_nonstatic_field_size(nonstatic_field_size);
     this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
-    this_klass->set_static_oop_field_size(fac.static_oop_count);
+    this_klass->set_static_oop_field_count(fac.static_oop_count);
     cp->set_pool_holder(this_klass());
     error_handler.set_in_error(false);   // turn off error handler for cp
     this_klass->set_constants(cp());
@@ -3649,9 +3604,6 @@
     // Make sure this is the end of class file stream
     guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
 
-    // Initialize static fields
-    this_klass->do_local_static_fields(&initialize_static_field, CHECK_(nullHandle));
-
    // VerifyOops believes that once this has been set, the object is completely loaded.
     // Compute transitive closure of interfaces this class implements
     this_klass->set_transitive_interfaces(transitive_interfaces());
@@ -3685,6 +3637,9 @@
       check_illegal_static_method(this_klass, CHECK_(nullHandle));
     }
 
+    // Allocate mirror and initialize static fields
+    java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
+
     ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()),
                                              false /* not shared class */);
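With this change the parser no longer lays static fields out inside the instanceKlass: they start at instanceMirrorKlass::offset_of_static_fields() in the java.lang.Class instance, and java_lang_Class_fix_pre now also reserves leading space in the mirror for the fake int fields (hc_number_of_fake_int_fields == 2 in this changeset). The reservation arithmetic converts those bytes into whole heap-oop slots. A standalone sketch of that arithmetic, using the values visible in the diff (align_size_up here is a local stand-in, not the HotSpot utility):

#include <cstdio>

// Round size up to a power-of-two alignment, as the reservation above does.
static int align_size_up(int size, int alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int BytesPerInt     = 4;
  const int fake_int_fields = 2;   // java_lang_Class::hc_number_of_fake_int_fields
  for (int heapOopSize = 4; heapOopSize <= 8; heapOopSize += 4) {
    int slots = align_size_up(fake_int_fields * BytesPerInt, heapOopSize) / heapOopSize;
    // 4-byte oops (compressed/32-bit): 2 slots; 8-byte oops: 1 slot.
    printf("heapOopSize=%d -> %d leading heap-oop slot(s) reserved\n", heapOopSize, slots);
  }
  return 0;
}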
--- a/src/share/vm/classfile/classFileParser.hpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/classfile/classFileParser.hpp Fri Mar 25 18:19:22 2011 -0400
@@ -154,11 +154,12 @@
   // Add the "discovered" field to java.lang.ref.Reference if
   // it does not exist.
   void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr,
-    constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS);
+                                       constantPoolHandle cp,
+                                       FieldAllocationCount *fac_ptr, TRAPS);
   // Adjust the field allocation counts for java.lang.Class to add
   // fake fields.
-  void java_lang_Class_fix_pre(objArrayHandle* methods_ptr,
-                               FieldAllocationCount *fac_ptr, TRAPS);
+  void java_lang_Class_fix_pre(int* nonstatic_field_size,
+                               FieldAllocationCount *fac_ptr);
   // Adjust the next_nonstatic_oop_offset to place the fake fields
   // before any Java fields.
   void java_lang_Class_fix_post(int* next_nonstatic_oop_offset);
--- a/src/share/vm/classfile/javaClasses.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -33,6 +33,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.hpp"
 #include "oops/klassOop.hpp"
 #include "oops/methodOop.hpp"
@@ -161,7 +162,7 @@
 }
 
 Handle java_lang_String::create_tenured_from_unicode(jchar* unicode, int length, TRAPS) {
-  return basic_create_from_unicode(unicode, length, true, CHECK_NH);
+  return basic_create_from_unicode(unicode, length, JavaObjectsInPerm, CHECK_NH);
 }
 
 oop java_lang_String::create_oop_from_unicode(jchar* unicode, int length, TRAPS) {
@@ -391,6 +392,75 @@
   }
 }
 
+static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
+  Handle mirror (THREAD, fd->field_holder()->java_mirror());
+  assert(mirror.not_null() && fd->is_static(), "just checking");
+  if (fd->has_initial_value()) {
+    BasicType t = fd->field_type();
+    switch (t) {
+      case T_BYTE:
+        mirror()->byte_field_put(fd->offset(), fd->int_initial_value());
+        break;
+      case T_BOOLEAN:
+        mirror()->bool_field_put(fd->offset(), fd->int_initial_value());
+        break;
+      case T_CHAR:
+        mirror()->char_field_put(fd->offset(), fd->int_initial_value());
+        break;
+      case T_SHORT:
+        mirror()->short_field_put(fd->offset(), fd->int_initial_value());
+        break;
+      case T_INT:
+        mirror()->int_field_put(fd->offset(), fd->int_initial_value());
+        break;
+      case T_FLOAT:
+        mirror()->float_field_put(fd->offset(), fd->float_initial_value());
+        break;
+      case T_DOUBLE:
+        mirror()->double_field_put(fd->offset(), fd->double_initial_value());
+        break;
+      case T_LONG:
+        mirror()->long_field_put(fd->offset(), fd->long_initial_value());
+        break;
+      case T_OBJECT:
+        {
+          #ifdef ASSERT
+          TempNewSymbol sym = SymbolTable::new_symbol("Ljava/lang/String;", CHECK);
+          assert(fd->signature() == sym, "just checking");
+          #endif
+          oop string = fd->string_initial_value(CHECK);
+          mirror()->obj_field_put(fd->offset(), string);
+        }
+        break;
+      default:
+        THROW_MSG(vmSymbols::java_lang_ClassFormatError(),
+                  "Illegal ConstantValue attribute in class file");
+    }
+  }
+}
+
+
+// During bootstrap, java.lang.Class wasn't loaded so static field
+// offsets were computed without the size added it.  Go back and
+// update all the static field offsets to included the size.
+static void fixup_static_field(fieldDescriptor* fd, TRAPS) {
+  if (fd->is_static()) {
+    int real_offset = fd->offset() + instanceMirrorKlass::offset_of_static_fields();
+    typeArrayOop fields = instanceKlass::cast(fd->field_holder())->fields();
+    fields->short_at_put(fd->index() + instanceKlass::low_offset,  extract_low_short_from_int(real_offset));
+    fields->short_at_put(fd->index() + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
+  }
+}
+
+void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) {
+  assert(instanceMirrorKlass::offset_of_static_fields() != 0, "must have been computed already");
+
+  if (k->oop_is_instance()) {
+    // Fixup the offsets
+    instanceKlass::cast(k())->do_local_static_fields(&fixup_static_field, CHECK);
+  }
+  create_mirror(k, CHECK);
+}
 
 oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
@@ -400,12 +470,17 @@
   // class is put into the system dictionary.
   int computed_modifiers = k->compute_modifier_flags(CHECK_0);
   k->set_modifier_flags(computed_modifiers);
-  if (SystemDictionary::Class_klass_loaded()) {
+  if (SystemDictionary::Class_klass_loaded() && (k->oop_is_instance() || k->oop_is_javaArray())) {
     // Allocate mirror (java.lang.Class instance)
-    Handle mirror = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
+    Handle mirror = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0);
     // Setup indirections
     mirror->obj_field_put(klass_offset, k());
     k->set_java_mirror(mirror());
+
+    instanceMirrorKlass* mk = instanceMirrorKlass::cast(mirror->klass());
+    java_lang_Class::set_oop_size(mirror(), mk->instance_size(k));
+    java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror()));
+
     // It might also have a component mirror.  This mirror must already exist.
     if (k->oop_is_javaArray()) {
       Handle comp_mirror;
@@ -428,6 +503,9 @@
         arrayKlass::cast(k->as_klassOop())->set_component_mirror(comp_mirror());
         set_array_klass(comp_mirror(), k->as_klassOop());
       }
+    } else if (k->oop_is_instance()) {
+      // Initialize static fields
+      instanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
     }
     return mirror();
   } else {
@@ -436,21 +514,46 @@
 }
 
 
+
+int  java_lang_Class::oop_size(oop java_class) {
+  assert(oop_size_offset != 0, "must be set");
+  return java_class->int_field(oop_size_offset);
+}
+void java_lang_Class::set_oop_size(oop java_class, int size) {
+  assert(oop_size_offset != 0, "must be set");
+  java_class->int_field_put(oop_size_offset, size);
+}
+int  java_lang_Class::static_oop_field_count(oop java_class) {
+  assert(static_oop_field_count_offset != 0, "must be set");
+  return java_class->int_field(static_oop_field_count_offset);
+}
+void java_lang_Class::set_static_oop_field_count(oop java_class, int size) {
+  assert(static_oop_field_count_offset != 0, "must be set");
+  java_class->int_field_put(static_oop_field_count_offset, size);
+}
+
+
+
+
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
-  oop java_class = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
+  oop java_class = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance((oop)NULL, CHECK_0);
   if (type != T_VOID) {
     klassOop aklass = Universe::typeArrayKlassObj(type);
     assert(aklass != NULL, "correct bootstrap");
     set_array_klass(java_class, aklass);
   }
+  instanceMirrorKlass* mk = instanceMirrorKlass::cast(SystemDictionary::Class_klass());
+  java_lang_Class::set_oop_size(java_class, mk->instance_size(oop(NULL)));
+  java_lang_Class::set_static_oop_field_count(java_class, 0);
   return java_class;
 }
 
 
 klassOop java_lang_Class::as_klassOop(oop java_class) {
   //%note memory_2
+  assert(java_lang_Class::is_instance(java_class), "must be a Class object");
   klassOop k = klassOop(java_class->obj_field(klass_offset));
   assert(k == NULL || k->is_klass(), "type check");
   return k;
@@ -2152,7 +2255,7 @@
 // Support for java_lang_ref_Reference
 oop java_lang_ref_Reference::pending_list_lock() {
   instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
-  char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
+  address addr = ik->static_field_addr(static_lock_offset);
   if (UseCompressedOops) {
     return oopDesc::load_decode_heap_oop((narrowOop *)addr);
   } else {
@@ -2162,7 +2265,7 @@
 
 HeapWord *java_lang_ref_Reference::pending_list_addr() {
   instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
-  char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
+  address addr = ik->static_field_addr(static_pending_offset);
   // XXX This might not be HeapWord aligned, almost rather be char *.
   return (HeapWord*)addr;
 }
@@ -2185,16 +2288,14 @@
 
 jlong java_lang_ref_SoftReference::clock() {
   instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
-  int offset = ik->offset_of_static_fields() + static_clock_offset;
-
-  return SystemDictionary::SoftReference_klass()->long_field(offset);
+  jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
+  return *offset;
 }
 
 void java_lang_ref_SoftReference::set_clock(jlong value) {
   instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
-  int offset = ik->offset_of_static_fields() + static_clock_offset;
-
-  SystemDictionary::SoftReference_klass()->long_field_put(offset, value);
+  jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
+  *offset = value;
 }
 
 
@@ -2625,26 +2726,18 @@
 
 
 // Support for java_lang_System
-
-void java_lang_System::compute_offsets() {
-  assert(offset_of_static_fields == 0, "offsets should be initialized only once");
-
-  instanceKlass* ik = instanceKlass::cast(SystemDictionary::System_klass());
-  offset_of_static_fields = ik->offset_of_static_fields();
+int java_lang_System::in_offset_in_bytes() {
+  return (instanceMirrorKlass::offset_of_static_fields() + static_in_offset);
 }
 
-int java_lang_System::in_offset_in_bytes() {
-  return (offset_of_static_fields + static_in_offset);
+
+int java_lang_System::out_offset_in_bytes() {
+  return (instanceMirrorKlass::offset_of_static_fields() + static_out_offset);
 }
 
 
-int java_lang_System::out_offset_in_bytes() {
-  return (offset_of_static_fields + static_out_offset);
-}
-
-
 int java_lang_System::err_offset_in_bytes() {
-  return (offset_of_static_fields + static_err_offset);
+  return (instanceMirrorKlass::offset_of_static_fields() + static_err_offset);
 }
 
 
@@ -2657,6 +2750,8 @@
 int java_lang_Class::array_klass_offset;
 int java_lang_Class::resolved_constructor_offset;
 int java_lang_Class::number_of_fake_oop_fields;
+int java_lang_Class::oop_size_offset;
+int java_lang_Class::static_oop_field_count_offset;
 int java_lang_Throwable::backtrace_offset;
 int java_lang_Throwable::detailMessage_offset;
 int java_lang_Throwable::cause_offset;
@@ -2700,7 +2795,6 @@
 int java_lang_ref_SoftReference::timestamp_offset;
 int java_lang_ref_SoftReference::static_clock_offset;
 int java_lang_ClassLoader::parent_offset;
-int java_lang_System::offset_of_static_fields;
 int java_lang_System::static_in_offset;
 int java_lang_System::static_out_offset;
 int java_lang_System::static_err_offset;
@@ -2817,10 +2911,19 @@
   java_lang_String::count_offset  = java_lang_String::offset_offset + sizeof (jint);
   java_lang_String::hash_offset   = java_lang_String::count_offset + sizeof (jint);
 
-  // Do the Class Class
-  java_lang_Class::klass_offset = java_lang_Class::hc_klass_offset * x + header;
-  java_lang_Class::array_klass_offset = java_lang_Class::hc_array_klass_offset * x + header;
-  java_lang_Class::resolved_constructor_offset = java_lang_Class::hc_resolved_constructor_offset * x + header;
+  {
+    // Do the Class Class
+    int offset = header;
+    java_lang_Class::oop_size_offset = header;
+    offset += BytesPerInt;
+    java_lang_Class::static_oop_field_count_offset = offset;
+    offset = align_size_up(offset + BytesPerInt, x);
+    java_lang_Class::klass_offset = offset;
+    offset += x;
+    java_lang_Class::array_klass_offset = offset;
+    offset += x;
+    java_lang_Class::resolved_constructor_offset = offset;
+  }
 
   // This is NOT an offset
   java_lang_Class::number_of_fake_oop_fields = java_lang_Class::hc_number_of_fake_oop_fields;
@@ -2877,7 +2980,6 @@
 void JavaClasses::compute_offsets() {
 
   java_lang_Class::compute_offsets();
-  java_lang_System::compute_offsets();
   java_lang_Thread::compute_offsets();
   java_lang_ThreadGroup::compute_offsets();
   if (EnableMethodHandles) {
@@ -2961,10 +3063,10 @@
     tty->print_cr("Static field %s.%s appears to be nonstatic", klass_name, field_name);
     return false;
   }
-  if (fd.offset() == hardcoded_offset + h_klass->offset_of_static_fields()) {
+  if (fd.offset() == hardcoded_offset + instanceMirrorKlass::offset_of_static_fields()) {
    return true;
  } else {
-    tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - h_klass->offset_of_static_fields());
+    tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - instanceMirrorKlass::offset_of_static_fields());
     return false;
   }
 }
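The new compute_offsets block above fixes the layout of the injected java.lang.Class fields: an int holding the mirror's oop size, an int holding the static-oop-field count, then the heap-oop aligned klass, array_klass and resolved_constructor slots, with the class's static fields following. A standalone sketch of that offset arithmetic, where the header size and heap-oop size are illustrative inputs rather than values taken from a live VM:

#include <cstdio>

static int align_size_up(int size, int alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

// Mirrors the offset computation added to java_lang_Class: two leading ints,
// then three oop-sized slots aligned to the heap-oop size.
int main() {
  const int header      = 16;  // illustrative object header size
  const int x           = 8;   // illustrative heap-oop size (uncompressed 64-bit)
  const int BytesPerInt = 4;

  int offset = header;
  int oop_size_offset = header;
  offset += BytesPerInt;
  int static_oop_field_count_offset = offset;
  offset = align_size_up(offset + BytesPerInt, x);
  int klass_offset = offset;                 offset += x;
  int array_klass_offset = offset;           offset += x;
  int resolved_constructor_offset = offset;

  printf("oop_size=%d static_count=%d klass=%d array_klass=%d resolved_ctor=%d\n",
         oop_size_offset, static_oop_field_count_offset, klass_offset,
         array_klass_offset, resolved_constructor_offset);
  return 0;
}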
--- a/src/share/vm/classfile/javaClasses.hpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp Fri Mar 25 18:19:22 2011 -0400
@@ -138,10 +138,8 @@
   // The fake offsets are added by the class loader when java.lang.Class is loaded
 
   enum {
-    hc_klass_offset                = 0,
-    hc_array_klass_offset          = 1,
-    hc_resolved_constructor_offset = 2,
-    hc_number_of_fake_oop_fields   = 3
+    hc_number_of_fake_oop_fields   = 3,
+    hc_number_of_fake_int_fields   = 2
   };
 
   static int klass_offset;
@@ -149,6 +147,9 @@
   static int array_klass_offset;
   static int number_of_fake_oop_fields;
 
+  static int oop_size_offset;
+  static int static_oop_field_count_offset;
+
   static void compute_offsets();
   static bool offsets_computed;
   static int classRedefinedCount_offset;
@@ -157,6 +158,7 @@
  public:
   // Instance creation
   static oop  create_mirror(KlassHandle k, TRAPS);
+  static void fixup_mirror(KlassHandle k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
   static klassOop as_klassOop(oop java_class);
@@ -191,6 +193,12 @@
   static void set_classRedefinedCount(oop the_class_mirror, int value);
   // Support for parallelCapable field
   static bool parallelCapable(oop the_class_mirror);
+
+  static int oop_size(oop java_class);
+  static void set_oop_size(oop java_class, int size);
+  static int static_oop_field_count(oop java_class);
+  static void set_static_oop_field_count(oop java_class, int size);
+
   // Debugging
   friend class JavaClasses;
   friend class instanceKlass;   // verification code accesses offsets
@@ -1165,13 +1173,10 @@
    hc_static_err_offset = 2
   };
 
-  static int offset_of_static_fields;
   static int  static_in_offset;
   static int static_out_offset;
   static int static_err_offset;
 
-  static void compute_offsets();
-
  public:
   static int  in_offset_in_bytes();
   static int out_offset_in_bytes();
--- a/src/share/vm/classfile/symbolTable.cpp Fri Mar 25 11:29:30 2011 -0700
+++ b/src/share/vm/classfile/symbolTable.cpp Fri Mar 25 18:19:22 2011 -0400
@@ -530,7 +530,7 @@
 
   Handle string;
   // try to reuse the string if possible
-  if (!string_or_null.is_null() && string_or_null()->is_perm()) {
+  if (!string_or_null.is_null() && (!JavaObjectsInPerm || string_or_null()->is_perm())) {
     string = string_or_null;
   } else {
     string = java_lang_String::create_tenured_from_unicode(name, len, CHECK_NULL);
@@ -662,7 +662,7 @@
   for ( ; p != NULL; p = p->next()) {
     oop s = p->literal();
     guarantee(s != NULL, "interned string is NULL");
-    guarantee(s->is_perm(), "interned string not in permspace");
+    guarantee(s->is_perm() || !JavaObjectsInPerm, "interned string not in permspace");
 
     int length;
     jchar* chars = java_lang_String::as_unicode_string(s, length);
36.1 --- a/src/share/vm/classfile/symbolTable.hpp Fri Mar 25 11:29:30 2011 -0700 36.2 +++ b/src/share/vm/classfile/symbolTable.hpp Fri Mar 25 18:19:22 2011 -0400 36.3 @@ -216,18 +216,14 @@ 36.4 oop basic_add(int index, Handle string_or_null, jchar* name, int len, 36.5 unsigned int hashValue, TRAPS); 36.6 36.7 - // Table size 36.8 - enum { 36.9 - string_table_size = 1009 36.10 - }; 36.11 - 36.12 oop lookup(int index, jchar* chars, int length, unsigned int hashValue); 36.13 36.14 - StringTable() : Hashtable<oop>(string_table_size, sizeof (HashtableEntry<oop>)) {} 36.15 + StringTable() : Hashtable<oop>((int)StringTableSize, 36.16 + sizeof (HashtableEntry<oop>)) {} 36.17 36.18 StringTable(HashtableBucket* t, int number_of_entries) 36.19 - : Hashtable<oop>(string_table_size, sizeof (HashtableEntry<oop>), t, 36.20 - number_of_entries) {} 36.21 + : Hashtable<oop>((int)StringTableSize, sizeof (HashtableEntry<oop>), t, 36.22 + number_of_entries) {} 36.23 36.24 public: 36.25 // The string table 36.26 @@ -241,7 +237,7 @@ 36.27 static void create_table(HashtableBucket* t, int length, 36.28 int number_of_entries) { 36.29 assert(_the_table == NULL, "One string table allowed."); 36.30 - assert(length == string_table_size * sizeof(HashtableBucket), 36.31 + assert((size_t)length == StringTableSize * sizeof(HashtableBucket), 36.32 "bad shared string size."); 36.33 _the_table = new StringTable(t, number_of_entries); 36.34 }
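Both StringTable hunks above replace the hard-coded string_table_size enum with the StringTableSize flag, so the bucket-array size becomes a runtime decision and the shared-archive length check is asserted against the flag. A minimal standalone sketch of the same pattern; StringTableSize here is a plain global standing in for the VM flag, not HotSpot's declaration:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  static size_t StringTableSize = 1009;   // default; would come from -XX:StringTableSize=

  struct Bucket { void* head = nullptr; };

  class ToyStringTable {
    std::vector<Bucket> _buckets;
  public:
    ToyStringTable() : _buckets(StringTableSize) {}
    // Restoring from a shared archive: the serialized length must match the
    // geometry selected by the flag, otherwise the mapping is unusable.
    ToyStringTable(const Bucket* t, size_t length_in_bytes)
      : _buckets(StringTableSize) {
      assert(length_in_bytes == StringTableSize * sizeof(Bucket) && "bad shared string size");
      (void)t;
    }
  };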
37.1 --- a/src/share/vm/code/codeCache.cpp Fri Mar 25 11:29:30 2011 -0700 37.2 +++ b/src/share/vm/code/codeCache.cpp Fri Mar 25 18:19:22 2011 -0400 37.3 @@ -1,5 +1,5 @@ 37.4 /* 37.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 37.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 37.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 37.8 * 37.9 * This code is free software; you can redistribute it and/or modify it 37.10 @@ -337,7 +337,6 @@ 37.11 if (is_live) { 37.12 // Perform cur->oops_do(f), maybe just once per nmethod. 37.13 f->do_code_blob(cur); 37.14 - cur->fix_oop_relocations(); 37.15 } 37.16 } 37.17 37.18 @@ -552,6 +551,19 @@ 37.19 } 37.20 37.21 37.22 +void CodeCache::verify_oops() { 37.23 + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 37.24 + VerifyOopClosure voc; 37.25 + FOR_ALL_ALIVE_BLOBS(cb) { 37.26 + if (cb->is_nmethod()) { 37.27 + nmethod *nm = (nmethod*)cb; 37.28 + nm->oops_do(&voc); 37.29 + nm->verify_oop_relocations(); 37.30 + } 37.31 + } 37.32 +} 37.33 + 37.34 + 37.35 address CodeCache::first_address() { 37.36 assert_locked_or_safepoint(CodeCache_lock); 37.37 return (address)_heap->begin();
38.1 --- a/src/share/vm/code/codeCache.hpp Fri Mar 25 11:29:30 2011 -0700 38.2 +++ b/src/share/vm/code/codeCache.hpp Fri Mar 25 18:19:22 2011 -0400 38.3 @@ -122,6 +122,7 @@ 38.4 // GC support 38.5 static void gc_epilogue(); 38.6 static void gc_prologue(); 38.7 + static void verify_oops(); 38.8 // If "unloading_occurred" is true, then unloads (i.e., breaks root links 38.9 // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading" 38.10 // to "true" iff some code got unloaded.
39.1 --- a/src/share/vm/code/nmethod.cpp Fri Mar 25 11:29:30 2011 -0700 39.2 +++ b/src/share/vm/code/nmethod.cpp Fri Mar 25 18:19:22 2011 -0400 39.3 @@ -653,6 +653,9 @@ 39.4 _pc_desc_cache.reset_to(NULL); 39.5 39.6 code_buffer->copy_oops_to(this); 39.7 + if (ScavengeRootsInCode && detect_scavenge_root_oops()) { 39.8 + CodeCache::add_scavenge_root_nmethod(this); 39.9 + } 39.10 debug_only(verify_scavenge_root_oops()); 39.11 CodeCache::commit(this); 39.12 } 39.13 @@ -1105,6 +1108,20 @@ 39.14 } 39.15 39.16 39.17 +void nmethod::verify_oop_relocations() { 39.18 + // Ensure that the code matches the current oop values 39.19 + RelocIterator iter(this, NULL, NULL); 39.20 + while (iter.next()) { 39.21 + if (iter.type() == relocInfo::oop_type) { 39.22 + oop_Relocation* reloc = iter.oop_reloc(); 39.23 + if (!reloc->oop_is_immediate()) { 39.24 + reloc->verify_oop_relocation(); 39.25 + } 39.26 + } 39.27 + } 39.28 +} 39.29 + 39.30 + 39.31 ScopeDesc* nmethod::scope_desc_at(address pc) { 39.32 PcDesc* pd = pc_desc_at(pc); 39.33 guarantee(pd != NULL, "scope must be present"); 39.34 @@ -1823,6 +1840,7 @@ 39.35 assert(cur != NULL, "not NULL-terminated"); 39.36 nmethod* next = cur->_oops_do_mark_link; 39.37 cur->_oops_do_mark_link = NULL; 39.38 + cur->fix_oop_relocations(); 39.39 NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark\n")); 39.40 cur = next; 39.41 }
40.1 --- a/src/share/vm/code/nmethod.hpp Fri Mar 25 11:29:30 2011 -0700 40.2 +++ b/src/share/vm/code/nmethod.hpp Fri Mar 25 18:19:22 2011 -0400 40.3 @@ -459,6 +459,7 @@ 40.4 public: 40.5 void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } 40.6 void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); } 40.7 + void verify_oop_relocations(); 40.8 40.9 bool is_at_poll_return(address pc); 40.10 bool is_at_poll_or_poll_return(address pc);
41.1 --- a/src/share/vm/code/relocInfo.cpp Fri Mar 25 11:29:30 2011 -0700 41.2 +++ b/src/share/vm/code/relocInfo.cpp Fri Mar 25 18:19:22 2011 -0400 41.3 @@ -798,6 +798,14 @@ 41.4 } 41.5 41.6 41.7 +void oop_Relocation::verify_oop_relocation() { 41.8 + if (!oop_is_immediate()) { 41.9 + // get the oop from the pool, and check that the instruction still holds it: 41.10 + verify_value(value()); 41.11 + } 41.12 +} 41.13 + 41.14 + 41.15 RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop, 41.16 oop* &oop_addr, bool *is_optimized) { 41.17 assert(ic_call != NULL, "ic_call address must be set");
42.1 --- a/src/share/vm/code/relocInfo.hpp Fri Mar 25 11:29:30 2011 -0700 42.2 +++ b/src/share/vm/code/relocInfo.hpp Fri Mar 25 18:19:22 2011 -0400 42.3 @@ -765,7 +765,8 @@ 42.4 42.5 protected: 42.6 // platform-dependent utilities for decoding and patching instructions 42.7 - void pd_set_data_value (address x, intptr_t off); // a set or mem-ref 42.8 + void pd_set_data_value (address x, intptr_t off, bool verify_only = false); // a set or mem-ref 42.9 + void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); } 42.10 address pd_call_destination (address orig_addr = NULL); 42.11 void pd_set_call_destination (address x); 42.12 void pd_swap_in_breakpoint (address x, short* instrs, int instrlen); 42.13 @@ -880,6 +881,12 @@ 42.14 else 42.15 pd_set_data_value(x, o); 42.16 } 42.17 + void verify_value(address x) { 42.18 + if (addr_in_const()) 42.19 + assert(*(address*)addr() == x, "must agree"); 42.20 + else 42.21 + pd_verify_data_value(x, offset()); 42.22 + } 42.23 42.24 // The "o" (displacement) argument is relevant only to split relocations 42.25 // on RISC machines. In some CPUs (SPARC), the set-hi and set-lo ins'ns 42.26 @@ -950,6 +957,8 @@ 42.27 42.28 void fix_oop_relocation(); // reasserts oop value 42.29 42.30 + void verify_oop_relocation(); 42.31 + 42.32 address value() { return (address) *oop_addr(); } 42.33 42.34 bool oop_is_immediate() { return oop_index() == 0; }
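The relocInfo changes above thread a verify_only flag through pd_set_data_value() so the same platform-specific decoding logic can either re-patch an embedded oop or merely check it. The following standalone sketch shows the general shape of such a hook; it is not any real port's implementation, and the real signature takes an address plus a displacement rather than a raw pointer:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // Pretend the oop value is embedded directly in the instruction stream.
  void toy_pd_set_data_value(unsigned char* instr_addr, intptr_t x, bool verify_only = false) {
    if (verify_only) {
      intptr_t current;
      std::memcpy(&current, instr_addr, sizeof(current));  // decode the embedded value
      assert(current == x && "embedded oop does not match its current value");
      (void)current;
    } else {
      std::memcpy(instr_addr, &x, sizeof(x));              // re-patch the instruction
    }
  }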
43.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Mar 25 11:29:30 2011 -0700 43.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Mar 25 18:19:22 2011 -0400 43.3 @@ -1,5 +1,5 @@ 43.4 /* 43.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 43.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 43.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 43.8 * 43.9 * This code is free software; you can redistribute it and/or modify it 43.10 @@ -292,13 +292,15 @@ 43.11 void CMSCollector::ref_processor_init() { 43.12 if (_ref_processor == NULL) { 43.13 // Allocate and initialize a reference processor 43.14 - _ref_processor = ReferenceProcessor::create_ref_processor( 43.15 - _span, // span 43.16 - _cmsGen->refs_discovery_is_atomic(), // atomic_discovery 43.17 - _cmsGen->refs_discovery_is_mt(), // mt_discovery 43.18 - &_is_alive_closure, 43.19 - ParallelGCThreads, 43.20 - ParallelRefProcEnabled); 43.21 + _ref_processor = 43.22 + new ReferenceProcessor(_span, // span 43.23 + (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing 43.24 + (int) ParallelGCThreads, // mt processing degree 43.25 + _cmsGen->refs_discovery_is_mt(), // mt discovery 43.26 + (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree 43.27 + _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic 43.28 + &_is_alive_closure, // closure for liveness info 43.29 + false); // next field updates do not need write barrier 43.30 // Initialize the _ref_processor field of CMSGen 43.31 _cmsGen->set_ref_processor(_ref_processor); 43.32 43.33 @@ -641,7 +643,7 @@ 43.34 } 43.35 43.36 // Support for multi-threaded concurrent phases 43.37 - if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) { 43.38 + if (CMSConcurrentMTEnabled) { 43.39 if (FLAG_IS_DEFAULT(ConcGCThreads)) { 43.40 // just for now 43.41 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4); 43.42 @@ -1689,6 +1691,8 @@ 43.43 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); 43.44 _full_gc_requested = true; 43.45 CGC_lock->notify(); // nudge CMS thread 43.46 + } else { 43.47 + assert(gc_count > full_gc_count, "Error: causal loop"); 43.48 } 43.49 } 43.50 43.51 @@ -1988,17 +1992,16 @@ 43.52 // Temporarily widen the span of the weak reference processing to 43.53 // the entire heap. 43.54 MemRegion new_span(GenCollectedHeap::heap()->reserved_region()); 43.55 - ReferenceProcessorSpanMutator x(ref_processor(), new_span); 43.56 - 43.57 + ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span); 43.58 // Temporarily, clear the "is_alive_non_header" field of the 43.59 // reference processor. 43.60 - ReferenceProcessorIsAliveMutator y(ref_processor(), NULL); 43.61 - 43.62 + ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL); 43.63 // Temporarily make reference _processing_ single threaded (non-MT). 
43.64 - ReferenceProcessorMTProcMutator z(ref_processor(), false); 43.65 - 43.66 + ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false); 43.67 // Temporarily make refs discovery atomic 43.68 - ReferenceProcessorAtomicMutator w(ref_processor(), true); 43.69 + ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true); 43.70 + // Temporarily make reference _discovery_ single threaded (non-MT) 43.71 + ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); 43.72 43.73 ref_processor()->set_enqueuing_is_done(false); 43.74 ref_processor()->enable_discovery(); 43.75 @@ -4263,9 +4266,7 @@ 43.76 43.77 // Refs discovery is already non-atomic. 43.78 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); 43.79 - // Mutate the Refs discovery so it is MT during the 43.80 - // multi-threaded marking phase. 43.81 - ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); 43.82 + assert(ref_processor()->discovery_is_mt(), "Discovery should be MT"); 43.83 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) 43.84 conc_workers()->start_task(&tsk); 43.85 while (tsk.yielded()) { 43.86 @@ -4318,6 +4319,8 @@ 43.87 ResourceMark rm; 43.88 HandleMark hm; 43.89 43.90 + // Temporarily make refs discovery single threaded (non-MT) 43.91 + ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); 43.92 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, 43.93 &_markStack, &_revisitStack, CMSYield && asynch); 43.94 // the last argument to iterate indicates whether the iteration 43.95 @@ -4356,10 +4359,6 @@ 43.96 verify_overflow_empty(); 43.97 _abort_preclean = false; 43.98 if (CMSPrecleaningEnabled) { 43.99 - // Precleaning is currently not MT but the reference processor 43.100 - // may be set for MT. Disable it temporarily here. 43.101 - ReferenceProcessor* rp = ref_processor(); 43.102 - ReferenceProcessorMTProcMutator z(rp, false); 43.103 _eden_chunk_index = 0; 43.104 size_t used = get_eden_used(); 43.105 size_t capacity = get_eden_capacity(); 43.106 @@ -4502,11 +4501,16 @@ 43.107 _collectorState == AbortablePreclean, "incorrect state"); 43.108 ResourceMark rm; 43.109 HandleMark hm; 43.110 + 43.111 + // Precleaning is currently not MT but the reference processor 43.112 + // may be set for MT. Disable it temporarily here. 43.113 + ReferenceProcessor* rp = ref_processor(); 43.114 + ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 43.115 + 43.116 // Do one pass of scrubbing the discovered reference lists 43.117 // to remove any reference objects with strongly-reachable 43.118 // referents. 43.119 if (clean_refs) { 43.120 - ReferenceProcessor* rp = ref_processor(); 43.121 CMSPrecleanRefsYieldClosure yield_cl(this); 43.122 assert(rp->span().equals(_span), "Spans should be equal"); 43.123 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, 43.124 @@ -5576,8 +5580,10 @@ 43.125 // in the multi-threaded case, but we special-case n=1 here to get 43.126 // repeatable measurements of the 1-thread overhead of the parallel code. 43.127 if (n_workers > 1) { 43.128 - // Make refs discovery MT-safe 43.129 - ReferenceProcessorMTMutator mt(ref_processor(), true); 43.130 + // Make refs discovery MT-safe, if it isn't already: it may not 43.131 + // necessarily be so, since it's possible that we are doing 43.132 + // ST marking. 
43.133 + ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true); 43.134 GenCollectedHeap::StrongRootsScope srs(gch); 43.135 workers->run_task(&tsk); 43.136 } else { 43.137 @@ -5703,14 +5709,19 @@ 43.138 CMSBitMap* mark_bit_map, 43.139 AbstractWorkGang* workers, 43.140 OopTaskQueueSet* task_queues): 43.141 + // XXX Should superclass AGTWOQ also know about AWG since it knows 43.142 + // about the task_queues used by the AWG? Then it could initialize 43.143 + // the terminator() object. See 6984287. The set_for_termination() 43.144 + // below is a temporary band-aid for the regression in 6984287. 43.145 AbstractGangTaskWOopQueues("Process referents by policy in parallel", 43.146 task_queues), 43.147 _task(task), 43.148 _collector(collector), _span(span), _mark_bit_map(mark_bit_map) 43.149 - { 43.150 - assert(_collector->_span.equals(_span) && !_span.is_empty(), 43.151 - "Inconsistency in _span"); 43.152 - } 43.153 + { 43.154 + assert(_collector->_span.equals(_span) && !_span.is_empty(), 43.155 + "Inconsistency in _span"); 43.156 + set_for_termination(workers->active_workers()); 43.157 + } 43.158 43.159 OopTaskQueueSet* task_queues() { return queues(); } 43.160 43.161 @@ -5872,8 +5883,7 @@ 43.162 // That is OK as long as the Reference lists are balanced (see 43.163 // balance_all_queues() and balance_queues()). 43.164 43.165 - 43.166 - rp->set_mt_degree(ParallelGCThreads); 43.167 + rp->set_active_mt_degree(ParallelGCThreads); 43.168 CMSRefProcTaskExecutor task_executor(*this); 43.169 rp->process_discovered_references(&_is_alive_closure, 43.170 &cmsKeepAliveClosure, 43.171 @@ -5920,14 +5930,18 @@ 43.172 } 43.173 43.174 { 43.175 - TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty); 43.176 - // Now clean up stale oops in StringTable 43.177 - StringTable::unlink(&_is_alive_closure); 43.178 + TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); 43.179 // Clean up unreferenced symbols in symbol table. 43.180 SymbolTable::unlink(); 43.181 } 43.182 } 43.183 43.184 + if (should_unload_classes() || !JavaObjectsInPerm) { 43.185 + TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); 43.186 + // Now clean up stale oops in StringTable 43.187 + StringTable::unlink(&_is_alive_closure); 43.188 + } 43.189 + 43.190 verify_work_stacks_empty(); 43.191 // Restore any preserved marks as a result of mark stack or 43.192 // work queue overflow
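In the CMS changes above the ReferenceProcessor is now constructed directly, with separate knobs for multi-threaded reference processing and multi-threaded reference discovery, each with its own degree, instead of going through create_ref_processor(). A toy standalone model of that split; the names and the behavior of set_active_mt_degree() here are illustrative, not HotSpot's:

  #include <algorithm>
  #include <cstdio>

  class ToyRefProcessor {
    bool _mt_processing;  int _processing_degree;
    bool _mt_discovery;   int _discovery_degree;
  public:
    ToyRefProcessor(bool mt_processing, int processing_degree,
                    bool mt_discovery,  int discovery_degree)
      : _mt_processing(mt_processing), _processing_degree(processing_degree),
        _mt_discovery(mt_discovery),   _discovery_degree(discovery_degree) {}

    // Analogous role to set_active_mt_degree(): choose how many queues this
    // cycle will use, bounded by the degree configured at construction time.
    void set_active_mt_degree(int workers) {
      _processing_degree = std::min(workers, _processing_degree);
    }
    void print() const {
      std::printf("processing: mt=%d degree=%d, discovery: mt=%d degree=%d\n",
                  _mt_processing, _processing_degree, _mt_discovery, _discovery_degree);
    }
  };

  int main() {
    const int ParallelGCThreads = 8, ConcGCThreads = 2;
    ToyRefProcessor rp((ParallelGCThreads > 1), ParallelGCThreads,          // mt processing + degree
                       true, std::max(ConcGCThreads, ParallelGCThreads));   // mt discovery + degree
    rp.set_active_mt_degree(4);
    rp.print();
    return 0;
  }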
44.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Mar 25 11:29:30 2011 -0700 44.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Mar 25 18:19:22 2011 -0400 44.3 @@ -1,5 +1,5 @@ 44.4 /* 44.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 44.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 44.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 44.8 * 44.9 * This code is free software; you can redistribute it and/or modify it 44.10 @@ -1133,7 +1133,7 @@ 44.11 // rare that the cost of the CAS's involved is in the 44.12 // noise. That's a measurement that should be done, and 44.13 // the code simplified if that turns out to be the case. 44.14 - return false; 44.15 + return ConcGCThreads > 1; 44.16 } 44.17 44.18 // Override
45.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Fri Mar 25 11:29:30 2011 -0700 45.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Fri Mar 25 18:19:22 2011 -0400 45.3 @@ -1,5 +1,5 @@ 45.4 /* 45.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 45.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 45.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 45.8 * 45.9 * This code is free software; you can redistribute it and/or modify it 45.10 @@ -51,7 +51,7 @@ 45.11 volatile jint ConcurrentMarkSweepThread::_pending_yields = 0; 45.12 volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0; 45.13 45.14 -volatile bool ConcurrentMarkSweepThread::_icms_enabled = false; 45.15 +volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0; 45.16 volatile bool ConcurrentMarkSweepThread::_should_run = false; 45.17 // When icms is enabled, the icms thread is stopped until explicitly 45.18 // started. 45.19 @@ -84,7 +84,7 @@ 45.20 } 45.21 } 45.22 _sltMonitor = SLT_lock; 45.23 - set_icms_enabled(CMSIncrementalMode); 45.24 + assert(!CMSIncrementalMode || icms_is_enabled(), "Error"); 45.25 } 45.26 45.27 void ConcurrentMarkSweepThread::run() { 45.28 @@ -341,11 +341,11 @@ 45.29 45.30 void ConcurrentMarkSweepThread::icms_wait() { 45.31 assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking"); 45.32 - if (_should_stop && icms_enabled()) { 45.33 + if (_should_stop && icms_is_enabled()) { 45.34 MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag); 45.35 trace_state("pause_icms"); 45.36 _collector->stats().stop_cms_timer(); 45.37 - while(!_should_run && icms_enabled()) { 45.38 + while(!_should_run && icms_is_enabled()) { 45.39 iCMS_lock->wait(Mutex::_no_safepoint_check_flag); 45.40 } 45.41 _collector->stats().start_cms_timer();
46.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Fri Mar 25 11:29:30 2011 -0700 46.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Fri Mar 25 18:19:22 2011 -0400 46.3 @@ -1,5 +1,5 @@ 46.4 /* 46.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 46.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 46.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 46.8 * 46.9 * This code is free software; you can redistribute it and/or modify it 46.10 @@ -40,7 +40,7 @@ 46.11 class ConcurrentMarkSweepGeneration; 46.12 class CMSCollector; 46.13 46.14 -// The Concurrent Mark Sweep GC Thread (could be several in the future). 46.15 +// The Concurrent Mark Sweep GC Thread 46.16 class ConcurrentMarkSweepThread: public ConcurrentGCThread { 46.17 friend class VMStructs; 46.18 friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship 46.19 @@ -55,8 +55,6 @@ 46.20 static SurrogateLockerThread::SLT_msg_type _sltBuffer; 46.21 static Monitor* _sltMonitor; 46.22 46.23 - ConcurrentMarkSweepThread* _next; 46.24 - 46.25 static bool _should_terminate; 46.26 46.27 enum CMS_flag_type { 46.28 @@ -84,7 +82,7 @@ 46.29 // Tracing messages, enabled by CMSTraceThreadState. 46.30 static inline void trace_state(const char* desc); 46.31 46.32 - static volatile bool _icms_enabled; // iCMS enabled? 46.33 + static volatile int _icms_disabled; // a counter to track #iCMS disable & enable 46.34 static volatile bool _should_run; // iCMS may run 46.35 static volatile bool _should_stop; // iCMS should stop 46.36 46.37 @@ -214,10 +212,25 @@ 46.38 46.39 // Incremental mode is enabled globally by the flag CMSIncrementalMode. It 46.40 // must also be enabled/disabled dynamically to allow foreground collections. 46.41 - static inline void enable_icms() { _icms_enabled = true; } 46.42 - static inline void disable_icms() { _icms_enabled = false; } 46.43 - static inline void set_icms_enabled(bool val) { _icms_enabled = val; } 46.44 - static inline bool icms_enabled() { return _icms_enabled; } 46.45 +#define ICMS_ENABLING_ASSERT \ 46.46 + assert((CMSIncrementalMode && _icms_disabled >= 0) || \ 46.47 + (!CMSIncrementalMode && _icms_disabled <= 0), "Error") 46.48 + 46.49 + static inline void enable_icms() { 46.50 + ICMS_ENABLING_ASSERT; 46.51 + Atomic::dec(&_icms_disabled); 46.52 + } 46.53 + static inline void disable_icms() { 46.54 + ICMS_ENABLING_ASSERT; 46.55 + Atomic::inc(&_icms_disabled); 46.56 + } 46.57 + static inline bool icms_is_disabled() { 46.58 + ICMS_ENABLING_ASSERT; 46.59 + return _icms_disabled > 0; 46.60 + } 46.61 + static inline bool icms_is_enabled() { 46.62 + return !icms_is_disabled(); 46.63 + } 46.64 }; 46.65 46.66 inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
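The iCMS enable/disable state above becomes a counter rather than a boolean so that overlapping disable()/enable() pairs, for example from back-to-back foreground collections, compose correctly: iCMS counts as enabled only once every disable has been matched. A small standalone model of that idiom, with std::atomic standing in for Atomic::inc/dec:

  #include <atomic>
  #include <cassert>

  static std::atomic<int> icms_disabled{0};

  void disable_icms()     { icms_disabled.fetch_add(1); }
  void enable_icms()      { icms_disabled.fetch_sub(1); }
  bool icms_is_enabled()  { return icms_disabled.load() <= 0; }

  int main() {
    disable_icms();            // e.g. a foreground full GC starts
    disable_icms();            // a second, overlapping requester
    enable_icms();             // first requester finishes
    assert(!icms_is_enabled()); // still disabled: one requester outstanding
    enable_icms();             // second requester finishes
    assert(icms_is_enabled());
    return 0;
  }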
47.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Fri Mar 25 11:29:30 2011 -0700 47.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Fri Mar 25 18:19:22 2011 -0400 47.3 @@ -1,5 +1,5 @@ 47.4 /* 47.5 - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 47.6 + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. 47.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 47.8 * 47.9 * This code is free software; you can redistribute it and/or modify it 47.10 @@ -192,14 +192,18 @@ 47.11 "total_collections() should be monotonically increasing"); 47.12 47.13 MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); 47.14 + assert(_full_gc_count_before <= gch->total_full_collections(), "Error"); 47.15 if (gch->total_full_collections() == _full_gc_count_before) { 47.16 - // Disable iCMS until the full collection is done. 47.17 + // Disable iCMS until the full collection is done, and 47.18 + // remember that we did so. 47.19 CMSCollector::disable_icms(); 47.20 + _disabled_icms = true; 47.21 // In case CMS thread was in icms_wait(), wake it up. 47.22 CMSCollector::start_icms(); 47.23 // Nudge the CMS thread to start a concurrent collection. 47.24 CMSCollector::request_full_gc(_full_gc_count_before); 47.25 } else { 47.26 + assert(_full_gc_count_before < gch->total_full_collections(), "Error"); 47.27 FullGCCount_lock->notify_all(); // Inform the Java thread its work is done 47.28 } 47.29 } 47.30 @@ -259,6 +263,8 @@ 47.31 FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag); 47.32 } 47.33 } 47.34 - // Enable iCMS back. 47.35 - CMSCollector::enable_icms(); 47.36 + // Re-enable iCMS if we disabled it earlier. 47.37 + if (_disabled_icms) { 47.38 + CMSCollector::enable_icms(); 47.39 + } 47.40 }
48.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Fri Mar 25 11:29:30 2011 -0700 48.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Fri Mar 25 18:19:22 2011 -0400 48.3 @@ -128,11 +128,14 @@ 48.4 // VM operation to invoke a concurrent collection of the heap as a 48.5 // GenCollectedHeap heap. 48.6 class VM_GenCollectFullConcurrent: public VM_GC_Operation { 48.7 + bool _disabled_icms; 48.8 public: 48.9 VM_GenCollectFullConcurrent(unsigned int gc_count_before, 48.10 unsigned int full_gc_count_before, 48.11 GCCause::Cause gc_cause) 48.12 - : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */) { 48.13 + : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */), 48.14 + _disabled_icms(false) 48.15 + { 48.16 assert(FullGCCount_lock != NULL, "Error"); 48.17 assert(UseAsyncConcMarkSweepGC, "Else will hang caller"); 48.18 }
49.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Fri Mar 25 11:29:30 2011 -0700 49.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Fri Mar 25 18:19:22 2011 -0400 49.3 @@ -373,7 +373,7 @@ 49.4 // RSet updating while within an evacuation pause. 49.5 // In this case worker_i should be the id of a GC worker thread 49.6 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); 49.7 - assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "incorrect worker id"); 49.8 + assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id"); 49.9 into_cset_dcq->enqueue(entry); 49.10 } 49.11 }
50.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Mar 25 11:29:30 2011 -0700 50.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Mar 25 18:19:22 2011 -0400 50.3 @@ -1828,7 +1828,7 @@ 50.4 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 50.5 50.6 _cleanup_list.verify_optional(); 50.7 - FreeRegionList local_free_list("Local Cleanup List"); 50.8 + FreeRegionList tmp_free_list("Tmp Free List"); 50.9 50.10 if (G1ConcRegionFreeingVerbose) { 50.11 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 50.12 @@ -1842,7 +1842,7 @@ 50.13 HeapRegion* hr = _cleanup_list.remove_head(); 50.14 assert(hr != NULL, "the list was not empty"); 50.15 hr->rem_set()->clear(); 50.16 - local_free_list.add_as_tail(hr); 50.17 + tmp_free_list.add_as_tail(hr); 50.18 50.19 // Instead of adding one region at a time to the secondary_free_list, 50.20 // we accumulate them in the local list and move them a few at a 50.21 @@ -1850,20 +1850,20 @@ 50.22 // we do during this process. We'll also append the local list when 50.23 // _cleanup_list is empty (which means we just removed the last 50.24 // region from the _cleanup_list). 50.25 - if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 50.26 + if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || 50.27 _cleanup_list.is_empty()) { 50.28 if (G1ConcRegionFreeingVerbose) { 50.29 gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " 50.30 "appending "SIZE_FORMAT" entries to the " 50.31 "secondary_free_list, clean list still has " 50.32 SIZE_FORMAT" entries", 50.33 - local_free_list.length(), 50.34 + tmp_free_list.length(), 50.35 _cleanup_list.length()); 50.36 } 50.37 50.38 { 50.39 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 50.40 - g1h->secondary_free_list_add_as_tail(&local_free_list); 50.41 + g1h->secondary_free_list_add_as_tail(&tmp_free_list); 50.42 SecondaryFreeList_lock->notify_all(); 50.43 } 50.44 50.45 @@ -1874,7 +1874,7 @@ 50.46 } 50.47 } 50.48 } 50.49 - assert(local_free_list.is_empty(), "post-condition"); 50.50 + assert(tmp_free_list.is_empty(), "post-condition"); 50.51 } 50.52 50.53 // Support closures for reference procssing in G1 50.54 @@ -2141,21 +2141,22 @@ 50.55 G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap()); 50.56 G1CMDrainMarkingStackClosure 50.57 g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive); 50.58 - 50.59 // We use the work gang from the G1CollectedHeap and we utilize all 50.60 // the worker threads. 50.61 - int active_workers = MAX2(MIN2(g1h->workers()->total_workers(), (int)_max_task_num), 1); 50.62 + int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1; 50.63 + active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1); 50.64 50.65 G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(), 50.66 g1h->workers(), active_workers); 50.67 50.68 + 50.69 if (rp->processing_is_mt()) { 50.70 // Set the degree of MT here. If the discovery is done MT, there 50.71 // may have been a different number of threads doing the discovery 50.72 // and a different number of discovered lists may have Ref objects. 50.73 // That is OK as long as the Reference lists are balanced (see 50.74 // balance_all_queues() and balance_queues()). 
50.75 - rp->set_mt_degree(active_workers); 50.76 + rp->set_active_mt_degree(active_workers); 50.77 50.78 rp->process_discovered_references(&g1_is_alive, 50.79 &g1_keep_alive, 50.80 @@ -3182,7 +3183,7 @@ 50.81 50.82 template <class T> void do_oop_work(T* p) { 50.83 assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); 50.84 - assert(!_g1h->is_on_free_list( 50.85 + assert(!_g1h->is_on_master_free_list( 50.86 _g1h->heap_region_containing((HeapWord*) p)), "invariant"); 50.87 50.88 oop obj = oopDesc::load_decode_heap_oop(p); 50.89 @@ -3403,7 +3404,7 @@ 50.90 void CMTask::push(oop obj) { 50.91 HeapWord* objAddr = (HeapWord*) obj; 50.92 assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); 50.93 - assert(!_g1h->is_on_free_list( 50.94 + assert(!_g1h->is_on_master_free_list( 50.95 _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant"); 50.96 assert(!_g1h->is_obj_ill(obj), "invariant"); 50.97 assert(_nextMarkBitMap->isMarked(objAddr), "invariant"); 50.98 @@ -3649,7 +3650,7 @@ 50.99 (void*) obj); 50.100 50.101 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); 50.102 - assert(!_g1h->is_on_free_list( 50.103 + assert(!_g1h->is_on_master_free_list( 50.104 _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); 50.105 50.106 scan_object(obj);
51.1 --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Fri Mar 25 11:29:30 2011 -0700 51.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Fri Mar 25 18:19:22 2011 -0400 51.3 @@ -237,9 +237,9 @@ 51.4 // The following will finish freeing up any regions that we 51.5 // found to be empty during cleanup. We'll do this part 51.6 // without joining the suspendible set. If an evacuation pause 51.7 - // takes places, then we would carry on freeing regions in 51.8 + // takes place, then we would carry on freeing regions in 51.9 // case they are needed by the pause. If a Full GC takes 51.10 - // places, it would wait for us to process the regions 51.11 + // place, it would wait for us to process the regions 51.12 // reclaimed by cleanup. 51.13 51.14 double cleanup_start_sec = os::elapsedTime();
52.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Mar 25 11:29:30 2011 -0700 52.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Mar 25 18:19:22 2011 -0400 52.3 @@ -479,7 +479,7 @@ 52.4 // Private methods. 52.5 52.6 HeapRegion* 52.7 -G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) { 52.8 +G1CollectedHeap::new_region_try_secondary_free_list() { 52.9 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 52.10 while (!_secondary_free_list.is_empty() || free_regions_coming()) { 52.11 if (!_secondary_free_list.is_empty()) { 52.12 @@ -531,7 +531,7 @@ 52.13 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 52.14 "forced to look at the secondary_free_list"); 52.15 } 52.16 - res = new_region_try_secondary_free_list(word_size); 52.17 + res = new_region_try_secondary_free_list(); 52.18 if (res != NULL) { 52.19 return res; 52.20 } 52.21 @@ -543,7 +543,7 @@ 52.22 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 52.23 "res == NULL, trying the secondary_free_list"); 52.24 } 52.25 - res = new_region_try_secondary_free_list(word_size); 52.26 + res = new_region_try_secondary_free_list(); 52.27 } 52.28 if (res == NULL && do_expand) { 52.29 if (expand(word_size * HeapWordSize)) { 52.30 @@ -579,6 +579,9 @@ 52.31 52.32 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, 52.33 size_t word_size) { 52.34 + assert(isHumongous(word_size), "word_size should be humongous"); 52.35 + assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 52.36 + 52.37 int first = -1; 52.38 if (num_regions == 1) { 52.39 // Only one region to allocate, no need to go through the slower 52.40 @@ -600,7 +603,7 @@ 52.41 // request. If we are only allocating one region we use the common 52.42 // region allocation code (see above). 52.43 wait_while_free_regions_coming(); 52.44 - append_secondary_free_list_if_not_empty(); 52.45 + append_secondary_free_list_if_not_empty_with_lock(); 52.46 52.47 if (free_regions() >= num_regions) { 52.48 first = _hrs->find_contiguous(num_regions); 52.49 @@ -608,7 +611,7 @@ 52.50 for (int i = first; i < first + (int) num_regions; ++i) { 52.51 HeapRegion* hr = _hrs->at(i); 52.52 assert(hr->is_empty(), "sanity"); 52.53 - assert(is_on_free_list(hr), "sanity"); 52.54 + assert(is_on_master_free_list(hr), "sanity"); 52.55 hr->set_pending_removal(true); 52.56 } 52.57 _free_list.remove_all_pending(num_regions); 52.58 @@ -618,6 +621,126 @@ 52.59 return first; 52.60 } 52.61 52.62 +HeapWord* 52.63 +G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first, 52.64 + size_t num_regions, 52.65 + size_t word_size) { 52.66 + assert(first != -1, "pre-condition"); 52.67 + assert(isHumongous(word_size), "word_size should be humongous"); 52.68 + assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 52.69 + 52.70 + // Index of last region in the series + 1. 52.71 + int last = first + (int) num_regions; 52.72 + 52.73 + // We need to initialize the region(s) we just discovered. This is 52.74 + // a bit tricky given that it can happen concurrently with 52.75 + // refinement threads refining cards on these regions and 52.76 + // potentially wanting to refine the BOT as they are scanning 52.77 + // those cards (this can happen shortly after a cleanup; see CR 52.78 + // 6991377). So we have to set up the region(s) carefully and in 52.79 + // a specific order. 52.80 + 52.81 + // The word size sum of all the regions we will allocate. 
52.82 + size_t word_size_sum = num_regions * HeapRegion::GrainWords; 52.83 + assert(word_size <= word_size_sum, "sanity"); 52.84 + 52.85 + // This will be the "starts humongous" region. 52.86 + HeapRegion* first_hr = _hrs->at(first); 52.87 + // The header of the new object will be placed at the bottom of 52.88 + // the first region. 52.89 + HeapWord* new_obj = first_hr->bottom(); 52.90 + // This will be the new end of the first region in the series that 52.91 + // should also match the end of the last region in the seriers. 52.92 + HeapWord* new_end = new_obj + word_size_sum; 52.93 + // This will be the new top of the first region that will reflect 52.94 + // this allocation. 52.95 + HeapWord* new_top = new_obj + word_size; 52.96 + 52.97 + // First, we need to zero the header of the space that we will be 52.98 + // allocating. When we update top further down, some refinement 52.99 + // threads might try to scan the region. By zeroing the header we 52.100 + // ensure that any thread that will try to scan the region will 52.101 + // come across the zero klass word and bail out. 52.102 + // 52.103 + // NOTE: It would not have been correct to have used 52.104 + // CollectedHeap::fill_with_object() and make the space look like 52.105 + // an int array. The thread that is doing the allocation will 52.106 + // later update the object header to a potentially different array 52.107 + // type and, for a very short period of time, the klass and length 52.108 + // fields will be inconsistent. This could cause a refinement 52.109 + // thread to calculate the object size incorrectly. 52.110 + Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); 52.111 + 52.112 + // We will set up the first region as "starts humongous". This 52.113 + // will also update the BOT covering all the regions to reflect 52.114 + // that there is a single object that starts at the bottom of the 52.115 + // first region. 52.116 + first_hr->set_startsHumongous(new_top, new_end); 52.117 + 52.118 + // Then, if there are any, we will set up the "continues 52.119 + // humongous" regions. 52.120 + HeapRegion* hr = NULL; 52.121 + for (int i = first + 1; i < last; ++i) { 52.122 + hr = _hrs->at(i); 52.123 + hr->set_continuesHumongous(first_hr); 52.124 + } 52.125 + // If we have "continues humongous" regions (hr != NULL), then the 52.126 + // end of the last one should match new_end. 52.127 + assert(hr == NULL || hr->end() == new_end, "sanity"); 52.128 + 52.129 + // Up to this point no concurrent thread would have been able to 52.130 + // do any scanning on any region in this series. All the top 52.131 + // fields still point to bottom, so the intersection between 52.132 + // [bottom,top] and [card_start,card_end] will be empty. Before we 52.133 + // update the top fields, we'll do a storestore to make sure that 52.134 + // no thread sees the update to top before the zeroing of the 52.135 + // object header and the BOT initialization. 52.136 + OrderAccess::storestore(); 52.137 + 52.138 + // Now that the BOT and the object header have been initialized, 52.139 + // we can update top of the "starts humongous" region. 52.140 + assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), 52.141 + "new_top should be in this region"); 52.142 + first_hr->set_top(new_top); 52.143 + 52.144 + // Now, we will update the top fields of the "continues humongous" 52.145 + // regions. The reason we need to do this is that, otherwise, 52.146 + // these regions would look empty and this will confuse parts of 52.147 + // G1. 
For example, the code that looks for a consecutive number 52.148 + // of empty regions will consider them empty and try to 52.149 + // re-allocate them. We can extend is_empty() to also include 52.150 + // !continuesHumongous(), but it is easier to just update the top 52.151 + // fields here. The way we set top for all regions (i.e., top == 52.152 + // end for all regions but the last one, top == new_top for the 52.153 + // last one) is actually used when we will free up the humongous 52.154 + // region in free_humongous_region(). 52.155 + hr = NULL; 52.156 + for (int i = first + 1; i < last; ++i) { 52.157 + hr = _hrs->at(i); 52.158 + if ((i + 1) == last) { 52.159 + // last continues humongous region 52.160 + assert(hr->bottom() < new_top && new_top <= hr->end(), 52.161 + "new_top should fall on this region"); 52.162 + hr->set_top(new_top); 52.163 + } else { 52.164 + // not last one 52.165 + assert(new_top > hr->end(), "new_top should be above this region"); 52.166 + hr->set_top(hr->end()); 52.167 + } 52.168 + } 52.169 + // If we have continues humongous regions (hr != NULL), then the 52.170 + // end of the last one should match new_end and its top should 52.171 + // match new_top. 52.172 + assert(hr == NULL || 52.173 + (hr->end() == new_end && hr->top() == new_top), "sanity"); 52.174 + 52.175 + assert(first_hr->used() == word_size * HeapWordSize, "invariant"); 52.176 + _summary_bytes_used += first_hr->used(); 52.177 + _humongous_set.add(first_hr); 52.178 + 52.179 + return new_obj; 52.180 +} 52.181 + 52.182 // If could fit into free regions w/o expansion, try. 52.183 // Otherwise, if can expand, do so. 52.184 // Otherwise, if using ex regions might help, try with ex given back. 52.185 @@ -653,121 +776,16 @@ 52.186 } 52.187 } 52.188 52.189 + HeapWord* result = NULL; 52.190 if (first != -1) { 52.191 - // Index of last region in the series + 1. 52.192 - int last = first + (int) num_regions; 52.193 - 52.194 - // We need to initialize the region(s) we just discovered. This is 52.195 - // a bit tricky given that it can happen concurrently with 52.196 - // refinement threads refining cards on these regions and 52.197 - // potentially wanting to refine the BOT as they are scanning 52.198 - // those cards (this can happen shortly after a cleanup; see CR 52.199 - // 6991377). So we have to set up the region(s) carefully and in 52.200 - // a specific order. 52.201 - 52.202 - // The word size sum of all the regions we will allocate. 52.203 - size_t word_size_sum = num_regions * HeapRegion::GrainWords; 52.204 - assert(word_size <= word_size_sum, "sanity"); 52.205 - 52.206 - // This will be the "starts humongous" region. 52.207 - HeapRegion* first_hr = _hrs->at(first); 52.208 - // The header of the new object will be placed at the bottom of 52.209 - // the first region. 52.210 - HeapWord* new_obj = first_hr->bottom(); 52.211 - // This will be the new end of the first region in the series that 52.212 - // should also match the end of the last region in the seriers. 52.213 - HeapWord* new_end = new_obj + word_size_sum; 52.214 - // This will be the new top of the first region that will reflect 52.215 - // this allocation. 52.216 - HeapWord* new_top = new_obj + word_size; 52.217 - 52.218 - // First, we need to zero the header of the space that we will be 52.219 - // allocating. When we update top further down, some refinement 52.220 - // threads might try to scan the region. 
By zeroing the header we 52.221 - // ensure that any thread that will try to scan the region will 52.222 - // come across the zero klass word and bail out. 52.223 - // 52.224 - // NOTE: It would not have been correct to have used 52.225 - // CollectedHeap::fill_with_object() and make the space look like 52.226 - // an int array. The thread that is doing the allocation will 52.227 - // later update the object header to a potentially different array 52.228 - // type and, for a very short period of time, the klass and length 52.229 - // fields will be inconsistent. This could cause a refinement 52.230 - // thread to calculate the object size incorrectly. 52.231 - Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); 52.232 - 52.233 - // We will set up the first region as "starts humongous". This 52.234 - // will also update the BOT covering all the regions to reflect 52.235 - // that there is a single object that starts at the bottom of the 52.236 - // first region. 52.237 - first_hr->set_startsHumongous(new_top, new_end); 52.238 - 52.239 - // Then, if there are any, we will set up the "continues 52.240 - // humongous" regions. 52.241 - HeapRegion* hr = NULL; 52.242 - for (int i = first + 1; i < last; ++i) { 52.243 - hr = _hrs->at(i); 52.244 - hr->set_continuesHumongous(first_hr); 52.245 - } 52.246 - // If we have "continues humongous" regions (hr != NULL), then the 52.247 - // end of the last one should match new_end. 52.248 - assert(hr == NULL || hr->end() == new_end, "sanity"); 52.249 - 52.250 - // Up to this point no concurrent thread would have been able to 52.251 - // do any scanning on any region in this series. All the top 52.252 - // fields still point to bottom, so the intersection between 52.253 - // [bottom,top] and [card_start,card_end] will be empty. Before we 52.254 - // update the top fields, we'll do a storestore to make sure that 52.255 - // no thread sees the update to top before the zeroing of the 52.256 - // object header and the BOT initialization. 52.257 - OrderAccess::storestore(); 52.258 - 52.259 - // Now that the BOT and the object header have been initialized, 52.260 - // we can update top of the "starts humongous" region. 52.261 - assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), 52.262 - "new_top should be in this region"); 52.263 - first_hr->set_top(new_top); 52.264 - 52.265 - // Now, we will update the top fields of the "continues humongous" 52.266 - // regions. The reason we need to do this is that, otherwise, 52.267 - // these regions would look empty and this will confuse parts of 52.268 - // G1. For example, the code that looks for a consecutive number 52.269 - // of empty regions will consider them empty and try to 52.270 - // re-allocate them. We can extend is_empty() to also include 52.271 - // !continuesHumongous(), but it is easier to just update the top 52.272 - // fields here. The way we set top for all regions (i.e., top == 52.273 - // end for all regions but the last one, top == new_top for the 52.274 - // last one) is actually used when we will free up the humongous 52.275 - // region in free_humongous_region(). 
52.276 - hr = NULL; 52.277 - for (int i = first + 1; i < last; ++i) { 52.278 - hr = _hrs->at(i); 52.279 - if ((i + 1) == last) { 52.280 - // last continues humongous region 52.281 - assert(hr->bottom() < new_top && new_top <= hr->end(), 52.282 - "new_top should fall on this region"); 52.283 - hr->set_top(new_top); 52.284 - } else { 52.285 - // not last one 52.286 - assert(new_top > hr->end(), "new_top should be above this region"); 52.287 - hr->set_top(hr->end()); 52.288 - } 52.289 - } 52.290 - // If we have continues humongous regions (hr != NULL), then the 52.291 - // end of the last one should match new_end and its top should 52.292 - // match new_top. 52.293 - assert(hr == NULL || 52.294 - (hr->end() == new_end && hr->top() == new_top), "sanity"); 52.295 - 52.296 - assert(first_hr->used() == word_size * HeapWordSize, "invariant"); 52.297 - _summary_bytes_used += first_hr->used(); 52.298 - _humongous_set.add(first_hr); 52.299 - 52.300 - return new_obj; 52.301 + result = 52.302 + humongous_obj_allocate_initialize_regions(first, num_regions, word_size); 52.303 + assert(result != NULL, "it should always return a valid result"); 52.304 } 52.305 52.306 verify_region_sets_optional(); 52.307 - return NULL; 52.308 + 52.309 + return result; 52.310 } 52.311 52.312 void 52.313 @@ -1389,7 +1407,7 @@ 52.314 g1_policy()->record_full_collection_start(); 52.315 52.316 wait_while_free_regions_coming(); 52.317 - append_secondary_free_list_if_not_empty(); 52.318 + append_secondary_free_list_if_not_empty_with_lock(); 52.319 52.320 gc_prologue(true); 52.321 increment_total_collections(true /* full gc */); 52.322 @@ -1444,7 +1462,7 @@ 52.323 // how reference processing currently works in G1. 52.324 52.325 // Temporarily make reference _discovery_ single threaded (non-MT). 52.326 - ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); 52.327 + ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false); 52.328 52.329 // Temporarily make refs discovery atomic 52.330 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); 52.331 @@ -2201,16 +2219,16 @@ 52.332 52.333 SharedHeap::ref_processing_init(); 52.334 MemRegion mr = reserved_region(); 52.335 - _ref_processor = ReferenceProcessor::create_ref_processor( 52.336 - mr, // span 52.337 - false, // Reference discovery is not atomic 52.338 - true, // mt_discovery 52.339 - &_is_alive_closure, // is alive closure 52.340 - // for efficiency 52.341 - ParallelGCThreads, 52.342 - ParallelRefProcEnabled, 52.343 - true); // Setting next fields of discovered 52.344 - // lists requires a barrier. 52.345 + _ref_processor = 52.346 + new ReferenceProcessor(mr, // span 52.347 + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 52.348 + (int) ParallelGCThreads, // degree of mt processing 52.349 + ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery 52.350 + (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery 52.351 + false, // Reference discovery is not atomic 52.352 + &_is_alive_closure, // is alive closure for efficiency 52.353 + true); // Setting next fields of discovered 52.354 + // lists requires a barrier. 52.355 } 52.356 52.357 size_t G1CollectedHeap::capacity() const { 52.358 @@ -3377,15 +3395,14 @@ 52.359 52.360 TraceMemoryManagerStats tms(false /* fullGC */); 52.361 52.362 - // If there are any free regions available on the secondary_free_list 52.363 - // make sure we append them to the free_list. 
However, we don't 52.364 - // have to wait for the rest of the cleanup operation to 52.365 - // finish. If it's still going on that's OK. If we run out of 52.366 - // regions, the region allocation code will check the 52.367 - // secondary_free_list and potentially wait if more free regions 52.368 - // are coming (see new_region_try_secondary_free_list()). 52.369 + // If the secondary_free_list is not empty, append it to the 52.370 + // free_list. No need to wait for the cleanup operation to finish; 52.371 + // the region allocation code will check the secondary_free_list 52.372 + // and wait if necessary. If the G1StressConcRegionFreeing flag is 52.373 + // set, skip this step so that the region allocation code has to 52.374 + // get entries from the secondary_free_list. 52.375 if (!G1StressConcRegionFreeing) { 52.376 - append_secondary_free_list_if_not_empty(); 52.377 + append_secondary_free_list_if_not_empty_with_lock(); 52.378 } 52.379 52.380 increment_gc_time_stamp(); 52.381 @@ -5199,7 +5216,7 @@ 52.382 size_t rs_lengths = 0; 52.383 52.384 while (cur != NULL) { 52.385 - assert(!is_on_free_list(cur), "sanity"); 52.386 + assert(!is_on_master_free_list(cur), "sanity"); 52.387 52.388 if (non_young) { 52.389 if (cur->is_young()) { 52.390 @@ -5543,13 +5560,10 @@ 52.391 return; 52.392 } 52.393 52.394 - { 52.395 - MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 52.396 - // Make sure we append the secondary_free_list on the free_list so 52.397 - // that all free regions we will come across can be safely 52.398 - // attributed to the free_list. 52.399 - append_secondary_free_list(); 52.400 - } 52.401 + // Make sure we append the secondary_free_list on the free_list so 52.402 + // that all free regions we will come across can be safely 52.403 + // attributed to the free_list. 52.404 + append_secondary_free_list_if_not_empty_with_lock(); 52.405 52.406 // Finally, make sure that the region accounting in the lists is 52.407 // consistent with what we see in the heap.
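The block moved into humongous_obj_allocate_initialize_regions() is careful about publication order: the klass word is zeroed and the region/BOT metadata set up first, a store-store barrier is issued, and only then is top updated, so a concurrent refinement thread can never observe the new space with a stale header. A minimal standalone model of that ordering; the types stand in for HeapRegion and OrderAccess and are not HotSpot's:

  #include <atomic>
  #include <cstddef>
  #include <cstring>

  struct ToyRegion {
    char*              bottom;
    std::atomic<char*> top;    // read concurrently by refinement threads
  };

  char* toy_humongous_allocate(ToyRegion& r, size_t byte_size) {
    char* new_obj = r.bottom;
    std::memset(new_obj, 0, sizeof(void*));   // zero the klass word so scanners bail out
    // ... set up "starts humongous" / "continues humongous" metadata and the BOT here ...
    std::atomic_thread_fence(std::memory_order_release);          // stands in for OrderAccess::storestore()
    r.top.store(new_obj + byte_size, std::memory_order_relaxed);  // publish the allocation
    return new_obj;
  }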
53.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Mar 25 11:29:30 2011 -0700 53.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Mar 25 18:19:22 2011 -0400 53.3 @@ -56,7 +56,6 @@ 53.4 class ConcurrentMark; 53.5 class ConcurrentMarkThread; 53.6 class ConcurrentG1Refine; 53.7 -class ConcurrentZFThread; 53.8 53.9 typedef OverflowTaskQueue<StarTask> RefToScanQueue; 53.10 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; 53.11 @@ -64,12 +63,6 @@ 53.12 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) 53.13 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) 53.14 53.15 -enum G1GCThreadGroups { 53.16 - G1CRGroup = 0, 53.17 - G1ZFGroup = 1, 53.18 - G1CMGroup = 2 53.19 -}; 53.20 - 53.21 enum GCAllocPurpose { 53.22 GCAllocForTenured, 53.23 GCAllocForSurvived, 53.24 @@ -294,9 +287,9 @@ 53.25 // These are macros so that, if the assert fires, we get the correct 53.26 // line number, file, etc. 53.27 53.28 -#define heap_locking_asserts_err_msg(__extra_message) \ 53.29 +#define heap_locking_asserts_err_msg(_extra_message_) \ 53.30 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \ 53.31 - (__extra_message), \ 53.32 + (_extra_message_), \ 53.33 BOOL_TO_STR(Heap_lock->owned_by_self()), \ 53.34 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \ 53.35 BOOL_TO_STR(Thread::current()->is_VM_thread())) 53.36 @@ -307,11 +300,11 @@ 53.37 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \ 53.38 } while (0) 53.39 53.40 -#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \ 53.41 +#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \ 53.42 do { \ 53.43 assert(Heap_lock->owned_by_self() || \ 53.44 (SafepointSynchronize::is_at_safepoint() && \ 53.45 - ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \ 53.46 + ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \ 53.47 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \ 53.48 "should be at a safepoint")); \ 53.49 } while (0) 53.50 @@ -338,10 +331,10 @@ 53.51 "should not be at a safepoint")); \ 53.52 } while (0) 53.53 53.54 -#define assert_at_safepoint(__should_be_vm_thread) \ 53.55 +#define assert_at_safepoint(_should_be_vm_thread_) \ 53.56 do { \ 53.57 assert(SafepointSynchronize::is_at_safepoint() && \ 53.58 - ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \ 53.59 + ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \ 53.60 heap_locking_asserts_err_msg("should be at a safepoint")); \ 53.61 } while (0) 53.62 53.63 @@ -371,35 +364,40 @@ 53.64 // will check whether there's anything available in the 53.65 // secondary_free_list and/or wait for more regions to appear in that 53.66 // list, if _free_regions_coming is set. 53.67 - HeapRegion* new_region_try_secondary_free_list(size_t word_size); 53.68 + HeapRegion* new_region_try_secondary_free_list(); 53.69 53.70 - // It will try to allocate a single non-humongous HeapRegion 53.71 - // sufficient for an allocation of the given word_size. If 53.72 - // do_expand is true, it will attempt to expand the heap if 53.73 - // necessary to satisfy the allocation request. Note that word_size 53.74 - // is only used to make sure that we expand sufficiently but, given 53.75 - // that the allocation request is assumed not to be humongous, 53.76 - // having word_size is not strictly necessary (expanding by a single 53.77 - // region will always be sufficient). 
But let's keep that parameter 53.78 - // in case we need it in the future. 53.79 + // Try to allocate a single non-humongous HeapRegion sufficient for 53.80 + // an allocation of the given word_size. If do_expand is true, 53.81 + // attempt to expand the heap if necessary to satisfy the allocation 53.82 + // request. 53.83 HeapRegion* new_region_work(size_t word_size, bool do_expand); 53.84 53.85 - // It will try to allocate a new region to be used for allocation by 53.86 - // mutator threads. It will not try to expand the heap if not region 53.87 - // is available. 53.88 + // Try to allocate a new region to be used for allocation by a 53.89 + // mutator thread. Attempt to expand the heap if no region is 53.90 + // available. 53.91 HeapRegion* new_alloc_region(size_t word_size) { 53.92 return new_region_work(word_size, false /* do_expand */); 53.93 } 53.94 53.95 - // It will try to allocate a new region to be used for allocation by 53.96 - // a GC thread. It will try to expand the heap if no region is 53.97 - // available. 53.98 + // Try to allocate a new region to be used for allocation by a GC 53.99 + // thread. Attempt to expand the heap if no region is available. 53.100 HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); 53.101 53.102 + // Attempt to satisfy a humongous allocation request of the given 53.103 + // size by finding a contiguous set of free regions of num_regions 53.104 + // length and remove them from the master free list. Return the 53.105 + // index of the first region or -1 if the search was unsuccessful. 53.106 int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size); 53.107 53.108 - // Attempt to allocate an object of the given (very large) "word_size". 53.109 - // Returns "NULL" on failure. 53.110 + // Initialize a contiguous set of free regions of length num_regions 53.111 + // and starting at index first so that they appear as a single 53.112 + // humongous region. 53.113 + HeapWord* humongous_obj_allocate_initialize_regions(int first, 53.114 + size_t num_regions, 53.115 + size_t word_size); 53.116 + 53.117 + // Attempt to allocate a humongous object of the given size. Return 53.118 + // NULL if unsuccessful. 53.119 HeapWord* humongous_obj_allocate(size_t word_size); 53.120 53.121 // The following two methods, allocate_new_tlab() and 53.122 @@ -776,7 +774,7 @@ 53.123 // Invoke "save_marks" on all heap regions. 53.124 void save_marks(); 53.125 53.126 - // It frees a non-humongous region by initializing its contents and 53.127 + // Frees a non-humongous region by initializing its contents and 53.128 // adding it to the free list that's passed as a parameter (this is 53.129 // usually a local list which will be appended to the master free 53.130 // list later). The used bytes of freed regions are accumulated in 53.131 @@ -787,13 +785,13 @@ 53.132 FreeRegionList* free_list, 53.133 bool par); 53.134 53.135 - // It frees a humongous region by collapsing it into individual 53.136 - // regions and calling free_region() for each of them. The freed 53.137 - // regions will be added to the free list that's passed as a parameter 53.138 - // (this is usually a local list which will be appended to the 53.139 - // master free list later). The used bytes of freed regions are 53.140 - // accumulated in pre_used. If par is true, the region's RSet will 53.141 - // not be freed up. The assumption is that this will be done later. 
53.142 + // Frees a humongous region by collapsing it into individual regions 53.143 + // and calling free_region() for each of them. The freed regions 53.144 + // will be added to the free list that's passed as a parameter (this 53.145 + // is usually a local list which will be appended to the master free 53.146 + // list later). The used bytes of freed regions are accumulated in 53.147 + // pre_used. If par is true, the region's RSet will not be freed 53.148 + // up. The assumption is that this will be done later. 53.149 void free_humongous_region(HeapRegion* hr, 53.150 size_t* pre_used, 53.151 FreeRegionList* free_list, 53.152 @@ -1046,13 +1044,13 @@ 53.153 #endif // HEAP_REGION_SET_FORCE_VERIFY 53.154 53.155 #ifdef ASSERT 53.156 - bool is_on_free_list(HeapRegion* hr) { 53.157 + bool is_on_master_free_list(HeapRegion* hr) { 53.158 return hr->containing_set() == &_free_list; 53.159 } 53.160 53.161 - bool is_on_humongous_set(HeapRegion* hr) { 53.162 + bool is_in_humongous_set(HeapRegion* hr) { 53.163 return hr->containing_set() == &_humongous_set; 53.164 -} 53.165 + } 53.166 #endif // ASSERT 53.167 53.168 // Wrapper for the region list operations that can be called from 53.169 @@ -1066,7 +1064,9 @@ 53.170 _free_list.add_as_tail(&_secondary_free_list); 53.171 } 53.172 53.173 - void append_secondary_free_list_if_not_empty() { 53.174 + void append_secondary_free_list_if_not_empty_with_lock() { 53.175 + // If the secondary free list looks empty there's no reason to 53.176 + // take the lock and then try to append it. 53.177 if (!_secondary_free_list.is_empty()) { 53.178 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 53.179 append_secondary_free_list();
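append_secondary_free_list_if_not_empty_with_lock() above uses a cheap unlocked emptiness check before taking SecondaryFreeList_lock; if the list looks empty, a racing producer is simply picked up by a later call. A standalone sketch of that check-then-lock shortcut, with stand-in list and mutex types rather than HotSpot's:

  #include <list>
  #include <mutex>

  static std::list<int> secondary_free_list;   // filled by a concurrent cleanup thread
  static std::list<int> master_free_list;
  static std::mutex     SecondaryFreeList_lock;

  void append_secondary_free_list_if_not_empty_with_lock() {
    // If the secondary list looks empty there is no reason to take the lock
    // and then try to append it.
    if (secondary_free_list.empty()) return;
    std::lock_guard<std::mutex> x(SecondaryFreeList_lock);
    master_free_list.splice(master_free_list.end(), secondary_free_list);
  }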
54.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Mar 25 11:29:30 2011 -0700 54.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Mar 25 18:19:22 2011 -0400 54.3 @@ -81,6 +81,57 @@ 54.4 54.5 // </NEW PREDICTION> 54.6 54.7 +// Help class for avoiding interleaved logging 54.8 +class LineBuffer: public StackObj { 54.9 + 54.10 +private: 54.11 + static const int BUFFER_LEN = 1024; 54.12 + static const int INDENT_CHARS = 3; 54.13 + char _buffer[BUFFER_LEN]; 54.14 + int _indent_level; 54.15 + int _cur; 54.16 + 54.17 + void vappend(const char* format, va_list ap) { 54.18 + int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap); 54.19 + if (res != -1) { 54.20 + _cur += res; 54.21 + } else { 54.22 + DEBUG_ONLY(warning("buffer too small in LineBuffer");) 54.23 + _buffer[BUFFER_LEN -1] = 0; 54.24 + _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again 54.25 + } 54.26 + } 54.27 + 54.28 +public: 54.29 + explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) { 54.30 + for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) { 54.31 + _buffer[_cur] = ' '; 54.32 + } 54.33 + } 54.34 + 54.35 +#ifndef PRODUCT 54.36 + ~LineBuffer() { 54.37 + assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?"); 54.38 + } 54.39 +#endif 54.40 + 54.41 + void append(const char* format, ...) { 54.42 + va_list ap; 54.43 + va_start(ap, format); 54.44 + vappend(format, ap); 54.45 + va_end(ap); 54.46 + } 54.47 + 54.48 + void append_and_print_cr(const char* format, ...) { 54.49 + va_list ap; 54.50 + va_start(ap, format); 54.51 + vappend(format, ap); 54.52 + va_end(ap); 54.53 + gclog_or_tty->print_cr("%s", _buffer); 54.54 + _cur = _indent_level * INDENT_CHARS; 54.55 + } 54.56 +}; 54.57 + 54.58 G1CollectorPolicy::G1CollectorPolicy() : 54.59 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads() 54.60 ? 
ParallelGCThreads : 1), 54.61 @@ -1016,10 +1067,8 @@ 54.62 bool summary) { 54.63 double min = data[0], max = data[0]; 54.64 double total = 0.0; 54.65 - int j; 54.66 - for (j = 0; j < level; ++j) 54.67 - gclog_or_tty->print(" "); 54.68 - gclog_or_tty->print("[%s (ms):", str); 54.69 + LineBuffer buf(level); 54.70 + buf.append("[%s (ms):", str); 54.71 for (uint i = 0; i < ParallelGCThreads; ++i) { 54.72 double val = data[i]; 54.73 if (val < min) 54.74 @@ -1027,18 +1076,16 @@ 54.75 if (val > max) 54.76 max = val; 54.77 total += val; 54.78 - gclog_or_tty->print(" %3.1lf", val); 54.79 + buf.append(" %3.1lf", val); 54.80 } 54.81 if (summary) { 54.82 - gclog_or_tty->print_cr(""); 54.83 + buf.append_and_print_cr(""); 54.84 double avg = total / (double) ParallelGCThreads; 54.85 - gclog_or_tty->print(" "); 54.86 - for (j = 0; j < level; ++j) 54.87 - gclog_or_tty->print(" "); 54.88 - gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf", 54.89 + buf.append(" "); 54.90 + buf.append("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf", 54.91 avg, min, max); 54.92 } 54.93 - gclog_or_tty->print_cr("]"); 54.94 + buf.append_and_print_cr("]"); 54.95 } 54.96 54.97 void G1CollectorPolicy::print_par_sizes(int level, 54.98 @@ -1047,10 +1094,8 @@ 54.99 bool summary) { 54.100 double min = data[0], max = data[0]; 54.101 double total = 0.0; 54.102 - int j; 54.103 - for (j = 0; j < level; ++j) 54.104 - gclog_or_tty->print(" "); 54.105 - gclog_or_tty->print("[%s :", str); 54.106 + LineBuffer buf(level); 54.107 + buf.append("[%s :", str); 54.108 for (uint i = 0; i < ParallelGCThreads; ++i) { 54.109 double val = data[i]; 54.110 if (val < min) 54.111 @@ -1058,34 +1103,28 @@ 54.112 if (val > max) 54.113 max = val; 54.114 total += val; 54.115 - gclog_or_tty->print(" %d", (int) val); 54.116 + buf.append(" %d", (int) val); 54.117 } 54.118 if (summary) { 54.119 - gclog_or_tty->print_cr(""); 54.120 + buf.append_and_print_cr(""); 54.121 double avg = total / (double) ParallelGCThreads; 54.122 - gclog_or_tty->print(" "); 54.123 - for (j = 0; j < level; ++j) 54.124 - gclog_or_tty->print(" "); 54.125 - gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d", 54.126 + buf.append(" "); 54.127 + buf.append("Sum: %d, Avg: %d, Min: %d, Max: %d", 54.128 (int)total, (int)avg, (int)min, (int)max); 54.129 } 54.130 - gclog_or_tty->print_cr("]"); 54.131 + buf.append_and_print_cr("]"); 54.132 } 54.133 54.134 void G1CollectorPolicy::print_stats (int level, 54.135 const char* str, 54.136 double value) { 54.137 - for (int j = 0; j < level; ++j) 54.138 - gclog_or_tty->print(" "); 54.139 - gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value); 54.140 + LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value); 54.141 } 54.142 54.143 void G1CollectorPolicy::print_stats (int level, 54.144 const char* str, 54.145 int value) { 54.146 - for (int j = 0; j < level; ++j) 54.147 - gclog_or_tty->print(" "); 54.148 - gclog_or_tty->print_cr("[%s: %d]", str, value); 54.149 + LineBuffer(level).append_and_print_cr("[%s: %d]", str, value); 54.150 } 54.151 54.152 double G1CollectorPolicy::avg_value (double* data) { 54.153 @@ -2060,17 +2099,11 @@ 54.154 _g1->collection_set_iterate(&cs_closure); 54.155 } 54.156 54.157 -static void print_indent(int level) { 54.158 - for (int j = 0; j < level+1; ++j) 54.159 - gclog_or_tty->print(" "); 54.160 -} 54.161 - 54.162 void G1CollectorPolicy::print_summary (int level, 54.163 const char* str, 54.164 NumberSeq* seq) const { 54.165 double sum = seq->sum(); 54.166 - print_indent(level); 54.167 - gclog_or_tty->print_cr("%-24s = 
%8.2lf s (avg = %8.2lf ms)", 54.168 + LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)", 54.169 str, sum / 1000.0, seq->avg()); 54.170 } 54.171 54.172 @@ -2078,8 +2111,7 @@ 54.173 const char* str, 54.174 NumberSeq* seq) const { 54.175 print_summary(level, str, seq); 54.176 - print_indent(level + 5); 54.177 - gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", 54.178 + LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", 54.179 seq->num(), seq->sd(), seq->maximum()); 54.180 } 54.181 54.182 @@ -2087,6 +2119,7 @@ 54.183 NumberSeq* other_times_ms, 54.184 NumberSeq* calc_other_times_ms) const { 54.185 bool should_print = false; 54.186 + LineBuffer buf(level + 2); 54.187 54.188 double max_sum = MAX2(fabs(other_times_ms->sum()), 54.189 fabs(calc_other_times_ms->sum())); 54.190 @@ -2095,8 +2128,7 @@ 54.191 double sum_ratio = max_sum / min_sum; 54.192 if (sum_ratio > 1.1) { 54.193 should_print = true; 54.194 - print_indent(level + 1); 54.195 - gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###"); 54.196 + buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###"); 54.197 } 54.198 54.199 double max_avg = MAX2(fabs(other_times_ms->avg()), 54.200 @@ -2106,30 +2138,25 @@ 54.201 double avg_ratio = max_avg / min_avg; 54.202 if (avg_ratio > 1.1) { 54.203 should_print = true; 54.204 - print_indent(level + 1); 54.205 - gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###"); 54.206 + buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###"); 54.207 } 54.208 54.209 if (other_times_ms->sum() < -0.01) { 54.210 - print_indent(level + 1); 54.211 - gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###"); 54.212 + buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###"); 54.213 } 54.214 54.215 if (other_times_ms->avg() < -0.01) { 54.216 - print_indent(level + 1); 54.217 - gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###"); 54.218 + buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###"); 54.219 } 54.220 54.221 if (calc_other_times_ms->sum() < -0.01) { 54.222 should_print = true; 54.223 - print_indent(level + 1); 54.224 - gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###"); 54.225 + buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###"); 54.226 } 54.227 54.228 if (calc_other_times_ms->avg() < -0.01) { 54.229 should_print = true; 54.230 - print_indent(level + 1); 54.231 - gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###"); 54.232 + buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###"); 54.233 } 54.234 54.235 if (should_print) 54.236 @@ -2210,10 +2237,9 @@ 54.237 } 54.238 } 54.239 } else { 54.240 - print_indent(0); 54.241 - gclog_or_tty->print_cr("none"); 54.242 + LineBuffer(1).append_and_print_cr("none"); 54.243 } 54.244 - gclog_or_tty->print_cr(""); 54.245 + LineBuffer(0).append_and_print_cr(""); 54.246 } 54.247 54.248 void G1CollectorPolicy::print_tracing_info() const { 54.249 @@ -2532,7 +2558,7 @@ 54.250 jint regions_added = parKnownGarbageCl.marked_regions_added(); 54.251 _hrSorted->incNumMarkedHeapRegions(regions_added); 54.252 if (G1PrintParCleanupStats) { 54.253 - gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n", 54.254 + gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.", 54.255 i, parKnownGarbageCl.invokes(), regions_added); 54.256 } 54.257 }
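Editor's note: the LineBuffer helper added in the g1CollectorPolicy.cpp hunk above replaces the per-fragment gclog_or_tty->print() calls so that a whole log line is assembled privately and emitted with a single print call, which is what keeps lines from different GC worker threads from interleaving. Below is a small standalone re-creation of that build-then-emit pattern (plain printf instead of gclog_or_tty, simplified names and reset behaviour); it is only an illustration, not the HotSpot class itself.

#include <cstdarg>
#include <cstdio>

// Simplified stand-in for the LineBuffer idea: accumulate one log line
// in a private buffer, then print it with a single call.
class ToyLineBuffer {
  static const int BUFFER_LEN   = 256;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int  _cur;

public:
  explicit ToyLineBuffer(int indent_level) : _cur(0) {
    int indent = indent_level * INDENT_CHARS;
    for (; _cur < indent && _cur < BUFFER_LEN - 1; ++_cur) {
      _buffer[_cur] = ' ';
    }
    _buffer[_cur] = '\0';
  }

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    int res = std::vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    va_end(ap);
    if (res >= 0 && res < BUFFER_LEN - _cur) {
      _cur += res;
    } else {
      _cur = BUFFER_LEN - 1;   // buffer full; further appends are dropped
    }
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    std::vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    va_end(ap);
    std::printf("%s\n", _buffer);   // single emission: no interleaving mid-line
    _cur = 0;
    _buffer[0] = '\0';
  }
};

int main() {
  // Mimics print_par_stats(): per-thread values gathered into one line.
  double data[] = { 1.2, 3.4, 2.8, 0.9 };
  ToyLineBuffer buf(2);
  buf.append("[%s (ms):", "GC Worker Times");
  for (int i = 0; i < 4; ++i) {
    buf.append(" %3.1lf", data[i]);
  }
  buf.append_and_print_cr("]");
  return 0;
}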
55.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Mar 25 11:29:30 2011 -0700 55.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Mar 25 18:19:22 2011 -0400 55.3 @@ -185,22 +185,22 @@ 55.4 G1CollectedHeap* _g1h; 55.5 ModRefBarrierSet* _mrbs; 55.6 CompactPoint _cp; 55.7 - size_t _pre_used; 55.8 - FreeRegionList _free_list; 55.9 HumongousRegionSet _humongous_proxy_set; 55.10 55.11 void free_humongous_region(HeapRegion* hr) { 55.12 HeapWord* end = hr->end(); 55.13 + size_t dummy_pre_used; 55.14 + FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep"); 55.15 + 55.16 assert(hr->startsHumongous(), 55.17 "Only the start of a humongous region should be freed."); 55.18 - _g1h->free_humongous_region(hr, &_pre_used, &_free_list, 55.19 + _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list, 55.20 &_humongous_proxy_set, false /* par */); 55.21 - // Do we also need to do this for the continues humongous regions 55.22 - // we just collapsed? 55.23 hr->prepare_for_compaction(&_cp); 55.24 // Also clear the part of the card table that will be unused after 55.25 // compaction. 55.26 _mrbs->clear(MemRegion(hr->compaction_top(), end)); 55.27 + dummy_free_list.remove_all(); 55.28 } 55.29 55.30 public: 55.31 @@ -208,8 +208,6 @@ 55.32 : _g1h(G1CollectedHeap::heap()), 55.33 _mrbs(G1CollectedHeap::heap()->mr_bs()), 55.34 _cp(NULL, cs, cs->initialize_threshold()), 55.35 - _pre_used(0), 55.36 - _free_list("Local Free List for G1MarkSweep"), 55.37 _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { } 55.38 55.39 void update_sets() { 55.40 @@ -219,7 +217,6 @@ 55.41 NULL, /* free_list */ 55.42 &_humongous_proxy_set, 55.43 false /* par */); 55.44 - _free_list.remove_all(); 55.45 } 55.46 55.47 bool doHeapRegion(HeapRegion* hr) {
56.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Mar 25 11:29:30 2011 -0700 56.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Mar 25 18:19:22 2011 -0400 56.3 @@ -86,28 +86,6 @@ 56.4 bool idempotent() { return true; } 56.5 }; 56.6 56.7 -class IntoCSRegionClosure: public HeapRegionClosure { 56.8 - IntoCSOopClosure _blk; 56.9 - G1CollectedHeap* _g1; 56.10 -public: 56.11 - IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) : 56.12 - _g1(g1), _blk(g1, blk) {} 56.13 - bool doHeapRegion(HeapRegion* r) { 56.14 - if (!r->in_collection_set()) { 56.15 - _blk.set_region(r); 56.16 - if (r->isHumongous()) { 56.17 - if (r->startsHumongous()) { 56.18 - oop obj = oop(r->bottom()); 56.19 - obj->oop_iterate(&_blk); 56.20 - } 56.21 - } else { 56.22 - r->oop_before_save_marks_iterate(&_blk); 56.23 - } 56.24 - } 56.25 - return false; 56.26 - } 56.27 -}; 56.28 - 56.29 class VerifyRSCleanCardOopClosure: public OopClosure { 56.30 G1CollectedHeap* _g1; 56.31 public: 56.32 @@ -329,7 +307,7 @@ 56.33 // is during RSet updating within an evacuation pause. 56.34 // In this case worker_i should be the id of a GC worker thread. 56.35 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); 56.36 - assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker"); 56.37 + assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker"); 56.38 56.39 if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) { 56.40 // 'card_ptr' contains references that point into the collection
57.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Mar 25 11:29:30 2011 -0700 57.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Mar 25 18:19:22 2011 -0400 57.3 @@ -53,8 +53,8 @@ 57.4 class HeapRegionSetBase; 57.5 57.6 #define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" 57.7 -#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \ 57.8 - (__hr)->top(), (__hr)->end() 57.9 +#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \ 57.10 + (_hr_)->top(), (_hr_)->end() 57.11 57.12 // A dirty card to oop closure for heap regions. It 57.13 // knows how to get the G1 heap and how to use the bitmap 57.14 @@ -518,13 +518,13 @@ 57.15 containing_set, _containing_set)); 57.16 57.17 _containing_set = containing_set; 57.18 -} 57.19 + } 57.20 57.21 HeapRegionSetBase* containing_set() { return _containing_set; } 57.22 #else // ASSERT 57.23 void set_containing_set(HeapRegionSetBase* containing_set) { } 57.24 57.25 - // containing_set() is only used in asserts so there's not reason 57.26 + // containing_set() is only used in asserts so there's no reason 57.27 // to provide a dummy version of it. 57.28 #endif // ASSERT 57.29 57.30 @@ -535,14 +535,15 @@ 57.31 bool pending_removal() { return _pending_removal; } 57.32 57.33 void set_pending_removal(bool pending_removal) { 57.34 - // We can only set pending_removal to true, if it's false and the 57.35 - // region belongs to a set. 57.36 - assert(!pending_removal || 57.37 - (!_pending_removal && containing_set() != NULL), "pre-condition"); 57.38 - // We can only set pending_removal to false, if it's true and the 57.39 - // region does not belong to a set. 57.40 - assert( pending_removal || 57.41 - ( _pending_removal && containing_set() == NULL), "pre-condition"); 57.42 + if (pending_removal) { 57.43 + assert(!_pending_removal && containing_set() != NULL, 57.44 + "can only set pending removal to true if it's false and " 57.45 + "the region belongs to a region set"); 57.46 + } else { 57.47 + assert( _pending_removal && containing_set() == NULL, 57.48 + "can only set pending removal to false if it's true and " 57.49 + "the region does not belong to a region set"); 57.50 + } 57.51 57.52 _pending_removal = pending_removal; 57.53 }
58.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Mar 25 11:29:30 2011 -0700 58.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Mar 25 18:19:22 2011 -0400 58.3 @@ -165,7 +165,7 @@ 58.4 58.5 assert(num_so_far <= num, "post-condition"); 58.6 if (num_so_far == num) { 58.7 - // we find enough space for the humongous object 58.8 + // we found enough space for the humongous object 58.9 assert(from <= first && first < _regions.length(), "post-condition"); 58.10 assert(first < curr && (curr - first) == (int) num, "post-condition"); 58.11 for (int i = first; i < first + (int) num; ++i) {
59.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Fri Mar 25 11:29:30 2011 -0700 59.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Fri Mar 25 18:19:22 2011 -0400 59.3 @@ -76,7 +76,8 @@ 59.4 // that are available for allocation. 59.5 size_t free_suffix(); 59.6 59.7 - // Finds a contiguous set of empty regions of length num. 59.8 + // Find a contiguous set of empty regions of length num and return 59.9 + // the index of the first region or -1 if the search was unsuccessful. 59.10 int find_contiguous(size_t num); 59.11 59.12 // Apply the "doHeapRegion" method of "blk" to all regions in "this",
60.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Fri Mar 25 11:29:30 2011 -0700 60.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Fri Mar 25 18:19:22 2011 -0400 60.3 @@ -42,7 +42,7 @@ 60.4 return region_num; 60.5 } 60.6 60.7 -void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) { 60.8 +void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) { 60.9 msg->append("[%s] %s " 60.10 "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" " 60.11 "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT, 60.12 @@ -109,30 +109,30 @@ 60.13 // for the verification calls. If we do verification without the 60.14 // appropriate locks and the set changes underneath our feet 60.15 // verification might fail and send us on a wild goose chase. 60.16 - hrl_assert_mt_safety_ok(this); 60.17 + hrs_assert_mt_safety_ok(this); 60.18 60.19 guarantee(( is_empty() && length() == 0 && region_num() == 0 && 60.20 total_used_bytes() == 0 && total_capacity_bytes() == 0) || 60.21 (!is_empty() && length() >= 0 && region_num() >= 0 && 60.22 total_used_bytes() >= 0 && total_capacity_bytes() >= 0), 60.23 - hrl_ext_msg(this, "invariant")); 60.24 + hrs_ext_msg(this, "invariant")); 60.25 60.26 guarantee((!regions_humongous() && region_num() == length()) || 60.27 ( regions_humongous() && region_num() >= length()), 60.28 - hrl_ext_msg(this, "invariant")); 60.29 + hrs_ext_msg(this, "invariant")); 60.30 60.31 guarantee(!regions_empty() || total_used_bytes() == 0, 60.32 - hrl_ext_msg(this, "invariant")); 60.33 + hrs_ext_msg(this, "invariant")); 60.34 60.35 guarantee(total_used_bytes() <= total_capacity_bytes(), 60.36 - hrl_ext_msg(this, "invariant")); 60.37 + hrs_ext_msg(this, "invariant")); 60.38 } 60.39 60.40 void HeapRegionSetBase::verify_start() { 60.41 // See comment in verify() about MT safety and verification. 60.42 - hrl_assert_mt_safety_ok(this); 60.43 + hrs_assert_mt_safety_ok(this); 60.44 assert(!_verify_in_progress, 60.45 - hrl_ext_msg(this, "verification should not be in progress")); 60.46 + hrs_ext_msg(this, "verification should not be in progress")); 60.47 60.48 // Do the basic verification first before we do the checks over the regions. 60.49 HeapRegionSetBase::verify(); 60.50 @@ -146,11 +146,11 @@ 60.51 60.52 void HeapRegionSetBase::verify_next_region(HeapRegion* hr) { 60.53 // See comment in verify() about MT safety and verification. 60.54 - hrl_assert_mt_safety_ok(this); 60.55 + hrs_assert_mt_safety_ok(this); 60.56 assert(_verify_in_progress, 60.57 - hrl_ext_msg(this, "verification should be in progress")); 60.58 + hrs_ext_msg(this, "verification should be in progress")); 60.59 60.60 - guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification")); 60.61 + guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification")); 60.62 60.63 _calc_length += 1; 60.64 if (!hr->isHumongous()) { 60.65 @@ -164,28 +164,28 @@ 60.66 60.67 void HeapRegionSetBase::verify_end() { 60.68 // See comment in verify() about MT safety and verification. 
60.69 - hrl_assert_mt_safety_ok(this); 60.70 + hrs_assert_mt_safety_ok(this); 60.71 assert(_verify_in_progress, 60.72 - hrl_ext_msg(this, "verification should be in progress")); 60.73 + hrs_ext_msg(this, "verification should be in progress")); 60.74 60.75 guarantee(length() == _calc_length, 60.76 - hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == " 60.77 + hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == " 60.78 "calc length: "SIZE_FORMAT, 60.79 name(), length(), _calc_length)); 60.80 60.81 guarantee(region_num() == _calc_region_num, 60.82 - hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == " 60.83 + hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == " 60.84 "calc region num: "SIZE_FORMAT, 60.85 name(), region_num(), _calc_region_num)); 60.86 60.87 guarantee(total_capacity_bytes() == _calc_total_capacity_bytes, 60.88 - hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == " 60.89 + hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == " 60.90 "calc capacity bytes: "SIZE_FORMAT, 60.91 name(), 60.92 total_capacity_bytes(), _calc_total_capacity_bytes)); 60.93 60.94 guarantee(total_used_bytes() == _calc_total_used_bytes, 60.95 - hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == " 60.96 + hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == " 60.97 "calc used bytes: "SIZE_FORMAT, 60.98 name(), total_used_bytes(), _calc_total_used_bytes)); 60.99 60.100 @@ -221,9 +221,9 @@ 60.101 //////////////////// HeapRegionSet //////////////////// 60.102 60.103 void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) { 60.104 - hrl_assert_mt_safety_ok(this); 60.105 - hrl_assert_mt_safety_ok(proxy_set); 60.106 - hrl_assert_sets_match(this, proxy_set); 60.107 + hrs_assert_mt_safety_ok(this); 60.108 + hrs_assert_mt_safety_ok(proxy_set); 60.109 + hrs_assert_sets_match(this, proxy_set); 60.110 60.111 verify_optional(); 60.112 proxy_set->verify_optional(); 60.113 @@ -231,19 +231,19 @@ 60.114 if (proxy_set->is_empty()) return; 60.115 60.116 assert(proxy_set->length() <= _length, 60.117 - hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" " 60.118 + hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" " 60.119 "should be <= length: "SIZE_FORMAT, 60.120 name(), proxy_set->length(), _length)); 60.121 _length -= proxy_set->length(); 60.122 60.123 assert(proxy_set->region_num() <= _region_num, 60.124 - hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " 60.125 + hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " 60.126 "should be <= region num: "SIZE_FORMAT, 60.127 name(), proxy_set->region_num(), _region_num)); 60.128 _region_num -= proxy_set->region_num(); 60.129 60.130 assert(proxy_set->total_used_bytes() <= _total_used_bytes, 60.131 - hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" " 60.132 + hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" " 60.133 "should be <= used bytes: "SIZE_FORMAT, 60.134 name(), proxy_set->total_used_bytes(), 60.135 _total_used_bytes)); 60.136 @@ -257,13 +257,13 @@ 60.137 60.138 //////////////////// HeapRegionLinkedList //////////////////// 60.139 60.140 -void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) { 60.141 +void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) { 60.142 msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail()); 60.143 } 60.144 60.145 void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) { 60.146 - hrl_assert_mt_safety_ok(this); 60.147 - hrl_assert_mt_safety_ok(from_list); 60.148 + hrs_assert_mt_safety_ok(this); 60.149 + 
hrs_assert_mt_safety_ok(from_list); 60.150 60.151 verify_optional(); 60.152 from_list->verify_optional(); 60.153 @@ -283,10 +283,10 @@ 60.154 #endif // ASSERT 60.155 60.156 if (_tail != NULL) { 60.157 - assert(length() > 0 && _head != NULL, hrl_ext_msg(this, "invariant")); 60.158 + assert(length() > 0 && _head != NULL, hrs_ext_msg(this, "invariant")); 60.159 _tail->set_next(from_list->_head); 60.160 } else { 60.161 - assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant")); 60.162 + assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant")); 60.163 _head = from_list->_head; 60.164 } 60.165 _tail = from_list->_tail; 60.166 @@ -301,12 +301,12 @@ 60.167 } 60.168 60.169 void HeapRegionLinkedList::remove_all() { 60.170 - hrl_assert_mt_safety_ok(this); 60.171 + hrs_assert_mt_safety_ok(this); 60.172 verify_optional(); 60.173 60.174 HeapRegion* curr = _head; 60.175 while (curr != NULL) { 60.176 - hrl_assert_region_ok(this, curr, this); 60.177 + hrs_assert_region_ok(this, curr, this); 60.178 60.179 HeapRegion* next = curr->next(); 60.180 curr->set_next(NULL); 60.181 @@ -319,9 +319,9 @@ 60.182 } 60.183 60.184 void HeapRegionLinkedList::remove_all_pending(size_t target_count) { 60.185 - hrl_assert_mt_safety_ok(this); 60.186 - assert(target_count > 1, hrl_ext_msg(this, "pre-condition")); 60.187 - assert(!is_empty(), hrl_ext_msg(this, "pre-condition")); 60.188 + hrs_assert_mt_safety_ok(this); 60.189 + assert(target_count > 1, hrs_ext_msg(this, "pre-condition")); 60.190 + assert(!is_empty(), hrs_ext_msg(this, "pre-condition")); 60.191 60.192 verify_optional(); 60.193 DEBUG_ONLY(size_t old_length = length();) 60.194 @@ -330,27 +330,27 @@ 60.195 HeapRegion* prev = NULL; 60.196 size_t count = 0; 60.197 while (curr != NULL) { 60.198 - hrl_assert_region_ok(this, curr, this); 60.199 + hrs_assert_region_ok(this, curr, this); 60.200 HeapRegion* next = curr->next(); 60.201 60.202 if (curr->pending_removal()) { 60.203 assert(count < target_count, 60.204 - hrl_err_msg("[%s] should not come across more regions " 60.205 + hrs_err_msg("[%s] should not come across more regions " 60.206 "pending for removal than target_count: "SIZE_FORMAT, 60.207 name(), target_count)); 60.208 60.209 if (prev == NULL) { 60.210 - assert(_head == curr, hrl_ext_msg(this, "invariant")); 60.211 + assert(_head == curr, hrs_ext_msg(this, "invariant")); 60.212 _head = next; 60.213 } else { 60.214 - assert(_head != curr, hrl_ext_msg(this, "invariant")); 60.215 + assert(_head != curr, hrs_ext_msg(this, "invariant")); 60.216 prev->set_next(next); 60.217 } 60.218 if (next == NULL) { 60.219 - assert(_tail == curr, hrl_ext_msg(this, "invariant")); 60.220 + assert(_tail == curr, hrs_ext_msg(this, "invariant")); 60.221 _tail = prev; 60.222 } else { 60.223 - assert(_tail != curr, hrl_ext_msg(this, "invariant")); 60.224 + assert(_tail != curr, hrs_ext_msg(this, "invariant")); 60.225 } 60.226 60.227 curr->set_next(NULL); 60.228 @@ -371,10 +371,10 @@ 60.229 } 60.230 60.231 assert(count == target_count, 60.232 - hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == " 60.233 + hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == " 60.234 "target_count: "SIZE_FORMAT, name(), count, target_count)); 60.235 assert(length() + target_count == old_length, 60.236 - hrl_err_msg("[%s] new length should be consistent " 60.237 + hrs_err_msg("[%s] new length should be consistent " 60.238 "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" " 60.239 "target_count: "SIZE_FORMAT, 60.240 name(), length(), old_length, target_count)); 60.241 @@ -385,7 
+385,7 @@ 60.242 void HeapRegionLinkedList::verify() { 60.243 // See comment in HeapRegionSetBase::verify() about MT safety and 60.244 // verification. 60.245 - hrl_assert_mt_safety_ok(this); 60.246 + hrs_assert_mt_safety_ok(this); 60.247 60.248 // This will also do the basic verification too. 60.249 verify_start(); 60.250 @@ -399,7 +399,7 @@ 60.251 60.252 count += 1; 60.253 guarantee(count < _unrealistically_long_length, 60.254 - hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" " 60.255 + hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" " 60.256 "seems very long, is there maybe a cycle? " 60.257 "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" " 60.258 "prev1: "PTR_FORMAT" length: "SIZE_FORMAT, 60.259 @@ -410,7 +410,7 @@ 60.260 curr = curr->next(); 60.261 } 60.262 60.263 - guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition")); 60.264 + guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition")); 60.265 60.266 verify_end(); 60.267 }
61.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Fri Mar 25 11:29:30 2011 -0700 61.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Fri Mar 25 18:19:22 2011 -0400 61.3 @@ -28,8 +28,8 @@ 61.4 #include "gc_implementation/g1/heapRegion.hpp" 61.5 61.6 // Large buffer for some cases where the output might be larger than normal. 61.7 -#define HRL_ERR_MSG_BUFSZ 512 61.8 -typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg; 61.9 +#define HRS_ERR_MSG_BUFSZ 512 61.10 +typedef FormatBuffer<HRS_ERR_MSG_BUFSZ> hrs_err_msg; 61.11 61.12 // Set verification will be forced either if someone defines 61.13 // HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which 61.14 @@ -45,10 +45,10 @@ 61.15 // (e.g., length, region num, used bytes sum) plus any shared 61.16 // functionality (e.g., verification). 61.17 61.18 -class hrl_ext_msg; 61.19 +class hrs_ext_msg; 61.20 61.21 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC { 61.22 - friend class hrl_ext_msg; 61.23 + friend class hrs_ext_msg; 61.24 61.25 protected: 61.26 static size_t calculate_region_num(HeapRegion* hr); 61.27 @@ -104,10 +104,10 @@ 61.28 virtual bool check_mt_safety() { return true; } 61.29 61.30 // fill_in_ext_msg() writes the the values of the set's attributes 61.31 - // in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra() 61.32 + // in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra() 61.33 // allows subclasses to append further information. 61.34 - virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { } 61.35 - void fill_in_ext_msg(hrl_ext_msg* msg, const char* message); 61.36 + virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { } 61.37 + void fill_in_ext_msg(hrs_ext_msg* msg, const char* message); 61.38 61.39 // It updates the fields of the set to reflect hr being added to 61.40 // the set. 61.41 @@ -170,9 +170,9 @@ 61.42 // the fields of the associated set. This can be very helpful in 61.43 // diagnosing failures. 61.44 61.45 -class hrl_ext_msg : public hrl_err_msg { 61.46 +class hrs_ext_msg : public hrs_err_msg { 61.47 public: 61.48 - hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") { 61.49 + hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") { 61.50 set->fill_in_ext_msg(this, message); 61.51 } 61.52 }; 61.53 @@ -180,25 +180,25 @@ 61.54 // These two macros are provided for convenience, to keep the uses of 61.55 // these two asserts a bit more concise. 
61.56 61.57 -#define hrl_assert_mt_safety_ok(_set_) \ 61.58 +#define hrs_assert_mt_safety_ok(_set_) \ 61.59 do { \ 61.60 - assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety")); \ 61.61 + assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety")); \ 61.62 } while (0) 61.63 61.64 -#define hrl_assert_region_ok(_set_, _hr_, _expected_) \ 61.65 +#define hrs_assert_region_ok(_set_, _hr_, _expected_) \ 61.66 do { \ 61.67 assert((_set_)->verify_region((_hr_), (_expected_)), \ 61.68 - hrl_ext_msg((_set_), "region verification")); \ 61.69 + hrs_ext_msg((_set_), "region verification")); \ 61.70 } while (0) 61.71 61.72 //////////////////// HeapRegionSet //////////////////// 61.73 61.74 -#define hrl_assert_sets_match(_set1_, _set2_) \ 61.75 +#define hrs_assert_sets_match(_set1_, _set2_) \ 61.76 do { \ 61.77 assert(((_set1_)->regions_humongous() == \ 61.78 (_set2_)->regions_humongous()) && \ 61.79 ((_set1_)->regions_empty() == (_set2_)->regions_empty()), \ 61.80 - hrl_err_msg("the contents of set %s and set %s should match", \ 61.81 + hrs_err_msg("the contents of set %s and set %s should match", \ 61.82 (_set1_)->name(), (_set2_)->name())); \ 61.83 } while (0) 61.84 61.85 @@ -267,7 +267,7 @@ 61.86 HeapRegion* tail() { return _tail; } 61.87 61.88 protected: 61.89 - virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg); 61.90 + virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg); 61.91 61.92 // See the comment for HeapRegionSetBase::clear() 61.93 virtual void clear(); 61.94 @@ -309,10 +309,10 @@ 61.95 virtual void print_on(outputStream* out, bool print_contents = false); 61.96 }; 61.97 61.98 -//////////////////// HeapRegionLinkedList //////////////////// 61.99 +//////////////////// HeapRegionLinkedListIterator //////////////////// 61.100 61.101 -// Iterator class that provides a convenient way to iterator over the 61.102 -// regions in a HeapRegionLinkedList instance. 61.103 +// Iterator class that provides a convenient way to iterate over the 61.104 +// regions of a HeapRegionLinkedList instance. 61.105 61.106 class HeapRegionLinkedListIterator : public StackObj { 61.107 private:
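Editor's note: the hrl_ to hrs_ rename in the heapRegionSet.hpp hunk above covers a FormatBuffer-based message type plus the assert wrapper macros built on it. The underlying pattern, a message object whose constructor snapshots the set's attributes so a failing check prints useful context while keeping call sites short, can be illustrated with the standalone sketch below; all class and macro names here are simplified stand-ins, and the real HotSpot assert machinery is not reproduced.

#include <cassert>
#include <cstdarg>
#include <cstddef>
#include <cstdio>

// Toy stand-in for FormatBuffer<N>/hrs_err_msg: printf-style formatting
// into a fixed-size buffer that can be handed to a failing check.
class ToyErrMsg {
  char _buf[256];
public:
  ToyErrMsg(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    std::vsnprintf(_buf, sizeof(_buf), format, ap);
    va_end(ap);
  }
  const char* buffer() const { return _buf; }
};

// Toy region set: just enough state for the message to be interesting.
struct ToyRegionSet {
  const char* _name;
  size_t      _length;
};

// Analogue of hrs_ext_msg: the constructor captures the set's attributes
// alongside the short message supplied at the call site.
struct ToyExtMsg : public ToyErrMsg {
  ToyExtMsg(const ToyRegionSet* set, const char* message)
    : ToyErrMsg("[%s] %s ln: %zu", set->_name, message, set->_length) { }
};

// Analogue of hrs_assert_mt_safety_ok(): keeps call sites to one short line.
#define toy_assert_nonempty(_set_)                                          \
  do {                                                                      \
    if ((_set_)->_length == 0) {                                            \
      std::fprintf(stderr, "%s\n",                                          \
                   ToyExtMsg((_set_), "pre-condition").buffer());           \
      assert(false);                                                        \
    }                                                                       \
  } while (0)

int main() {
  ToyRegionSet free_list = { "free list", 3 };
  toy_assert_nonempty(&free_list);   // passes silently
  std::printf("ok: [%s] has %zu regions\n", free_list._name, free_list._length);
  return 0;
}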
62.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Fri Mar 25 11:29:30 2011 -0700 62.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Fri Mar 25 18:19:22 2011 -0400 62.3 @@ -42,8 +42,8 @@ 62.4 } 62.5 62.6 inline void HeapRegionSetBase::add_internal(HeapRegion* hr) { 62.7 - hrl_assert_region_ok(this, hr, NULL); 62.8 - assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked")); 62.9 + hrs_assert_region_ok(this, hr, NULL); 62.10 + assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked")); 62.11 62.12 update_for_addition(hr); 62.13 hr->set_containing_set(this); 62.14 @@ -51,7 +51,7 @@ 62.15 62.16 inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) { 62.17 // Assumes the caller has already verified the region. 62.18 - assert(_length > 0, hrl_ext_msg(this, "pre-condition")); 62.19 + assert(_length > 0, hrs_ext_msg(this, "pre-condition")); 62.20 _length -= 1; 62.21 62.22 size_t region_num_diff; 62.23 @@ -61,22 +61,22 @@ 62.24 region_num_diff = calculate_region_num(hr); 62.25 } 62.26 assert(region_num_diff <= _region_num, 62.27 - hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" " 62.28 + hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" " 62.29 "should be <= region num: "SIZE_FORMAT, 62.30 name(), region_num_diff, _region_num)); 62.31 _region_num -= region_num_diff; 62.32 62.33 size_t used_bytes = hr->used(); 62.34 assert(used_bytes <= _total_used_bytes, 62.35 - hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" " 62.36 + hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" " 62.37 "should be <= used bytes: "SIZE_FORMAT, 62.38 name(), used_bytes, _total_used_bytes)); 62.39 _total_used_bytes -= used_bytes; 62.40 } 62.41 62.42 inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) { 62.43 - hrl_assert_region_ok(this, hr, this); 62.44 - assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked")); 62.45 + hrs_assert_region_ok(this, hr, this); 62.46 + assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked")); 62.47 62.48 hr->set_containing_set(NULL); 62.49 update_for_removal(hr); 62.50 @@ -85,13 +85,13 @@ 62.51 //////////////////// HeapRegionSet //////////////////// 62.52 62.53 inline void HeapRegionSet::add(HeapRegion* hr) { 62.54 - hrl_assert_mt_safety_ok(this); 62.55 + hrs_assert_mt_safety_ok(this); 62.56 // add_internal() will verify the region. 62.57 add_internal(hr); 62.58 } 62.59 62.60 inline void HeapRegionSet::remove(HeapRegion* hr) { 62.61 - hrl_assert_mt_safety_ok(this); 62.62 + hrs_assert_mt_safety_ok(this); 62.63 // remove_internal() will verify the region. 62.64 remove_internal(hr); 62.65 } 62.66 @@ -101,8 +101,8 @@ 62.67 // No need to fo the MT safety check here given that this method 62.68 // does not update the contents of the set but instead accumulates 62.69 // the changes in proxy_set which is assumed to be thread-local. 
62.70 - hrl_assert_sets_match(this, proxy_set); 62.71 - hrl_assert_region_ok(this, hr, this); 62.72 + hrs_assert_sets_match(this, proxy_set); 62.73 + hrs_assert_region_ok(this, hr, this); 62.74 62.75 hr->set_containing_set(NULL); 62.76 proxy_set->update_for_addition(hr); 62.77 @@ -111,10 +111,10 @@ 62.78 //////////////////// HeapRegionLinkedList //////////////////// 62.79 62.80 inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) { 62.81 - hrl_assert_mt_safety_ok(this); 62.82 + hrs_assert_mt_safety_ok(this); 62.83 assert((length() == 0 && _head == NULL && _tail == NULL) || 62.84 (length() > 0 && _head != NULL && _tail != NULL), 62.85 - hrl_ext_msg(this, "invariant")); 62.86 + hrs_ext_msg(this, "invariant")); 62.87 // add_internal() will verify the region. 62.88 add_internal(hr); 62.89 62.90 @@ -128,10 +128,10 @@ 62.91 } 62.92 62.93 inline HeapRegion* HeapRegionLinkedList::remove_head() { 62.94 - hrl_assert_mt_safety_ok(this); 62.95 - assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty")); 62.96 + hrs_assert_mt_safety_ok(this); 62.97 + assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty")); 62.98 assert(length() > 0 && _head != NULL && _tail != NULL, 62.99 - hrl_ext_msg(this, "invariant")); 62.100 + hrs_ext_msg(this, "invariant")); 62.101 62.102 // We need to unlink it first. 62.103 HeapRegion* hr = _head; 62.104 @@ -147,7 +147,7 @@ 62.105 } 62.106 62.107 inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() { 62.108 - hrl_assert_mt_safety_ok(this); 62.109 + hrs_assert_mt_safety_ok(this); 62.110 62.111 if (!is_empty()) { 62.112 return remove_head();
63.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp Fri Mar 25 11:29:30 2011 -0700 63.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp Fri Mar 25 18:19:22 2011 -0400 63.3 @@ -52,7 +52,7 @@ 63.4 FreeList_lock->owned_by_self())) || 63.5 (!SafepointSynchronize::is_at_safepoint() && 63.6 Heap_lock->owned_by_self()), 63.7 - hrl_ext_msg(this, "master free list MT safety protocol")); 63.8 + hrs_ext_msg(this, "master free list MT safety protocol")); 63.9 63.10 return FreeRegionList::check_mt_safety(); 63.11 } 63.12 @@ -65,7 +65,7 @@ 63.13 // while holding the SecondaryFreeList_lock. 63.14 63.15 guarantee(SecondaryFreeList_lock->owned_by_self(), 63.16 - hrl_ext_msg(this, "secondary free list MT safety protocol")); 63.17 + hrs_ext_msg(this, "secondary free list MT safety protocol")); 63.18 63.19 return FreeRegionList::check_mt_safety(); 63.20 } 63.21 @@ -81,7 +81,7 @@ 63.22 return HeapRegionSet::verify_region_extra(hr); 63.23 } 63.24 63.25 -//////////////////// HumongousRegionSet //////////////////// 63.26 +//////////////////// MasterHumongousRegionSet //////////////////// 63.27 63.28 bool MasterHumongousRegionSet::check_mt_safety() { 63.29 // Master Humongous Set MT safety protocol: 63.30 @@ -97,6 +97,6 @@ 63.31 OldSets_lock->owned_by_self())) || 63.32 (!SafepointSynchronize::is_at_safepoint() && 63.33 Heap_lock->owned_by_self()), 63.34 - hrl_ext_msg(this, "master humongous set MT safety protocol")); 63.35 + hrs_ext_msg(this, "master humongous set MT safety protocol")); 63.36 return HumongousRegionSet::check_mt_safety(); 63.37 }
64.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Mar 25 11:29:30 2011 -0700 64.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Mar 25 18:19:22 2011 -0400 64.3 @@ -1,5 +1,5 @@ 64.4 /* 64.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 64.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 64.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 64.8 * 64.9 * This code is free software; you can redistribute it and/or modify it 64.10 @@ -1530,13 +1530,15 @@ 64.11 { 64.12 if (_ref_processor == NULL) { 64.13 // Allocate and initialize a reference processor 64.14 - _ref_processor = ReferenceProcessor::create_ref_processor( 64.15 - _reserved, // span 64.16 - refs_discovery_is_atomic(), // atomic_discovery 64.17 - refs_discovery_is_mt(), // mt_discovery 64.18 - NULL, // is_alive_non_header 64.19 - ParallelGCThreads, 64.20 - ParallelRefProcEnabled); 64.21 + _ref_processor = 64.22 + new ReferenceProcessor(_reserved, // span 64.23 + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 64.24 + (int) ParallelGCThreads, // mt processing degree 64.25 + refs_discovery_is_mt(), // mt discovery 64.26 + (int) ParallelGCThreads, // mt discovery degree 64.27 + refs_discovery_is_atomic(), // atomic_discovery 64.28 + NULL, // is_alive_non_header 64.29 + false); // write barrier for next field updates 64.30 } 64.31 } 64.32
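Editor's note: this call site (and the psParallelCompact and psScavenge ones below) now passes explicit processing and discovery degrees to the reworked ReferenceProcessor constructor, and the referenceProcessor.cpp hunk later in this changeset derives the queue counts from them as _num_q = MAX2(1, mt_processing_degree) and _max_num_q = MAX2(_num_q, mt_discovery_degree), with the discovered-reference lists sized by _max_num_q. A tiny standalone sketch of that derivation follows; the class and field names are simplified stand-ins, not the real ReferenceProcessor.

#include <algorithm>
#include <cstdio>

// Minimal sketch of how the reworked constructor derives its queue counts
// from the two degrees passed in by the collectors.
struct ToyRefProcessor {
  int _num_q;      // number of queues used for MT reference processing
  int _max_num_q;  // number of discovered-reference lists actually allocated

  ToyRefProcessor(int mt_processing_degree, int mt_discovery_degree) {
    _num_q     = std::max(1, mt_processing_degree);
    _max_num_q = std::max(_num_q, mt_discovery_degree);
  }
};

int main() {
  // e.g. ParallelGCThreads = 4: four processing and four discovery queues.
  ToyRefProcessor parallel(4, 4);
  // Serial configuration: degrees of 0 still yield one queue of each kind.
  ToyRefProcessor serial(0, 0);
  std::printf("parallel: num_q=%d max_num_q=%d\n", parallel._num_q, parallel._max_num_q);
  std::printf("serial:   num_q=%d max_num_q=%d\n", serial._num_q, serial._max_num_q);
  return 0;
}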
65.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Mar 25 11:29:30 2011 -0700 65.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Mar 25 18:19:22 2011 -0400 65.3 @@ -1,5 +1,5 @@ 65.4 /* 65.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 65.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 65.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 65.8 * 65.9 * This code is free software; you can redistribute it and/or modify it 65.10 @@ -58,9 +58,7 @@ 65.11 65.12 void PSMarkSweep::initialize() { 65.13 MemRegion mr = Universe::heap()->reserved_region(); 65.14 - _ref_processor = new ReferenceProcessor(mr, 65.15 - true, // atomic_discovery 65.16 - false); // mt_discovery 65.17 + _ref_processor = new ReferenceProcessor(mr); // a vanilla ref proc 65.18 _counters = new CollectorCounters("PSMarkSweep", 1); 65.19 } 65.20
66.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Mar 25 11:29:30 2011 -0700 66.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Mar 25 18:19:22 2011 -0400 66.3 @@ -1,5 +1,5 @@ 66.4 /* 66.5 - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 66.6 + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. 66.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 66.8 * 66.9 * This code is free software; you can redistribute it and/or modify it 66.10 @@ -827,13 +827,15 @@ 66.11 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); 66.12 66.13 MemRegion mr = heap->reserved_region(); 66.14 - _ref_processor = ReferenceProcessor::create_ref_processor( 66.15 - mr, // span 66.16 - true, // atomic_discovery 66.17 - true, // mt_discovery 66.18 - &_is_alive_closure, 66.19 - ParallelGCThreads, 66.20 - ParallelRefProcEnabled); 66.21 + _ref_processor = 66.22 + new ReferenceProcessor(mr, // span 66.23 + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 66.24 + (int) ParallelGCThreads, // mt processing degree 66.25 + true, // mt discovery 66.26 + (int) ParallelGCThreads, // mt discovery degree 66.27 + true, // atomic_discovery 66.28 + &_is_alive_closure, // non-header is alive closure 66.29 + false); // write barrier for next field updates 66.30 _counters = new CollectorCounters("PSParallelCompact", 1); 66.31 66.32 // Initialize static fields in ParCompactionManager.
67.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Mar 25 11:29:30 2011 -0700 67.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Mar 25 18:19:22 2011 -0400 67.3 @@ -411,7 +411,7 @@ 67.4 template <class T> void PSPromotionManager::process_array_chunk_work( 67.5 oop obj, 67.6 int start, int end) { 67.7 - assert(start < end, "invariant"); 67.8 + assert(start <= end, "invariant"); 67.9 T* const base = (T*)objArrayOop(obj)->base(); 67.10 T* p = base + start; 67.11 T* const chunk_end = base + end;
68.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Mar 25 11:29:30 2011 -0700 68.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Mar 25 18:19:22 2011 -0400 68.3 @@ -1,5 +1,5 @@ 68.4 /* 68.5 - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 68.6 + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. 68.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 68.8 * 68.9 * This code is free software; you can redistribute it and/or modify it 68.10 @@ -23,6 +23,7 @@ 68.11 */ 68.12 68.13 #include "precompiled.hpp" 68.14 +#include "classfile/symbolTable.hpp" 68.15 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" 68.16 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" 68.17 #include "gc_implementation/parallelScavenge/generationSizer.hpp" 68.18 @@ -439,6 +440,14 @@ 68.19 reference_processor()->enqueue_discovered_references(NULL); 68.20 } 68.21 68.22 + if (!JavaObjectsInPerm) { 68.23 + // Unlink any dead interned Strings 68.24 + StringTable::unlink(&_is_alive_closure); 68.25 + // Process the remaining live ones 68.26 + PSScavengeRootsClosure root_closure(promotion_manager); 68.27 + StringTable::oops_do(&root_closure); 68.28 + } 68.29 + 68.30 // Finally, flush the promotion_manager's labs, and deallocate its stacks. 68.31 PSPromotionManager::post_scavenge(); 68.32 68.33 @@ -796,13 +805,15 @@ 68.34 68.35 // Initialize ref handling object for scavenging. 68.36 MemRegion mr = young_gen->reserved(); 68.37 - _ref_processor = ReferenceProcessor::create_ref_processor( 68.38 - mr, // span 68.39 - true, // atomic_discovery 68.40 - true, // mt_discovery 68.41 - NULL, // is_alive_non_header 68.42 - ParallelGCThreads, 68.43 - ParallelRefProcEnabled); 68.44 + _ref_processor = 68.45 + new ReferenceProcessor(mr, // span 68.46 + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 68.47 + (int) ParallelGCThreads, // mt processing degree 68.48 + true, // mt discovery 68.49 + (int) ParallelGCThreads, // mt discovery degree 68.50 + true, // atomic_discovery 68.51 + NULL, // header provides liveness info 68.52 + false); // next field updates do not need write barrier 68.53 68.54 // Cache the cardtable 68.55 BarrierSet* bs = Universe::heap()->barrier_set();
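Editor's note: the new block added to psScavenge.cpp above handles the interned-string table in two passes once references have been processed: unlink entries whose referents did not survive the scavenge, then walk whatever remains as roots. The sketch below shows that two-pass shape over a toy table using plain functions and standard containers; it is a conceptual model only, not the real StringTable API.

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct ToyEntry {
  std::string value;
  bool        is_alive;   // stand-in for the is-alive closure's verdict
};

typedef std::vector<ToyEntry> ToyStringTable;

// Pass 1: drop entries whose referents are dead (StringTable::unlink analogue).
static void unlink_dead(ToyStringTable& table) {
  table.erase(std::remove_if(table.begin(), table.end(),
                             [](const ToyEntry& e) { return !e.is_alive; }),
              table.end());
}

// Pass 2: visit the survivors as roots (StringTable::oops_do analogue).
template <typename Closure>
static void oops_do(const ToyStringTable& table, Closure visit) {
  for (const ToyEntry& e : table) {
    visit(e);
  }
}

int main() {
  ToyStringTable table = { {"alpha", true}, {"beta", false}, {"gamma", true} };
  unlink_dead(table);
  oops_do(table, [](const ToyEntry& e) {
    std::printf("processing live interned string: %s\n", e.value.c_str());
  });
  return 0;
}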
69.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Mar 25 11:29:30 2011 -0700 69.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Mar 25 18:19:22 2011 -0400 69.3 @@ -86,4 +86,21 @@ 69.4 } 69.5 } 69.6 69.7 +class PSScavengeRootsClosure: public OopClosure { 69.8 + private: 69.9 + PSPromotionManager* _promotion_manager; 69.10 + 69.11 + protected: 69.12 + template <class T> void do_oop_work(T *p) { 69.13 + if (PSScavenge::should_scavenge(p)) { 69.14 + // We never card mark roots, maybe call a func without test? 69.15 + PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); 69.16 + } 69.17 + } 69.18 + public: 69.19 + PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } 69.20 + void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); } 69.21 + void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); } 69.22 +}; 69.23 + 69.24 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
70.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Mar 25 11:29:30 2011 -0700 70.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Mar 25 18:19:22 2011 -0400 70.3 @@ -30,7 +30,7 @@ 70.4 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp" 70.5 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" 70.6 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 70.7 -#include "gc_implementation/parallelScavenge/psScavenge.hpp" 70.8 +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" 70.9 #include "gc_implementation/parallelScavenge/psTasks.hpp" 70.10 #include "memory/iterator.hpp" 70.11 #include "memory/universe.hpp" 70.12 @@ -46,24 +46,6 @@ 70.13 // ScavengeRootsTask 70.14 // 70.15 70.16 -// Define before use 70.17 -class PSScavengeRootsClosure: public OopClosure { 70.18 - private: 70.19 - PSPromotionManager* _promotion_manager; 70.20 - 70.21 - protected: 70.22 - template <class T> void do_oop_work(T *p) { 70.23 - if (PSScavenge::should_scavenge(p)) { 70.24 - // We never card mark roots, maybe call a func without test? 70.25 - PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); 70.26 - } 70.27 - } 70.28 - public: 70.29 - PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } 70.30 - void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); } 70.31 - void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); } 70.32 -}; 70.33 - 70.34 void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) { 70.35 assert(Universe::heap()->is_gc_active(), "called outside gc"); 70.36
71.1 --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Mar 25 11:29:30 2011 -0700 71.2 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Mar 25 18:19:22 2011 -0400 71.3 @@ -656,7 +656,7 @@ 71.4 // oop rcvr = locals[0].j.r; 71.5 oop rcvr; 71.6 if (METHOD->is_static()) { 71.7 - rcvr = METHOD->constants()->pool_holder()->klass_part()->java_mirror(); 71.8 + rcvr = METHOD->constants()->pool_holder()->java_mirror(); 71.9 } else { 71.10 rcvr = LOCALS_OBJECT(0); 71.11 VERIFY_OOP(rcvr); 71.12 @@ -2111,8 +2111,8 @@ 71.13 break; 71.14 71.15 case JVM_CONSTANT_Class: 71.16 - VERIFY_OOP(constants->resolved_klass_at(index)->klass_part()->java_mirror()); 71.17 - SET_STACK_OBJECT(constants->resolved_klass_at(index)->klass_part()->java_mirror(), 0); 71.18 + VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 71.19 + SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 71.20 break; 71.21 71.22 case JVM_CONSTANT_UnresolvedString:
72.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp Fri Mar 25 11:29:30 2011 -0700 72.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Fri Mar 25 18:19:22 2011 -0400 72.3 @@ -118,7 +118,7 @@ 72.4 72.5 if (tag.is_unresolved_klass() || tag.is_klass()) { 72.6 klassOop klass = pool->klass_at(index, CHECK); 72.7 - oop java_class = klass->klass_part()->java_mirror(); 72.8 + oop java_class = klass->java_mirror(); 72.9 thread->set_vm_result(java_class); 72.10 } else { 72.11 #ifdef ASSERT 72.12 @@ -983,7 +983,8 @@ 72.13 ConstantPoolCacheEntry *cp_entry)) 72.14 72.15 // check the access_flags for the field in the klass 72.16 - instanceKlass* ik = instanceKlass::cast((klassOop)cp_entry->f1()); 72.17 + 72.18 + instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1())); 72.19 typeArrayOop fields = ik->fields(); 72.20 int index = cp_entry->field_index(); 72.21 assert(index < fields->length(), "holders field index is out of range"); 72.22 @@ -1009,7 +1010,7 @@ 72.23 // non-static field accessors have an object, but we need a handle 72.24 h_obj = Handle(thread, obj); 72.25 } 72.26 - instanceKlassHandle h_cp_entry_f1(thread, (klassOop)cp_entry->f1()); 72.27 + instanceKlassHandle h_cp_entry_f1(thread, java_lang_Class::as_klassOop(cp_entry->f1())); 72.28 jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2(), is_static); 72.29 JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid); 72.30 IRT_END 72.31 @@ -1017,7 +1018,7 @@ 72.32 IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread, 72.33 oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value)) 72.34 72.35 - klassOop k = (klassOop)cp_entry->f1(); 72.36 + klassOop k = java_lang_Class::as_klassOop(cp_entry->f1()); 72.37 72.38 // check the access_flags for the field in the klass 72.39 instanceKlass* ik = instanceKlass::cast(k);
73.1 --- a/src/share/vm/memory/collectorPolicy.cpp Fri Mar 25 11:29:30 2011 -0700 73.2 +++ b/src/share/vm/memory/collectorPolicy.cpp Fri Mar 25 18:19:22 2011 -0400 73.3 @@ -1,5 +1,5 @@ 73.4 /* 73.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 73.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 73.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 73.8 * 73.9 * This code is free software; you can redistribute it and/or modify it 73.10 @@ -293,10 +293,11 @@ 73.11 // Determine maximum size of gen0 73.12 73.13 size_t max_new_size = 0; 73.14 - if (FLAG_IS_CMDLINE(MaxNewSize)) { 73.15 + if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) { 73.16 if (MaxNewSize < min_alignment()) { 73.17 max_new_size = min_alignment(); 73.18 - } else if (MaxNewSize >= max_heap_byte_size()) { 73.19 + } 73.20 + if (MaxNewSize >= max_heap_byte_size()) { 73.21 max_new_size = align_size_down(max_heap_byte_size() - min_alignment(), 73.22 min_alignment()); 73.23 warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " 73.24 @@ -333,7 +334,7 @@ 73.25 assert(max_new_size > 0, "All paths should set max_new_size"); 73.26 73.27 // Given the maximum gen0 size, determine the initial and 73.28 - // minimum sizes. 73.29 + // minimum gen0 sizes. 73.30 73.31 if (max_heap_byte_size() == min_heap_byte_size()) { 73.32 // The maximum and minimum heap sizes are the same so 73.33 @@ -396,7 +397,7 @@ 73.34 } 73.35 73.36 if (PrintGCDetails && Verbose) { 73.37 - gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " 73.38 + gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 73.39 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 73.40 min_gen0_size(), initial_gen0_size(), max_gen0_size()); 73.41 } 73.42 @@ -448,7 +449,7 @@ 73.43 // At this point the minimum, initial and maximum sizes 73.44 // of the overall heap and of gen0 have been determined. 73.45 // The maximum gen1 size can be determined from the maximum gen0 73.46 - // and maximum heap size since not explicit flags exits 73.47 + // and maximum heap size since no explicit flags exits 73.48 // for setting the gen1 maximum. 73.49 _max_gen1_size = max_heap_byte_size() - _max_gen0_size; 73.50 _max_gen1_size = 73.51 @@ -494,13 +495,13 @@ 73.52 "generation sizes: using maximum heap = " SIZE_FORMAT 73.53 " -XX:OldSize flag is being ignored", 73.54 max_heap_byte_size()); 73.55 - } 73.56 + } 73.57 // If there is an inconsistency between the OldSize and the minimum and/or 73.58 // initial size of gen0, since OldSize was explicitly set, OldSize wins. 73.59 if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, 73.60 min_heap_byte_size(), OldSize)) { 73.61 if (PrintGCDetails && Verbose) { 73.62 - gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " 73.63 + gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 73.64 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 73.65 min_gen0_size(), initial_gen0_size(), max_gen0_size()); 73.66 } 73.67 @@ -509,7 +510,7 @@ 73.68 if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, 73.69 initial_heap_byte_size(), OldSize)) { 73.70 if (PrintGCDetails && Verbose) { 73.71 - gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " 73.72 + gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " 73.73 SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, 73.74 min_gen0_size(), initial_gen0_size(), max_gen0_size()); 73.75 }
74.1 --- a/src/share/vm/memory/compactingPermGenGen.hpp Fri Mar 25 11:29:30 2011 -0700 74.2 +++ b/src/share/vm/memory/compactingPermGenGen.hpp Fri Mar 25 18:19:22 2011 -0400 74.3 @@ -1,5 +1,5 @@ 74.4 /* 74.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 74.6 + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 74.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 74.8 * 74.9 * This code is free software; you can redistribute it and/or modify it 74.10 @@ -105,7 +105,7 @@ 74.11 public: 74.12 74.13 enum { 74.14 - vtbl_list_size = 16, // number of entries in the shared space vtable list. 74.15 + vtbl_list_size = 17, // number of entries in the shared space vtable list. 74.16 num_virtuals = 200 // number of virtual methods in Klass (or 74.17 // subclass) objects, or greater. 74.18 };
75.1 --- a/src/share/vm/memory/dump.cpp Fri Mar 25 11:29:30 2011 -0700 75.2 +++ b/src/share/vm/memory/dump.cpp Fri Mar 25 18:19:22 2011 -0400 75.3 @@ -1561,6 +1561,7 @@ 75.4 // thread because it requires object allocation. 75.5 LinkClassesClosure lcc(Thread::current()); 75.6 object_iterate(&lcc); 75.7 + ensure_parsability(false); // arg is actually don't care 75.8 tty->print_cr("done. "); 75.9 75.10 // Create and dump the shared spaces.
76.1 --- a/src/share/vm/memory/generation.cpp Fri Mar 25 11:29:30 2011 -0700 76.2 +++ b/src/share/vm/memory/generation.cpp Fri Mar 25 18:19:22 2011 -0400 76.3 @@ -1,5 +1,5 @@ 76.4 /* 76.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 76.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 76.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 76.8 * 76.9 * This code is free software; you can redistribute it and/or modify it 76.10 @@ -83,14 +83,11 @@ 76.11 } 76.12 76.13 // By default we get a single threaded default reference processor; 76.14 -// generations needing multi-threaded refs discovery override this method. 76.15 +// generations needing multi-threaded refs processing or discovery override this method. 76.16 void Generation::ref_processor_init() { 76.17 assert(_ref_processor == NULL, "a reference processor already exists"); 76.18 assert(!_reserved.is_empty(), "empty generation?"); 76.19 - _ref_processor = 76.20 - new ReferenceProcessor(_reserved, // span 76.21 - refs_discovery_is_atomic(), // atomic_discovery 76.22 - refs_discovery_is_mt()); // mt_discovery 76.23 + _ref_processor = new ReferenceProcessor(_reserved); // a vanilla reference processor 76.24 if (_ref_processor == NULL) { 76.25 vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); 76.26 }
77.1 --- a/src/share/vm/memory/oopFactory.cpp Fri Mar 25 11:29:30 2011 -0700 77.2 +++ b/src/share/vm/memory/oopFactory.cpp Fri Mar 25 18:19:22 2011 -0400 77.3 @@ -1,5 +1,5 @@ 77.4 /* 77.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 77.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 77.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 77.8 * 77.9 * This code is free software; you can redistribute it and/or modify it 77.10 @@ -117,12 +117,12 @@ 77.11 } 77.12 77.13 77.14 -klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len, 77.15 +klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_len, 77.16 int static_field_size, 77.17 unsigned int nonstatic_oop_map_count, 77.18 ReferenceType rt, TRAPS) { 77.19 instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj()); 77.20 - return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL); 77.21 + return ikk->allocate_instance_klass(name, vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL); 77.22 } 77.23 77.24
78.1 --- a/src/share/vm/memory/oopFactory.hpp Fri Mar 25 11:29:30 2011 -0700 78.2 +++ b/src/share/vm/memory/oopFactory.hpp Fri Mar 25 18:19:22 2011 -0400 78.3 @@ -1,5 +1,5 @@ 78.4 /* 78.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 78.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 78.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 78.8 * 78.9 * This code is free software; you can redistribute it and/or modify it 78.10 @@ -72,7 +72,8 @@ 78.11 TRAPS); 78.12 78.13 // Instance classes 78.14 - static klassOop new_instanceKlass(int vtable_len, int itable_len, 78.15 + static klassOop new_instanceKlass(Symbol* name, 78.16 + int vtable_len, int itable_len, 78.17 int static_field_size, 78.18 unsigned int nonstatic_oop_map_count, 78.19 ReferenceType rt, TRAPS);
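With the Symbol* name threaded through oopFactory, instanceKlassKlass::allocate_instance_klass (changed later in this changeset) can choose the specialized mirror klass when the class being defined is java.lang.Class. A sketch of a call site after the signature change, with local names that are illustrative rather than taken from the patch:

    klassOop k = oopFactory::new_instanceKlass(class_name,              // Symbol* of the class being defined
                                               vtable_size,
                                               itable_size,
                                               static_field_size,
                                               nonstatic_oop_map_count,
                                               rt,                      // ReferenceType
                                               CHECK_NULL);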
79.1 --- a/src/share/vm/memory/referenceProcessor.cpp Fri Mar 25 11:29:30 2011 -0700 79.2 +++ b/src/share/vm/memory/referenceProcessor.cpp Fri Mar 25 18:19:22 2011 -0400 79.3 @@ -1,5 +1,5 @@ 79.4 /* 79.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 79.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 79.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 79.8 * 79.9 * This code is free software; you can redistribute it and/or modify it 79.10 @@ -102,40 +102,17 @@ 79.11 "Unrecongnized RefDiscoveryPolicy"); 79.12 } 79.13 79.14 -ReferenceProcessor* 79.15 -ReferenceProcessor::create_ref_processor(MemRegion span, 79.16 - bool atomic_discovery, 79.17 - bool mt_discovery, 79.18 - BoolObjectClosure* is_alive_non_header, 79.19 - int parallel_gc_threads, 79.20 - bool mt_processing, 79.21 - bool dl_needs_barrier) { 79.22 - int mt_degree = 1; 79.23 - if (parallel_gc_threads > 1) { 79.24 - mt_degree = parallel_gc_threads; 79.25 - } 79.26 - ReferenceProcessor* rp = 79.27 - new ReferenceProcessor(span, atomic_discovery, 79.28 - mt_discovery, mt_degree, 79.29 - mt_processing && (parallel_gc_threads > 0), 79.30 - dl_needs_barrier); 79.31 - if (rp == NULL) { 79.32 - vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); 79.33 - } 79.34 - rp->set_is_alive_non_header(is_alive_non_header); 79.35 - rp->setup_policy(false /* default soft ref policy */); 79.36 - return rp; 79.37 -} 79.38 - 79.39 ReferenceProcessor::ReferenceProcessor(MemRegion span, 79.40 + bool mt_processing, 79.41 + int mt_processing_degree, 79.42 + bool mt_discovery, 79.43 + int mt_discovery_degree, 79.44 bool atomic_discovery, 79.45 - bool mt_discovery, 79.46 - int mt_degree, 79.47 - bool mt_processing, 79.48 + BoolObjectClosure* is_alive_non_header, 79.49 bool discovered_list_needs_barrier) : 79.50 _discovering_refs(false), 79.51 _enqueuing_is_done(false), 79.52 - _is_alive_non_header(NULL), 79.53 + _is_alive_non_header(is_alive_non_header), 79.54 _discovered_list_needs_barrier(discovered_list_needs_barrier), 79.55 _bs(NULL), 79.56 _processing_is_mt(mt_processing), 79.57 @@ -144,8 +121,8 @@ 79.58 _span = span; 79.59 _discovery_is_atomic = atomic_discovery; 79.60 _discovery_is_mt = mt_discovery; 79.61 - _num_q = mt_degree; 79.62 - _max_num_q = mt_degree; 79.63 + _num_q = MAX2(1, mt_processing_degree); 79.64 + _max_num_q = MAX2(_num_q, mt_discovery_degree); 79.65 _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref); 79.66 if (_discoveredSoftRefs == NULL) { 79.67 vm_exit_during_initialization("Could not allocated RefProc Array"); 79.68 @@ -163,6 +140,7 @@ 79.69 if (discovered_list_needs_barrier) { 79.70 _bs = Universe::heap()->barrier_set(); 79.71 } 79.72 + setup_policy(false /* default soft ref policy */); 79.73 } 79.74 79.75 #ifndef PRODUCT 79.76 @@ -405,15 +383,14 @@ 79.77 { } 79.78 79.79 virtual void work(unsigned int work_id) { 79.80 - assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds"); 79.81 + assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds"); 79.82 // Simplest first cut: static partitioning. 79.83 int index = work_id; 79.84 // The increment on "index" must correspond to the maximum number of queues 79.85 // (n_queues) with which that ReferenceProcessor was created. That 79.86 // is because of the "clever" way the discovered references lists were 79.87 - // allocated and are indexed into. That number is ParallelGCThreads 79.88 - // currently. 
Assert that. 79.89 - assert(_n_queues == (int) ParallelGCThreads, "Different number not expected"); 79.90 + // allocated and are indexed into. 79.91 + assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected"); 79.92 for (int j = 0; 79.93 j < subclasses_of_ref; 79.94 j++, index += _n_queues) { 79.95 @@ -672,7 +649,7 @@ 79.96 } 79.97 } 79.98 NOT_PRODUCT( 79.99 - if (PrintGCDetails && TraceReferenceGC) { 79.100 + if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) { 79.101 gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " 79.102 "Refs in discovered list " INTPTR_FORMAT, 79.103 iter.removed(), iter.processed(), (address)refs_list.head()); 79.104 @@ -711,7 +688,7 @@ 79.105 // Now close the newly reachable set 79.106 complete_gc->do_void(); 79.107 NOT_PRODUCT( 79.108 - if (PrintGCDetails && TraceReferenceGC) { 79.109 + if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) { 79.110 gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " 79.111 "Refs in discovered list " INTPTR_FORMAT, 79.112 iter.removed(), iter.processed(), (address)refs_list.head()); 79.113 @@ -951,7 +928,7 @@ 79.114 } 79.115 if (PrintReferenceGC && PrintGCDetails) { 79.116 size_t total = 0; 79.117 - for (int i = 0; i < _num_q; ++i) { 79.118 + for (int i = 0; i < _max_num_q; ++i) { 79.119 total += refs_lists[i].length(); 79.120 } 79.121 gclog_or_tty->print(", %u refs", total); 79.122 @@ -967,7 +944,7 @@ 79.123 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); 79.124 task_executor->execute(phase1); 79.125 } else { 79.126 - for (int i = 0; i < _num_q; i++) { 79.127 + for (int i = 0; i < _max_num_q; i++) { 79.128 process_phase1(refs_lists[i], policy, 79.129 is_alive, keep_alive, complete_gc); 79.130 } 79.131 @@ -983,7 +960,7 @@ 79.132 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); 79.133 task_executor->execute(phase2); 79.134 } else { 79.135 - for (int i = 0; i < _num_q; i++) { 79.136 + for (int i = 0; i < _max_num_q; i++) { 79.137 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 79.138 } 79.139 } 79.140 @@ -994,7 +971,7 @@ 79.141 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); 79.142 task_executor->execute(phase3); 79.143 } else { 79.144 - for (int i = 0; i < _num_q; i++) { 79.145 + for (int i = 0; i < _max_num_q; i++) { 79.146 process_phase3(refs_lists[i], clear_referent, 79.147 is_alive, keep_alive, complete_gc); 79.148 } 79.149 @@ -1008,7 +985,7 @@ 79.150 // for (int j = 0; j < _num_q; j++) { 79.151 // int index = i * _max_num_q + j; 79.152 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 79.153 - if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) { 79.154 + if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 79.155 gclog_or_tty->print_cr( 79.156 "\nScrubbing %s discovered list of Null referents", 79.157 list_name(i)); 79.158 @@ -1350,7 +1327,7 @@ 79.159 { 79.160 TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, 79.161 false, gclog_or_tty); 79.162 - for (int i = 0; i < _num_q; i++) { 79.163 + for (int i = 0; i < _max_num_q; i++) { 79.164 if (yield->should_return()) { 79.165 return; 79.166 } 79.167 @@ -1363,7 +1340,7 @@ 79.168 { 79.169 TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, 79.170 false, gclog_or_tty); 79.171 - for (int i = 0; i < _num_q; i++) { 79.172 + for (int i = 0; i < _max_num_q; i++) { 79.173 if (yield->should_return()) { 
79.174 return; 79.175 } 79.176 @@ -1376,7 +1353,7 @@ 79.177 { 79.178 TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, 79.179 false, gclog_or_tty); 79.180 - for (int i = 0; i < _num_q; i++) { 79.181 + for (int i = 0; i < _max_num_q; i++) { 79.182 if (yield->should_return()) { 79.183 return; 79.184 } 79.185 @@ -1433,7 +1410,7 @@ 79.186 complete_gc->do_void(); 79.187 79.188 NOT_PRODUCT( 79.189 - if (PrintGCDetails && PrintReferenceGC) { 79.190 + if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) { 79.191 gclog_or_tty->print_cr(" Dropped %d Refs out of %d " 79.192 "Refs in discovered list " INTPTR_FORMAT, 79.193 iter.removed(), iter.processed(), (address)refs_list.head());
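The loops above switch from _num_q to _max_num_q because the discovered-reference lists are allocated once, sized by the maximum MT degree; when the active processing degree is later lowered via set_active_mt_degree(), lists beyond _num_q can still hold references and must not be skipped. A sketch of the flat layout those strides assume (loop body elided):

    // _discoveredSoftRefs is one array of _max_num_q * subclasses_of_ref lists,
    // grouped per reference kind (soft, weak, final, phantom).
    for (int j = 0; j < subclasses_of_ref; j++) {
      for (int i = 0; i < _max_num_q; i++) {
        DiscoveredList& list = _discoveredSoftRefs[j * _max_num_q + i];
        // ... scan, preclean, or scrub 'list' ...
      }
    }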
80.1 --- a/src/share/vm/memory/referenceProcessor.hpp Fri Mar 25 11:29:30 2011 -0700 80.2 +++ b/src/share/vm/memory/referenceProcessor.hpp Fri Mar 25 18:19:22 2011 -0400 80.3 @@ -1,5 +1,5 @@ 80.4 /* 80.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 80.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 80.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 80.8 * 80.9 * This code is free software; you can redistribute it and/or modify it 80.10 @@ -71,7 +71,7 @@ 80.11 bool _enqueuing_is_done; // true if all weak references enqueued 80.12 bool _processing_is_mt; // true during phases when 80.13 // reference processing is MT. 80.14 - int _next_id; // round-robin counter in 80.15 + int _next_id; // round-robin mod _num_q counter in 80.16 // support of work distribution 80.17 80.18 // For collectors that do not keep GC marking information 80.19 @@ -103,7 +103,8 @@ 80.20 80.21 public: 80.22 int num_q() { return _num_q; } 80.23 - void set_mt_degree(int v) { _num_q = v; } 80.24 + int max_num_q() { return _max_num_q; } 80.25 + void set_active_mt_degree(int v) { _num_q = v; } 80.26 DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } 80.27 static oop sentinel_ref() { return _sentinelRef; } 80.28 static oop* adr_sentinel_ref() { return &_sentinelRef; } 80.29 @@ -216,6 +217,7 @@ 80.30 VoidClosure* complete_gc, 80.31 YieldClosure* yield); 80.32 80.33 + // round-robin mod _num_q (not: _not_ mode _max_num_q) 80.34 int next_id() { 80.35 int id = _next_id; 80.36 if (++_next_id == _num_q) { 80.37 @@ -256,24 +258,16 @@ 80.38 _max_num_q(0), 80.39 _processing_is_mt(false), 80.40 _next_id(0) 80.41 - {} 80.42 + { } 80.43 80.44 - ReferenceProcessor(MemRegion span, bool atomic_discovery, 80.45 - bool mt_discovery, 80.46 - int mt_degree = 1, 80.47 - bool mt_processing = false, 80.48 + // Default parameters give you a vanilla reference processor. 80.49 + ReferenceProcessor(MemRegion span, 80.50 + bool mt_processing = false, int mt_processing_degree = 1, 80.51 + bool mt_discovery = false, int mt_discovery_degree = 1, 80.52 + bool atomic_discovery = true, 80.53 + BoolObjectClosure* is_alive_non_header = NULL, 80.54 bool discovered_list_needs_barrier = false); 80.55 80.56 - // Allocates and initializes a reference processor. 80.57 - static ReferenceProcessor* create_ref_processor( 80.58 - MemRegion span, 80.59 - bool atomic_discovery, 80.60 - bool mt_discovery, 80.61 - BoolObjectClosure* is_alive_non_header = NULL, 80.62 - int parallel_gc_threads = 1, 80.63 - bool mt_processing = false, 80.64 - bool discovered_list_needs_barrier = false); 80.65 - 80.66 // RefDiscoveryPolicy values 80.67 enum DiscoveryPolicy { 80.68 ReferenceBasedDiscovery = 0, 80.69 @@ -397,20 +391,20 @@ 80.70 // A utility class to temporarily change the MT'ness of 80.71 // reference discovery for the given ReferenceProcessor 80.72 // in the scope that contains it. 
80.73 -class ReferenceProcessorMTMutator: StackObj { 80.74 +class ReferenceProcessorMTDiscoveryMutator: StackObj { 80.75 private: 80.76 ReferenceProcessor* _rp; 80.77 bool _saved_mt; 80.78 80.79 public: 80.80 - ReferenceProcessorMTMutator(ReferenceProcessor* rp, 80.81 - bool mt): 80.82 + ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp, 80.83 + bool mt): 80.84 _rp(rp) { 80.85 _saved_mt = _rp->discovery_is_mt(); 80.86 _rp->set_mt_discovery(mt); 80.87 } 80.88 80.89 - ~ReferenceProcessorMTMutator() { 80.90 + ~ReferenceProcessorMTDiscoveryMutator() { 80.91 _rp->set_mt_discovery(_saved_mt); 80.92 } 80.93 };
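The mutator rename makes explicit that the RAII helper toggles discovery, not processing. Usage sketch (the surrounding context is hypothetical):

    {
      ReferenceProcessorMTDiscoveryMutator rp_mut(ref_processor(), false);
      // reference discovery is forced single-threaded within this scope;
      // the destructor restores the saved mt-discovery setting on exit.
    }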
81.1 --- a/src/share/vm/memory/sharedHeap.cpp Fri Mar 25 11:29:30 2011 -0700 81.2 +++ b/src/share/vm/memory/sharedHeap.cpp Fri Mar 25 18:19:22 2011 -0400 81.3 @@ -171,11 +171,13 @@ 81.4 } 81.5 81.6 if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) { 81.7 - if (so & SO_Strings) { 81.8 - StringTable::oops_do(roots); 81.9 - } 81.10 - // Verify if the string table contents are in the perm gen 81.11 - NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure)); 81.12 + if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) { 81.13 + StringTable::oops_do(roots); 81.14 + } 81.15 + if (JavaObjectsInPerm) { 81.16 + // Verify the string table contents are in the perm gen 81.17 + NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure)); 81.18 + } 81.19 } 81.20 81.21 if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
82.1 --- a/src/share/vm/memory/universe.cpp Fri Mar 25 11:29:30 2011 -0700 82.2 +++ b/src/share/vm/memory/universe.cpp Fri Mar 25 18:19:22 2011 -0400 82.3 @@ -1,5 +1,5 @@ 82.4 /* 82.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 82.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 82.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 82.8 * 82.9 * This code is free software; you can redistribute it and/or modify it 82.10 @@ -51,6 +51,7 @@ 82.11 #include "oops/cpCacheKlass.hpp" 82.12 #include "oops/cpCacheOop.hpp" 82.13 #include "oops/instanceKlass.hpp" 82.14 +#include "oops/instanceMirrorKlass.hpp" 82.15 #include "oops/instanceKlassKlass.hpp" 82.16 #include "oops/instanceRefKlass.hpp" 82.17 #include "oops/klassKlass.hpp" 82.18 @@ -521,6 +522,7 @@ 82.19 { objArrayKlassKlass o; add_vtable(list, &n, &o, count); } 82.20 { instanceKlassKlass o; add_vtable(list, &n, &o, count); } 82.21 { instanceKlass o; add_vtable(list, &n, &o, count); } 82.22 + { instanceMirrorKlass o; add_vtable(list, &n, &o, count); } 82.23 { instanceRefKlass o; add_vtable(list, &n, &o, count); } 82.24 { typeArrayKlassKlass o; add_vtable(list, &n, &o, count); } 82.25 { typeArrayKlass o; add_vtable(list, &n, &o, count); } 82.26 @@ -547,7 +549,7 @@ 82.27 KlassHandle k(THREAD, klassOop(obj)); 82.28 // We will never reach the CATCH below since Exceptions::_throw will cause 82.29 // the VM to exit if an exception is thrown during initialization 82.30 - java_lang_Class::create_mirror(k, CATCH); 82.31 + java_lang_Class::fixup_mirror(k, CATCH); 82.32 // This call unconditionally creates a new mirror for k, 82.33 // and links in k's component_mirror field if k is an array. 82.34 // If k is an objArray, k's element type must already have 82.35 @@ -605,6 +607,10 @@ 82.36 // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note 82.37 // that the number of objects allocated at this point is very small. 82.38 assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded"); 82.39 + 82.40 + // Cache the start of the static fields 82.41 + instanceMirrorKlass::init_offset_of_static_fields(); 82.42 + 82.43 FixupMirrorClosure blk; 82.44 Universe::heap()->permanent_object_iterate(&blk); 82.45 } 82.46 @@ -1313,6 +1319,8 @@ 82.47 JNIHandles::verify(); 82.48 if (!silent) gclog_or_tty->print("C-heap "); 82.49 os::check_heap(); 82.50 + if (!silent) gclog_or_tty->print("code cache "); 82.51 + CodeCache::verify_oops(); 82.52 if (!silent) gclog_or_tty->print_cr("]"); 82.53 82.54 _verify_in_progress = false;
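Three related threads come together in this universe.cpp change: registering an instanceMirrorKlass vtable in the self-patching vtable list is what pushes vtbl_list_size from 16 to 17 in the compactingPermGenGen.hpp hunk earlier in this changeset; create_mirror becomes fixup_mirror because classes created before java.lang.Class is loaded now get placeholder mirrors that are patched afterwards by FixupMirrorClosure; and instanceMirrorKlass::init_offset_of_static_fields() must run before that fixup so the cached offset of the statics block inside each mirror is known.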
83.1 --- a/src/share/vm/oops/arrayKlassKlass.cpp Fri Mar 25 11:29:30 2011 -0700 83.2 +++ b/src/share/vm/oops/arrayKlassKlass.cpp Fri Mar 25 18:19:22 2011 -0400 83.3 @@ -28,6 +28,13 @@ 83.4 #include "oops/arrayKlassKlass.hpp" 83.5 #include "oops/oop.inline.hpp" 83.6 #include "runtime/handles.inline.hpp" 83.7 +#ifndef SERIALGC 83.8 +#include "gc_implementation/parNew/parOopClosures.inline.hpp" 83.9 +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 83.10 +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" 83.11 +#include "memory/cardTableRS.hpp" 83.12 +#include "oops/oop.pcgc.inline.hpp" 83.13 +#endif 83.14 83.15 83.16 klassOop arrayKlassKlass::create_klass(TRAPS) { 83.17 @@ -104,9 +111,12 @@ 83.18 int arrayKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { 83.19 assert(obj->is_klass(), "must be klass"); 83.20 arrayKlass* ak = arrayKlass::cast(klassOop(obj)); 83.21 - blk->do_oop(ak->adr_component_mirror()); 83.22 - blk->do_oop(ak->adr_lower_dimension()); 83.23 - blk->do_oop(ak->adr_higher_dimension()); 83.24 + oop* addr = ak->adr_component_mirror(); 83.25 + if (mr.contains(addr)) blk->do_oop(addr); 83.26 + addr = ak->adr_lower_dimension(); 83.27 + if (mr.contains(addr)) blk->do_oop(addr); 83.28 + addr = ak->adr_higher_dimension(); 83.29 + if (mr.contains(addr)) blk->do_oop(addr); 83.30 ak->vtable()->oop_oop_iterate_m(blk, mr); 83.31 return klassKlass::oop_oop_iterate_m(obj, blk, mr); 83.32 } 83.33 @@ -114,6 +124,12 @@ 83.34 #ifndef SERIALGC 83.35 void arrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 83.36 assert(obj->blueprint()->oop_is_arrayKlass(),"must be an array klass"); 83.37 + arrayKlass* ak = arrayKlass::cast(klassOop(obj)); 83.38 + oop* p = ak->adr_component_mirror(); 83.39 + if (PSScavenge::should_scavenge(p)) { 83.40 + pm->claim_or_forward_depth(p); 83.41 + } 83.42 + klassKlass::oop_push_contents(pm, obj); 83.43 } 83.44 83.45 int arrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
84.1 --- a/src/share/vm/oops/constantPoolKlass.cpp Fri Mar 25 11:29:30 2011 -0700 84.2 +++ b/src/share/vm/oops/constantPoolKlass.cpp Fri Mar 25 18:19:22 2011 -0400 84.3 @@ -285,10 +285,11 @@ 84.4 void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 84.5 assert(obj->is_constantPool(), "should be constant pool"); 84.6 constantPoolOop cp = (constantPoolOop) obj; 84.7 - if (AnonymousClasses && cp->has_pseudo_string() && cp->tags() != NULL) { 84.8 - oop* base = (oop*)cp->base(); 84.9 - for (int i = 0; i < cp->length(); ++i, ++base) { 84.10 + if (cp->tags() != NULL && 84.11 + (!JavaObjectsInPerm || (AnonymousClasses && cp->has_pseudo_string()))) { 84.12 + for (int i = 1; i < cp->length(); ++i) { 84.13 if (cp->tag_at(i).is_string()) { 84.14 + oop* base = cp->obj_at_addr_raw(i); 84.15 if (PSScavenge::should_scavenge(base)) { 84.16 pm->claim_or_forward_depth(base); 84.17 } 84.18 @@ -460,7 +461,8 @@ 84.19 if (cp->tag_at(i).is_string()) { 84.20 if (!cp->has_pseudo_string()) { 84.21 if (entry.is_oop()) { 84.22 - guarantee(entry.get_oop()->is_perm(), "should be in permspace"); 84.23 + guarantee(!JavaObjectsInPerm || entry.get_oop()->is_perm(), 84.24 + "should be in permspace"); 84.25 guarantee(entry.get_oop()->is_instance(), "should be instance"); 84.26 } 84.27 } else {
85.1 --- a/src/share/vm/oops/constantPoolOop.cpp Fri Mar 25 11:29:30 2011 -0700 85.2 +++ b/src/share/vm/oops/constantPoolOop.cpp Fri Mar 25 18:19:22 2011 -0400 85.3 @@ -1,5 +1,5 @@ 85.4 /* 85.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 85.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 85.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 85.8 * 85.9 * This code is free software; you can redistribute it and/or modify it 85.10 @@ -481,7 +481,7 @@ 85.11 { 85.12 klassOop resolved = klass_at_impl(this_oop, index, CHECK_NULL); 85.13 // ldc wants the java mirror. 85.14 - result_oop = resolved->klass_part()->java_mirror(); 85.15 + result_oop = resolved->java_mirror(); 85.16 break; 85.17 } 85.18
86.1 --- a/src/share/vm/oops/cpCacheKlass.cpp Fri Mar 25 11:29:30 2011 -0700 86.2 +++ b/src/share/vm/oops/cpCacheKlass.cpp Fri Mar 25 18:19:22 2011 -0400 86.3 @@ -168,22 +168,18 @@ 86.4 void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm, 86.5 oop obj) { 86.6 assert(obj->is_constantPoolCache(), "should be constant pool"); 86.7 - if (EnableInvokeDynamic) { 86.8 + if (ScavengeRootsInCode) { 86.9 constantPoolCacheOop cache = (constantPoolCacheOop)obj; 86.10 // during a scavenge, it is safe to inspect my pool, since it is perm 86.11 constantPoolOop pool = cache->constant_pool(); 86.12 assert(pool->is_constantPool(), "should be constant pool"); 86.13 - if (pool->has_invokedynamic()) { 86.14 - for (int i = 0; i < cache->length(); i++) { 86.15 - ConstantPoolCacheEntry* e = cache->entry_at(i); 86.16 - oop* p = (oop*)&e->_f1; 86.17 - if (e->is_secondary_entry()) { 86.18 - if (PSScavenge::should_scavenge(p)) 86.19 - pm->claim_or_forward_depth(p); 86.20 - assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)), 86.21 - "no live oops here"); 86.22 - } 86.23 - } 86.24 + for (int i = 0; i < cache->length(); i++) { 86.25 + ConstantPoolCacheEntry* e = cache->entry_at(i); 86.26 + oop* p = (oop*)&e->_f1; 86.27 + if (PSScavenge::should_scavenge(p)) 86.28 + pm->claim_or_forward_depth(p); 86.29 + assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)), 86.30 + "no live oops here"); 86.31 } 86.32 } 86.33 }
87.1 --- a/src/share/vm/oops/cpCacheOop.cpp Fri Mar 25 11:29:30 2011 -0700 87.2 +++ b/src/share/vm/oops/cpCacheOop.cpp Fri Mar 25 18:19:22 2011 -0400 87.3 @@ -133,7 +133,7 @@ 87.4 TosState field_type, 87.5 bool is_final, 87.6 bool is_volatile) { 87.7 - set_f1(field_holder()); 87.8 + set_f1(field_holder()->java_mirror()); 87.9 set_f2(field_offset); 87.10 // The field index is used by jvm/ti and is the index into fields() array 87.11 // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
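Since static fields now live in the mirror, the resolved-field cache entry caches the holder's java.lang.Class instance in f1 rather than the holder klassOop, so the getstatic/putstatic paths can use f1 directly as the base object. Illustrative only, with accessor names assumed rather than taken from this patch:

    oop      base   = cache_entry->f1();   // the holder's java.lang.Class mirror
    intptr_t offset = cache_entry->f2();   // field offset within that mirror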
88.1 --- a/src/share/vm/oops/instanceKlass.cpp Fri Mar 25 11:29:30 2011 -0700 88.2 +++ b/src/share/vm/oops/instanceKlass.cpp Fri Mar 25 18:19:22 2011 -0400 88.3 @@ -37,6 +37,7 @@ 88.4 #include "memory/oopFactory.hpp" 88.5 #include "memory/permGen.hpp" 88.6 #include "oops/instanceKlass.hpp" 88.7 +#include "oops/instanceMirrorKlass.hpp" 88.8 #include "oops/instanceOop.hpp" 88.9 #include "oops/methodOop.hpp" 88.10 #include "oops/objArrayKlassKlass.hpp" 88.11 @@ -649,6 +650,7 @@ 88.12 } 88.13 88.14 instanceOop instanceKlass::allocate_instance(TRAPS) { 88.15 + assert(!oop_is_instanceMirror(), "wrong allocation path"); 88.16 bool has_finalizer_flag = has_finalizer(); // Query before possible GC 88.17 int size = size_helper(); // Query before forming handle. 88.18 88.19 @@ -669,6 +671,7 @@ 88.20 // instances so simply disallow finalizable perm objects. This can 88.21 // be relaxed if a need for it is found. 88.22 assert(!has_finalizer(), "perm objects not allowed to have finalizers"); 88.23 + assert(!oop_is_instanceMirror(), "wrong allocation path"); 88.24 int size = size_helper(); // Query before forming handle. 88.25 KlassHandle h_k(THREAD, as_klassOop()); 88.26 instanceOop i = (instanceOop) 88.27 @@ -898,6 +901,7 @@ 88.28 } 88.29 } 88.30 88.31 + 88.32 void instanceKlass::do_local_static_fields(FieldClosure* cl) { 88.33 fieldDescriptor fd; 88.34 int length = fields()->length(); 88.35 @@ -1609,36 +1613,6 @@ 88.36 // The following macros call specialized macros, passing either oop or 88.37 // narrowOop as the specialization type. These test the UseCompressedOops 88.38 // flag. 88.39 -#define InstanceKlass_OOP_ITERATE(start_p, count, \ 88.40 - do_oop, assert_fn) \ 88.41 -{ \ 88.42 - if (UseCompressedOops) { \ 88.43 - InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ 88.44 - start_p, count, \ 88.45 - do_oop, assert_fn) \ 88.46 - } else { \ 88.47 - InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ 88.48 - start_p, count, \ 88.49 - do_oop, assert_fn) \ 88.50 - } \ 88.51 -} 88.52 - 88.53 -#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ 88.54 - do_oop, assert_fn) \ 88.55 -{ \ 88.56 - if (UseCompressedOops) { \ 88.57 - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ 88.58 - start_p, count, \ 88.59 - low, high, \ 88.60 - do_oop, assert_fn) \ 88.61 - } else { \ 88.62 - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ 88.63 - start_p, count, \ 88.64 - low, high, \ 88.65 - do_oop, assert_fn) \ 88.66 - } \ 88.67 -} 88.68 - 88.69 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \ 88.70 { \ 88.71 /* Compute oopmap block range. 
The common case \ 88.72 @@ -1711,38 +1685,6 @@ 88.73 } \ 88.74 } 88.75 88.76 -void instanceKlass::follow_static_fields() { 88.77 - InstanceKlass_OOP_ITERATE( \ 88.78 - start_of_static_fields(), static_oop_field_size(), \ 88.79 - MarkSweep::mark_and_push(p), \ 88.80 - assert_is_in_closed_subset) 88.81 -} 88.82 - 88.83 -#ifndef SERIALGC 88.84 -void instanceKlass::follow_static_fields(ParCompactionManager* cm) { 88.85 - InstanceKlass_OOP_ITERATE( \ 88.86 - start_of_static_fields(), static_oop_field_size(), \ 88.87 - PSParallelCompact::mark_and_push(cm, p), \ 88.88 - assert_is_in) 88.89 -} 88.90 -#endif // SERIALGC 88.91 - 88.92 -void instanceKlass::adjust_static_fields() { 88.93 - InstanceKlass_OOP_ITERATE( \ 88.94 - start_of_static_fields(), static_oop_field_size(), \ 88.95 - MarkSweep::adjust_pointer(p), \ 88.96 - assert_nothing) 88.97 -} 88.98 - 88.99 -#ifndef SERIALGC 88.100 -void instanceKlass::update_static_fields() { 88.101 - InstanceKlass_OOP_ITERATE( \ 88.102 - start_of_static_fields(), static_oop_field_size(), \ 88.103 - PSParallelCompact::adjust_pointer(p), \ 88.104 - assert_nothing) 88.105 -} 88.106 -#endif // SERIALGC 88.107 - 88.108 void instanceKlass::oop_follow_contents(oop obj) { 88.109 assert(obj != NULL, "can't follow the content of NULL object"); 88.110 obj->follow_header(); 88.111 @@ -1829,22 +1771,6 @@ 88.112 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 88.113 #endif // !SERIALGC 88.114 88.115 -void instanceKlass::iterate_static_fields(OopClosure* closure) { 88.116 - InstanceKlass_OOP_ITERATE( \ 88.117 - start_of_static_fields(), static_oop_field_size(), \ 88.118 - closure->do_oop(p), \ 88.119 - assert_is_in_reserved) 88.120 -} 88.121 - 88.122 -void instanceKlass::iterate_static_fields(OopClosure* closure, 88.123 - MemRegion mr) { 88.124 - InstanceKlass_BOUNDED_OOP_ITERATE( \ 88.125 - start_of_static_fields(), static_oop_field_size(), \ 88.126 - mr.start(), mr.end(), \ 88.127 - (closure)->do_oop_v(p), \ 88.128 - assert_is_in_closed_subset) 88.129 -} 88.130 - 88.131 int instanceKlass::oop_adjust_pointers(oop obj) { 88.132 int size = size_helper(); 88.133 InstanceKlass_OOP_MAP_ITERATE( \ 88.134 @@ -1873,21 +1799,6 @@ 88.135 return size_helper(); 88.136 } 88.137 88.138 -void instanceKlass::push_static_fields(PSPromotionManager* pm) { 88.139 - InstanceKlass_OOP_ITERATE( \ 88.140 - start_of_static_fields(), static_oop_field_size(), \ 88.141 - if (PSScavenge::should_scavenge(p)) { \ 88.142 - pm->claim_or_forward_depth(p); \ 88.143 - }, \ 88.144 - assert_nothing ) 88.145 -} 88.146 - 88.147 -void instanceKlass::copy_static_fields(ParCompactionManager* cm) { 88.148 - InstanceKlass_OOP_ITERATE( \ 88.149 - start_of_static_fields(), static_oop_field_size(), \ 88.150 - PSParallelCompact::adjust_pointer(p), \ 88.151 - assert_is_in) 88.152 -} 88.153 #endif // SERIALGC 88.154 88.155 // This klass is alive but the implementor link is not followed/updated. 
88.156 @@ -2002,6 +1913,11 @@ 88.157 if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount(); 88.158 } 88.159 88.160 +address instanceKlass::static_field_addr(int offset) { 88.161 + return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror()); 88.162 +} 88.163 + 88.164 + 88.165 const char* instanceKlass::signature_name() const { 88.166 const char* src = (const char*) (name()->as_C_string()); 88.167 const int src_length = (int)strlen(src); 88.168 @@ -2369,7 +2285,7 @@ 88.169 88.170 void FieldPrinter::do_field(fieldDescriptor* fd) { 88.171 _st->print(BULLET); 88.172 - if (fd->is_static() || (_obj == NULL)) { 88.173 + if (_obj == NULL) { 88.174 fd->print_on(_st); 88.175 _st->cr(); 88.176 } else { 88.177 @@ -2399,8 +2315,8 @@ 88.178 } 88.179 88.180 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj)); 88.181 - FieldPrinter print_nonstatic_field(st, obj); 88.182 - do_nonstatic_fields(&print_nonstatic_field); 88.183 + FieldPrinter print_field(st, obj); 88.184 + do_nonstatic_fields(&print_field); 88.185 88.186 if (as_klassOop() == SystemDictionary::Class_klass()) { 88.187 st->print(BULLET"signature: "); 88.188 @@ -2418,6 +2334,12 @@ 88.189 st->print(BULLET"fake entry for array: "); 88.190 array_klass->print_value_on(st); 88.191 st->cr(); 88.192 + st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj)); 88.193 + st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj)); 88.194 + klassOop real_klass = java_lang_Class::as_klassOop(obj); 88.195 + if (real_klass && real_klass->klass_part()->oop_is_instance()) { 88.196 + instanceKlass::cast(real_klass)->do_local_static_fields(&print_field); 88.197 + } 88.198 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { 88.199 st->print(BULLET"signature: "); 88.200 java_lang_invoke_MethodType::print_signature(obj, st); 88.201 @@ -2560,7 +2482,7 @@ 88.202 88.203 88.204 void JNIid::verify(klassOop holder) { 88.205 - int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields(); 88.206 + int first_field_offset = instanceMirrorKlass::offset_of_static_fields(); 88.207 int end_field_offset; 88.208 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize); 88.209
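All of the *_static_fields GC helpers disappear from instanceKlass because the klass object no longer embeds static fields; the new instanceMirrorKlass (added later in this changeset) iterates those slots as part of the mirror object instead. The one piece of glue kept here is static_field_addr(), which maps a field offset into the mirror. An illustrative read of a static slot, with hypothetical local names:

    fieldDescriptor fd;
    if (ik->find_local_field_from_offset(offset, true /* static */, &fd)) {
      address addr = ik->static_field_addr(fd.offset());  // points into ik->java_mirror()
      jint raw     = *(jint*)addr;                        // e.g. a static int slot
    }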
89.1 --- a/src/share/vm/oops/instanceKlass.hpp Fri Mar 25 11:29:30 2011 -0700 89.2 +++ b/src/share/vm/oops/instanceKlass.hpp Fri Mar 25 18:19:22 2011 -0400 89.3 @@ -75,8 +75,6 @@ 89.4 // [Java vtable length ] 89.5 // [oop map cache (stack maps) ] 89.6 // [EMBEDDED Java vtable ] size in words = vtable_len 89.7 -// [EMBEDDED static oop fields ] size in words = static_oop_fields_size 89.8 -// [ static non-oop fields ] size in words = static_field_size - static_oop_fields_size 89.9 // [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size 89.10 // 89.11 // The embedded nonstatic oop-map blocks are short pairs (offset, length) indicating 89.12 @@ -230,7 +228,7 @@ 89.13 // (including inherited fields but after header_size()). 89.14 int _nonstatic_field_size; 89.15 int _static_field_size; // number words used by static fields (oop and non-oop) in this klass 89.16 - int _static_oop_field_size;// number of static oop fields in this klass 89.17 + int _static_oop_field_count;// number of static oop fields in this klass 89.18 int _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks 89.19 bool _is_marked_dependent; // used for marking during flushing and deoptimization 89.20 bool _rewritten; // methods rewritten. 89.21 @@ -281,8 +279,8 @@ 89.22 int static_field_size() const { return _static_field_size; } 89.23 void set_static_field_size(int size) { _static_field_size = size; } 89.24 89.25 - int static_oop_field_size() const { return _static_oop_field_size; } 89.26 - void set_static_oop_field_size(int size) { _static_oop_field_size = size; } 89.27 + int static_oop_field_count() const { return _static_oop_field_count; } 89.28 + void set_static_oop_field_count(int size) { _static_oop_field_count = size; } 89.29 89.30 // Java vtable 89.31 int vtable_length() const { return _vtable_len; } 89.32 @@ -660,6 +658,7 @@ 89.33 89.34 // Casting from klassOop 89.35 static instanceKlass* cast(klassOop k) { 89.36 + assert(k->is_klass(), "must be"); 89.37 Klass* kp = k->klass_part(); 89.38 assert(kp->null_vtbl() || kp->oop_is_instance_slow(), "cast to instanceKlass"); 89.39 return (instanceKlass*) kp; 89.40 @@ -667,7 +666,7 @@ 89.41 89.42 // Sizing (in words) 89.43 static int header_size() { return align_object_offset(oopDesc::header_size() + sizeof(instanceKlass)/HeapWordSize); } 89.44 - int object_size() const { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + static_field_size() + nonstatic_oop_map_size()); } 89.45 + int object_size() const { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + nonstatic_oop_map_size()); } 89.46 static int vtable_start_offset() { return header_size(); } 89.47 static int vtable_length_offset() { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; } 89.48 static int object_size(int extra) { return align_object_size(header_size() + extra); } 89.49 @@ -676,20 +675,12 @@ 89.50 intptr_t* start_of_itable() const { return start_of_vtable() + align_object_offset(vtable_length()); } 89.51 int itable_offset_in_words() const { return start_of_itable() - (intptr_t*)as_klassOop(); } 89.52 89.53 - // Static field offset is an offset into the Heap, should be converted by 89.54 - // based on UseCompressedOop for traversal 89.55 - HeapWord* start_of_static_fields() const { 89.56 - return (HeapWord*)(start_of_itable() + align_object_offset(itable_length())); 89.57 - } 89.58 - 89.59 intptr_t* end_of_itable() const { return start_of_itable() + 
itable_length(); } 89.60 89.61 - int offset_of_static_fields() const { 89.62 - return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop(); 89.63 - } 89.64 + address static_field_addr(int offset); 89.65 89.66 OopMapBlock* start_of_nonstatic_oop_maps() const { 89.67 - return (OopMapBlock*) (start_of_static_fields() + static_field_size()); 89.68 + return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length())); 89.69 } 89.70 89.71 // Allocation profiling support 89.72 @@ -719,8 +710,6 @@ 89.73 89.74 // Garbage collection 89.75 void oop_follow_contents(oop obj); 89.76 - void follow_static_fields(); 89.77 - void adjust_static_fields(); 89.78 int oop_adjust_pointers(oop obj); 89.79 bool object_is_parsable() const { return _init_state != unparsable_by_gc; } 89.80 // Value of _init_state must be zero (unparsable_by_gc) when klass field is set. 89.81 @@ -732,16 +721,6 @@ 89.82 // Parallel Scavenge and Parallel Old 89.83 PARALLEL_GC_DECLS 89.84 89.85 -#ifndef SERIALGC 89.86 - // Parallel Scavenge 89.87 - void push_static_fields(PSPromotionManager* pm); 89.88 - 89.89 - // Parallel Old 89.90 - void follow_static_fields(ParCompactionManager* cm); 89.91 - void copy_static_fields(ParCompactionManager* cm); 89.92 - void update_static_fields(); 89.93 -#endif // SERIALGC 89.94 - 89.95 // Naming 89.96 const char* signature_name() const; 89.97 89.98 @@ -770,9 +749,6 @@ 89.99 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) 89.100 #endif // !SERIALGC 89.101 89.102 - void iterate_static_fields(OopClosure* closure); 89.103 - void iterate_static_fields(OopClosure* closure, MemRegion mr); 89.104 - 89.105 private: 89.106 // initialization state 89.107 #ifdef ASSERT 89.108 @@ -926,6 +902,10 @@ 89.109 // Identifier lookup 89.110 JNIid* find(int offset); 89.111 89.112 + bool find_local_field(fieldDescriptor* fd) { 89.113 + return instanceKlass::cast(holder())->find_local_field_from_offset(offset(), true, fd); 89.114 + } 89.115 + 89.116 // Garbage collection support 89.117 oop* holder_addr() { return (oop*)&_holder; } 89.118 void oops_do(OopClosure* f);
90.1 --- a/src/share/vm/oops/instanceKlassKlass.cpp Fri Mar 25 11:29:30 2011 -0700 90.2 +++ b/src/share/vm/oops/instanceKlassKlass.cpp Fri Mar 25 18:19:22 2011 -0400 90.3 @@ -31,6 +31,7 @@ 90.4 #include "memory/gcLocker.hpp" 90.5 #include "oops/constantPoolOop.hpp" 90.6 #include "oops/instanceKlass.hpp" 90.7 +#include "oops/instanceMirrorKlass.hpp" 90.8 #include "oops/instanceKlassKlass.hpp" 90.9 #include "oops/instanceRefKlass.hpp" 90.10 #include "oops/objArrayKlassKlass.hpp" 90.11 @@ -86,7 +87,6 @@ 90.12 assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass"); 90.13 90.14 instanceKlass* ik = instanceKlass::cast(klassOop(obj)); 90.15 - ik->follow_static_fields(); 90.16 { 90.17 HandleMark hm; 90.18 ik->vtable()->oop_follow_contents(); 90.19 @@ -127,7 +127,6 @@ 90.20 assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass"); 90.21 90.22 instanceKlass* ik = instanceKlass::cast(klassOop(obj)); 90.23 - ik->follow_static_fields(cm); 90.24 ik->vtable()->oop_follow_contents(cm); 90.25 ik->itable()->oop_follow_contents(cm); 90.26 90.27 @@ -168,7 +167,6 @@ 90.28 // Don't call size() or oop_size() since that is a virtual call. 90.29 int size = ik->object_size(); 90.30 90.31 - ik->iterate_static_fields(blk); 90.32 ik->vtable()->oop_oop_iterate(blk); 90.33 ik->itable()->oop_oop_iterate(blk); 90.34 90.35 @@ -209,7 +207,6 @@ 90.36 // Don't call size() or oop_size() since that is a virtual call. 90.37 int size = ik->object_size(); 90.38 90.39 - ik->iterate_static_fields(blk, mr); 90.40 ik->vtable()->oop_oop_iterate_m(blk, mr); 90.41 ik->itable()->oop_oop_iterate_m(blk, mr); 90.42 90.43 @@ -266,7 +263,6 @@ 90.44 assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass"); 90.45 90.46 instanceKlass* ik = instanceKlass::cast(klassOop(obj)); 90.47 - ik->adjust_static_fields(); 90.48 ik->vtable()->oop_adjust_pointers(); 90.49 ik->itable()->oop_adjust_pointers(); 90.50 90.51 @@ -300,7 +296,6 @@ 90.52 #ifndef SERIALGC 90.53 void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 90.54 instanceKlass* ik = instanceKlass::cast(klassOop(obj)); 90.55 - ik->push_static_fields(pm); 90.56 90.57 oop* loader_addr = ik->adr_class_loader(); 90.58 if (PSScavenge::should_scavenge(loader_addr)) { 90.59 @@ -336,7 +331,6 @@ 90.60 "must be instance klass"); 90.61 90.62 instanceKlass* ik = instanceKlass::cast(klassOop(obj)); 90.63 - ik->update_static_fields(); 90.64 ik->vtable()->oop_update_pointers(cm); 90.65 ik->itable()->oop_update_pointers(cm); 90.66 90.67 @@ -356,22 +350,28 @@ 90.68 #endif // SERIALGC 90.69 90.70 klassOop 90.71 -instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len, 90.72 +instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int itable_len, 90.73 int static_field_size, 90.74 unsigned nonstatic_oop_map_count, 90.75 ReferenceType rt, TRAPS) { 90.76 90.77 const int nonstatic_oop_map_size = 90.78 instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count); 90.79 - int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + static_field_size + nonstatic_oop_map_size); 90.80 + int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + nonstatic_oop_map_size); 90.81 90.82 // Allocation 90.83 KlassHandle h_this_klass(THREAD, as_klassOop()); 90.84 KlassHandle k; 90.85 if (rt == REF_NONE) { 90.86 - // regular klass 90.87 - instanceKlass o; 90.88 - k = 
base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL); 90.89 + if (name != vmSymbols::java_lang_Class()) { 90.90 + // regular klass 90.91 + instanceKlass o; 90.92 + k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL); 90.93 + } else { 90.94 + // Class 90.95 + instanceMirrorKlass o; 90.96 + k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL); 90.97 + } 90.98 } else { 90.99 // reference klass 90.100 instanceRefKlass o; 90.101 @@ -408,7 +408,7 @@ 90.102 ik->set_source_debug_extension(NULL); 90.103 ik->set_array_name(NULL); 90.104 ik->set_inner_classes(NULL); 90.105 - ik->set_static_oop_field_size(0); 90.106 + ik->set_static_oop_field_count(0); 90.107 ik->set_nonstatic_field_size(0); 90.108 ik->set_is_marked_dependent(false); 90.109 ik->set_init_state(instanceKlass::allocated); 90.110 @@ -442,9 +442,6 @@ 90.111 // To get verify to work - must be set to partial loaded before first GC point. 90.112 k()->set_partially_loaded(); 90.113 } 90.114 - 90.115 - // GC can happen here 90.116 - java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror 90.117 return k(); 90.118 } 90.119 90.120 @@ -566,13 +563,6 @@ 90.121 FieldPrinter print_nonstatic_field(st); 90.122 ik->do_nonstatic_fields(&print_nonstatic_field); 90.123 90.124 - st->print(BULLET"static oop maps: "); 90.125 - if (ik->static_oop_field_size() > 0) { 90.126 - int first_offset = ik->offset_of_static_fields(); 90.127 - st->print("%d-%d", first_offset, first_offset + ik->static_oop_field_size() - 1); 90.128 - } 90.129 - st->cr(); 90.130 - 90.131 st->print(BULLET"non-static oop maps: "); 90.132 OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); 90.133 OopMapBlock* end_map = map + ik->nonstatic_oop_map_count(); 90.134 @@ -630,7 +620,6 @@ 90.135 90.136 // Verify static fields 90.137 VerifyFieldClosure blk; 90.138 - ik->iterate_static_fields(&blk); 90.139 90.140 // Verify vtables 90.141 if (ik->is_linked()) {
91.1 --- a/src/share/vm/oops/instanceKlassKlass.hpp Fri Mar 25 11:29:30 2011 -0700 91.2 +++ b/src/share/vm/oops/instanceKlassKlass.hpp Fri Mar 25 18:19:22 2011 -0400 91.3 @@ -1,5 +1,5 @@ 91.4 /* 91.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 91.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 91.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 91.8 * 91.9 * This code is free software; you can redistribute it and/or modify it 91.10 @@ -41,7 +41,8 @@ 91.11 // Allocation 91.12 DEFINE_ALLOCATE_PERMANENT(instanceKlassKlass); 91.13 static klassOop create_klass(TRAPS); 91.14 - klassOop allocate_instance_klass(int vtable_len, 91.15 + klassOop allocate_instance_klass(Symbol* name, 91.16 + int vtable_len, 91.17 int itable_len, 91.18 int static_field_size, 91.19 unsigned int nonstatic_oop_map_count,
92.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 92.2 +++ b/src/share/vm/oops/instanceMirrorKlass.cpp Fri Mar 25 18:19:22 2011 -0400 92.3 @@ -0,0 +1,313 @@ 92.4 +/* 92.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. 92.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 92.7 + * 92.8 + * This code is free software; you can redistribute it and/or modify it 92.9 + * under the terms of the GNU General Public License version 2 only, as 92.10 + * published by the Free Software Foundation. 92.11 + * 92.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 92.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 92.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 92.15 + * version 2 for more details (a copy is included in the LICENSE file that 92.16 + * accompanied this code). 92.17 + * 92.18 + * You should have received a copy of the GNU General Public License version 92.19 + * 2 along with this work; if not, write to the Free Software Foundation, 92.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 92.21 + * 92.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 92.23 + * or visit www.oracle.com if you need additional information or have any 92.24 + * questions. 92.25 + * 92.26 + */ 92.27 + 92.28 +#include "precompiled.hpp" 92.29 +#include "classfile/javaClasses.hpp" 92.30 +#include "classfile/systemDictionary.hpp" 92.31 +#include "gc_implementation/shared/markSweep.inline.hpp" 92.32 +#include "gc_interface/collectedHeap.inline.hpp" 92.33 +#include "memory/genOopClosures.inline.hpp" 92.34 +#include "memory/oopFactory.hpp" 92.35 +#include "memory/permGen.hpp" 92.36 +#include "oops/instanceKlass.hpp" 92.37 +#include "oops/instanceMirrorKlass.hpp" 92.38 +#include "oops/instanceOop.hpp" 92.39 +#include "oops/oop.inline.hpp" 92.40 +#include "oops/symbol.hpp" 92.41 +#include "runtime/handles.inline.hpp" 92.42 +#ifndef SERIALGC 92.43 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 92.44 +#include "gc_implementation/g1/g1OopClosures.inline.hpp" 92.45 +#include "gc_implementation/g1/g1RemSet.inline.hpp" 92.46 +#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 92.47 +#include "gc_implementation/parNew/parOopClosures.inline.hpp" 92.48 +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 92.49 +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" 92.50 +#include "oops/oop.pcgc.inline.hpp" 92.51 +#endif 92.52 + 92.53 +int instanceMirrorKlass::_offset_of_static_fields = 0; 92.54 + 92.55 +#ifdef ASSERT 92.56 +template <class T> void assert_is_in(T *p) { 92.57 + T heap_oop = oopDesc::load_heap_oop(p); 92.58 + if (!oopDesc::is_null(heap_oop)) { 92.59 + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); 92.60 + assert(Universe::heap()->is_in(o), "should be in heap"); 92.61 + } 92.62 +} 92.63 +template <class T> void assert_is_in_closed_subset(T *p) { 92.64 + T heap_oop = oopDesc::load_heap_oop(p); 92.65 + if (!oopDesc::is_null(heap_oop)) { 92.66 + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); 92.67 + assert(Universe::heap()->is_in_closed_subset(o), "should be in closed"); 92.68 + } 92.69 +} 92.70 +template <class T> void assert_is_in_reserved(T *p) { 92.71 + T heap_oop = oopDesc::load_heap_oop(p); 92.72 + if (!oopDesc::is_null(heap_oop)) { 92.73 + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); 92.74 + assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); 92.75 
+ } 92.76 +} 92.77 +template <class T> void assert_nothing(T *p) {} 92.78 + 92.79 +#else 92.80 +template <class T> void assert_is_in(T *p) {} 92.81 +template <class T> void assert_is_in_closed_subset(T *p) {} 92.82 +template <class T> void assert_is_in_reserved(T *p) {} 92.83 +template <class T> void assert_nothing(T *p) {} 92.84 +#endif // ASSERT 92.85 + 92.86 +#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \ 92.87 + T, start_p, count, do_oop, \ 92.88 + assert_fn) \ 92.89 +{ \ 92.90 + T* p = (T*)(start_p); \ 92.91 + T* const end = p + (count); \ 92.92 + while (p < end) { \ 92.93 + (assert_fn)(p); \ 92.94 + do_oop; \ 92.95 + ++p; \ 92.96 + } \ 92.97 +} 92.98 + 92.99 +#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ 92.100 + T, start_p, count, low, high, \ 92.101 + do_oop, assert_fn) \ 92.102 +{ \ 92.103 + T* const l = (T*)(low); \ 92.104 + T* const h = (T*)(high); \ 92.105 + assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ 92.106 + mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ 92.107 + "bounded region must be properly aligned"); \ 92.108 + T* p = (T*)(start_p); \ 92.109 + T* end = p + (count); \ 92.110 + if (p < l) p = l; \ 92.111 + if (end > h) end = h; \ 92.112 + while (p < end) { \ 92.113 + (assert_fn)(p); \ 92.114 + do_oop; \ 92.115 + ++p; \ 92.116 + } \ 92.117 +} 92.118 + 92.119 + 92.120 +#define InstanceMirrorKlass_OOP_ITERATE(start_p, count, \ 92.121 + do_oop, assert_fn) \ 92.122 +{ \ 92.123 + if (UseCompressedOops) { \ 92.124 + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ 92.125 + start_p, count, \ 92.126 + do_oop, assert_fn) \ 92.127 + } else { \ 92.128 + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop, \ 92.129 + start_p, count, \ 92.130 + do_oop, assert_fn) \ 92.131 + } \ 92.132 +} 92.133 + 92.134 +// The following macros call specialized macros, passing either oop or 92.135 +// narrowOop as the specialization type. These test the UseCompressedOops 92.136 +// flag. 
92.137 +#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ 92.138 + do_oop, assert_fn) \ 92.139 +{ \ 92.140 + if (UseCompressedOops) { \ 92.141 + InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ 92.142 + start_p, count, \ 92.143 + low, high, \ 92.144 + do_oop, assert_fn) \ 92.145 + } else { \ 92.146 + InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ 92.147 + start_p, count, \ 92.148 + low, high, \ 92.149 + do_oop, assert_fn) \ 92.150 + } \ 92.151 +} 92.152 + 92.153 + 92.154 +void instanceMirrorKlass::oop_follow_contents(oop obj) { 92.155 + instanceKlass::oop_follow_contents(obj); 92.156 + InstanceMirrorKlass_OOP_ITERATE( \ 92.157 + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ 92.158 + MarkSweep::mark_and_push(p), \ 92.159 + assert_is_in_closed_subset) 92.160 +} 92.161 + 92.162 +#ifndef SERIALGC 92.163 +void instanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm, 92.164 + oop obj) { 92.165 + instanceKlass::oop_follow_contents(cm, obj); 92.166 + InstanceMirrorKlass_OOP_ITERATE( \ 92.167 + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ 92.168 + PSParallelCompact::mark_and_push(cm, p), \ 92.169 + assert_is_in) 92.170 +} 92.171 +#endif // SERIALGC 92.172 + 92.173 +int instanceMirrorKlass::oop_adjust_pointers(oop obj) { 92.174 + int size = oop_size(obj); 92.175 + instanceKlass::oop_adjust_pointers(obj); 92.176 + InstanceMirrorKlass_OOP_ITERATE( \ 92.177 + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ 92.178 + MarkSweep::adjust_pointer(p), \ 92.179 + assert_nothing) 92.180 + return size; 92.181 +} 92.182 + 92.183 +#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv_suffix) \ 92.184 + InstanceMirrorKlass_OOP_ITERATE( \ 92.185 + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ 92.186 + (closure)->do_oop##nv_suffix(p), \ 92.187 + assert_is_in_closed_subset) \ 92.188 + return oop_size(obj); \ 92.189 + 92.190 +#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, nv_suffix, mr) \ 92.191 + InstanceMirrorKlass_BOUNDED_OOP_ITERATE( \ 92.192 + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ 92.193 + mr.start(), mr.end(), \ 92.194 + (closure)->do_oop##nv_suffix(p), \ 92.195 + assert_is_in_closed_subset) \ 92.196 + return oop_size(obj); \ 92.197 + 92.198 + 92.199 +// Macro to define instanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for 92.200 +// all closures. Macros calling macros above for each oop size. 
92.201 + 92.202 +#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ 92.203 + \ 92.204 +int instanceMirrorKlass:: \ 92.205 +oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ 92.206 + /* Get size before changing pointers */ \ 92.207 + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ 92.208 + \ 92.209 + instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ 92.210 + \ 92.211 + if (UseCompressedOops) { \ 92.212 + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ 92.213 + } else { \ 92.214 + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ 92.215 + } \ 92.216 +} 92.217 + 92.218 +#ifndef SERIALGC 92.219 +#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ 92.220 + \ 92.221 +int instanceMirrorKlass:: \ 92.222 +oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ 92.223 + /* Get size before changing pointers */ \ 92.224 + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ 92.225 + \ 92.226 + instanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \ 92.227 + \ 92.228 + if (UseCompressedOops) { \ 92.229 + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ 92.230 + } else { \ 92.231 + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ 92.232 + } \ 92.233 +} 92.234 +#endif // !SERIALGC 92.235 + 92.236 + 92.237 +#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ 92.238 + \ 92.239 +int instanceMirrorKlass:: \ 92.240 +oop_oop_iterate##nv_suffix##_m(oop obj, \ 92.241 + OopClosureType* closure, \ 92.242 + MemRegion mr) { \ 92.243 + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ 92.244 + \ 92.245 + instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \ 92.246 + if (UseCompressedOops) { \ 92.247 + InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr); \ 92.248 + } else { \ 92.249 + InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr); \ 92.250 + } \ 92.251 +} 92.252 + 92.253 +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) 92.254 +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) 92.255 +#ifndef SERIALGC 92.256 +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 92.257 +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 92.258 +#endif // SERIALGC 92.259 +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) 92.260 +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) 92.261 + 92.262 +#ifndef SERIALGC 92.263 +void instanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 92.264 + instanceKlass::oop_push_contents(pm, obj); 92.265 + InstanceMirrorKlass_OOP_ITERATE( \ 92.266 + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\ 92.267 + if (PSScavenge::should_scavenge(p)) { \ 92.268 + pm->claim_or_forward_depth(p); \ 92.269 + }, \ 92.270 + assert_nothing ) 92.271 +} 92.272 + 92.273 +int instanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { 92.274 + instanceKlass::oop_update_pointers(cm, obj); 92.275 + InstanceMirrorKlass_OOP_ITERATE( \ 92.276 + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\ 92.277 + PSParallelCompact::adjust_pointer(p), \ 92.278 + assert_nothing) 92.279 + return oop_size(obj); 
92.280 +} 92.281 +#endif // SERIALGC 92.282 + 92.283 +int instanceMirrorKlass::instance_size(KlassHandle k) { 92.284 + if (k() != NULL && k->oop_is_instance()) { 92.285 + return align_object_size(size_helper() + instanceKlass::cast(k())->static_field_size()); 92.286 + } 92.287 + return size_helper(); 92.288 +} 92.289 + 92.290 +instanceOop instanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) { 92.291 + // Query before forming handle. 92.292 + int size = instance_size(k); 92.293 + KlassHandle h_k(THREAD, as_klassOop()); 92.294 + instanceOop i; 92.295 + 92.296 + if (JavaObjectsInPerm) { 92.297 + i = (instanceOop) CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL); 92.298 + } else { 92.299 + assert(ScavengeRootsInCode > 0, "must be"); 92.300 + i = (instanceOop) CollectedHeap::obj_allocate(h_k, size, CHECK_NULL); 92.301 + } 92.302 + 92.303 + return i; 92.304 +} 92.305 + 92.306 +int instanceMirrorKlass::oop_size(oop obj) const { 92.307 + return java_lang_Class::oop_size(obj); 92.308 +} 92.309 + 92.310 +int instanceMirrorKlass::compute_static_oop_field_count(oop obj) { 92.311 + klassOop k = java_lang_Class::as_klassOop(obj); 92.312 + if (k != NULL && k->klass_part()->oop_is_instance()) { 92.313 + return instanceKlass::cast(k)->static_oop_field_count(); 92.314 + } 92.315 + return 0; 92.316 +}
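instance_size() is what makes mirrors variable-sized: each java.lang.Class instance carries the fixed Class fields plus the statics of the class it mirrors. Purely illustrative numbers:

    int class_fixed_words  = 10;  // size_helper() of java.lang.Class itself
    int static_field_words = 6;   // static_field_size() of the mirrored klass
    int mirror_words       = align_object_size(class_fixed_words + static_field_words);
    // each mirror therefore has its own size, reported by java_lang_Class::oop_size(obj)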
93.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 93.2 +++ b/src/share/vm/oops/instanceMirrorKlass.hpp Fri Mar 25 18:19:22 2011 -0400 93.3 @@ -0,0 +1,110 @@ 93.4 +/* 93.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. 93.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 93.7 + * 93.8 + * This code is free software; you can redistribute it and/or modify it 93.9 + * under the terms of the GNU General Public License version 2 only, as 93.10 + * published by the Free Software Foundation. 93.11 + * 93.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 93.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 93.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 93.15 + * version 2 for more details (a copy is included in the LICENSE file that 93.16 + * accompanied this code). 93.17 + * 93.18 + * You should have received a copy of the GNU General Public License version 93.19 + * 2 along with this work; if not, write to the Free Software Foundation, 93.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 93.21 + * 93.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 93.23 + * or visit www.oracle.com if you need additional information or have any 93.24 + * questions. 93.25 + * 93.26 + */ 93.27 + 93.28 +#ifndef SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP 93.29 +#define SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP 93.30 + 93.31 +#include "oops/instanceKlass.hpp" 93.32 + 93.33 +// An instanceMirrorKlass is a specialized instanceKlass for 93.34 +// java.lang.Class instances. These instances are special because 93.35 +// they contain the static fields of the class in addition to the 93.36 +// normal fields of Class. This means they are variable sized 93.37 +// instances and need special logic for computing their size and for 93.38 +// iteration of their oops. 93.39 + 93.40 + 93.41 +class instanceMirrorKlass: public instanceKlass { 93.42 + private: 93.43 + static int _offset_of_static_fields; 93.44 + 93.45 + public: 93.46 + // Type testing 93.47 + bool oop_is_instanceMirror() const { return true; } 93.48 + 93.49 + // Casting from klassOop 93.50 + static instanceMirrorKlass* cast(klassOop k) { 93.51 + assert(k->klass_part()->oop_is_instanceMirror(), "cast to instanceMirrorKlass"); 93.52 + return (instanceMirrorKlass*) k->klass_part(); 93.53 + } 93.54 + 93.55 + // Returns the size of the instance including the extra static fields. 
93.56 + virtual int oop_size(oop obj) const; 93.57 + 93.58 + // Static field offset is an offset into the Heap, should be converted by 93.59 + // based on UseCompressedOop for traversal 93.60 + static HeapWord* start_of_static_fields(oop obj) { 93.61 + return (HeapWord*)((intptr_t)obj + offset_of_static_fields()); 93.62 + } 93.63 + 93.64 + static void init_offset_of_static_fields() { 93.65 + // Cache the offset of the static fields in the Class instance 93.66 + assert(_offset_of_static_fields == 0, "once"); 93.67 + _offset_of_static_fields = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->size_helper() << LogHeapWordSize; 93.68 + } 93.69 + 93.70 + static int offset_of_static_fields() { 93.71 + return _offset_of_static_fields; 93.72 + } 93.73 + 93.74 + int compute_static_oop_field_count(oop obj); 93.75 + 93.76 + // Given a Klass return the size of the instance 93.77 + int instance_size(KlassHandle k); 93.78 + 93.79 + // allocation 93.80 + DEFINE_ALLOCATE_PERMANENT(instanceMirrorKlass); 93.81 + instanceOop allocate_instance(KlassHandle k, TRAPS); 93.82 + 93.83 + // Garbage collection 93.84 + int oop_adjust_pointers(oop obj); 93.85 + void oop_follow_contents(oop obj); 93.86 + 93.87 + // Parallel Scavenge and Parallel Old 93.88 + PARALLEL_GC_DECLS 93.89 + 93.90 + int oop_oop_iterate(oop obj, OopClosure* blk) { 93.91 + return oop_oop_iterate_v(obj, blk); 93.92 + } 93.93 + int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { 93.94 + return oop_oop_iterate_v_m(obj, blk, mr); 93.95 + } 93.96 + 93.97 +#define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ 93.98 + int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ 93.99 + int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); 93.100 + 93.101 + ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) 93.102 + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) 93.103 + 93.104 +#ifndef SERIALGC 93.105 +#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ 93.106 + int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); 93.107 + 93.108 + ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) 93.109 + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) 93.110 +#endif // !SERIALGC 93.111 +}; 93.112 + 93.113 +#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
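The header comment above explains that mirrors are now variable-sized because the statics sit directly after java.lang.Class's own fields. A small self-contained sketch of the address arithmetic declared in start_of_static_fields() and init_offset_of_static_fields(); the types and constants are hypothetical simplifications, not the VM's:

    #include <cstdio>

    static const int kLogHeapWordSize = 3;          // assumption: 64-bit heap words

    struct FakeMirror { char payload[128]; };       // stand-in for a Class oop

    // offset_of_static_fields(): java.lang.Class's own size in words, scaled to bytes.
    static int offset_of_static_fields(int class_size_helper_words) {
      return class_size_helper_words << kLogHeapWordSize;
    }

    // start_of_static_fields(oop obj): first static-field slot inside the mirror.
    static void* start_of_static_fields(FakeMirror* mirror, int offset_bytes) {
      return reinterpret_cast<char*>(mirror) + offset_bytes;
    }

    int main() {
      FakeMirror m;
      int off = offset_of_static_fields(10);        // 10 words -> 80 bytes
      std::printf("offset=%d start=%p\n", off, start_of_static_fields(&m, off));
      return 0;
    }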
94.1 --- a/src/share/vm/oops/klass.hpp Fri Mar 25 11:29:30 2011 -0700 94.2 +++ b/src/share/vm/oops/klass.hpp Fri Mar 25 18:19:22 2011 -0400 94.3 @@ -1,5 +1,5 @@ 94.4 /* 94.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 94.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 94.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 94.8 * 94.9 * This code is free software; you can redistribute it and/or modify it 94.10 @@ -577,6 +577,7 @@ 94.11 public: 94.12 // type testing operations 94.13 virtual bool oop_is_instance_slow() const { return false; } 94.14 + virtual bool oop_is_instanceMirror() const { return false; } 94.15 virtual bool oop_is_instanceRef() const { return false; } 94.16 virtual bool oop_is_array() const { return false; } 94.17 virtual bool oop_is_objArray_slow() const { return false; } 94.18 @@ -811,4 +812,8 @@ 94.19 #endif 94.20 }; 94.21 94.22 + 94.23 +inline oop klassOopDesc::java_mirror() const { return klass_part()->java_mirror(); } 94.24 + 94.25 + 94.26 #endif // SHARE_VM_OOPS_KLASS_HPP
95.1 --- a/src/share/vm/oops/klassKlass.cpp Fri Mar 25 11:29:30 2011 -0700 95.2 +++ b/src/share/vm/oops/klassKlass.cpp Fri Mar 25 18:19:22 2011 -0400 95.3 @@ -41,6 +41,10 @@ 95.4 #include "oops/typeArrayKlass.hpp" 95.5 #include "runtime/handles.inline.hpp" 95.6 #ifndef SERIALGC 95.7 +#include "gc_implementation/parNew/parOopClosures.inline.hpp" 95.8 +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 95.9 +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" 95.10 +#include "memory/cardTableRS.hpp" 95.11 #include "oops/oop.pcgc.inline.hpp" 95.12 #endif 95.13 95.14 @@ -175,6 +179,12 @@ 95.15 95.16 #ifndef SERIALGC 95.17 void klassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 95.18 + Klass* k = Klass::cast(klassOop(obj)); 95.19 + 95.20 + oop* p = k->adr_java_mirror(); 95.21 + if (PSScavenge::should_scavenge(p)) { 95.22 + pm->claim_or_forward_depth(p); 95.23 + } 95.24 } 95.25 95.26 int klassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { 95.27 @@ -233,7 +243,7 @@ 95.28 95.29 if (k->java_mirror() != NULL || (k->oop_is_instance() && instanceKlass::cast(klassOop(obj))->is_loaded())) { 95.30 guarantee(k->java_mirror() != NULL, "should be allocated"); 95.31 - guarantee(k->java_mirror()->is_perm(), "should be in permspace"); 95.32 + guarantee(k->java_mirror()->is_perm() || !JavaObjectsInPerm, "should be in permspace"); 95.33 guarantee(k->java_mirror()->is_instance(), "should be instance"); 95.34 } 95.35 }
96.1 --- a/src/share/vm/oops/klassOop.hpp Fri Mar 25 11:29:30 2011 -0700 96.2 +++ b/src/share/vm/oops/klassOop.hpp Fri Mar 25 18:19:22 2011 -0400 96.3 @@ -1,5 +1,5 @@ 96.4 /* 96.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 96.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 96.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 96.8 * 96.9 * This code is free software; you can redistribute it and/or modify it 96.10 @@ -45,7 +45,73 @@ 96.11 static int klass_part_offset_in_bytes() { return sizeof(klassOopDesc); } 96.12 96.13 // returns the Klass part containing dispatching behavior 96.14 - Klass* klass_part() { return (Klass*)((address)this + klass_part_offset_in_bytes()); } 96.15 + Klass* klass_part() const { return (Klass*)((address)this + klass_part_offset_in_bytes()); } 96.16 + 96.17 + // Convenience wrapper 96.18 + inline oop java_mirror() const; 96.19 + 96.20 + private: 96.21 + // These have no implementation since klassOop should never be accessed in this fashion 96.22 + oop obj_field(int offset) const; 96.23 + void obj_field_put(int offset, oop value); 96.24 + void obj_field_raw_put(int offset, oop value); 96.25 + 96.26 + jbyte byte_field(int offset) const; 96.27 + void byte_field_put(int offset, jbyte contents); 96.28 + 96.29 + jchar char_field(int offset) const; 96.30 + void char_field_put(int offset, jchar contents); 96.31 + 96.32 + jboolean bool_field(int offset) const; 96.33 + void bool_field_put(int offset, jboolean contents); 96.34 + 96.35 + jint int_field(int offset) const; 96.36 + void int_field_put(int offset, jint contents); 96.37 + 96.38 + jshort short_field(int offset) const; 96.39 + void short_field_put(int offset, jshort contents); 96.40 + 96.41 + jlong long_field(int offset) const; 96.42 + void long_field_put(int offset, jlong contents); 96.43 + 96.44 + jfloat float_field(int offset) const; 96.45 + void float_field_put(int offset, jfloat contents); 96.46 + 96.47 + jdouble double_field(int offset) const; 96.48 + void double_field_put(int offset, jdouble contents); 96.49 + 96.50 + address address_field(int offset) const; 96.51 + void address_field_put(int offset, address contents); 96.52 + 96.53 + oop obj_field_acquire(int offset) const; 96.54 + void release_obj_field_put(int offset, oop value); 96.55 + 96.56 + jbyte byte_field_acquire(int offset) const; 96.57 + void release_byte_field_put(int offset, jbyte contents); 96.58 + 96.59 + jchar char_field_acquire(int offset) const; 96.60 + void release_char_field_put(int offset, jchar contents); 96.61 + 96.62 + jboolean bool_field_acquire(int offset) const; 96.63 + void release_bool_field_put(int offset, jboolean contents); 96.64 + 96.65 + jint int_field_acquire(int offset) const; 96.66 + void release_int_field_put(int offset, jint contents); 96.67 + 96.68 + jshort short_field_acquire(int offset) const; 96.69 + void release_short_field_put(int offset, jshort contents); 96.70 + 96.71 + jlong long_field_acquire(int offset) const; 96.72 + void release_long_field_put(int offset, jlong contents); 96.73 + 96.74 + jfloat float_field_acquire(int offset) const; 96.75 + void release_float_field_put(int offset, jfloat contents); 96.76 + 96.77 + jdouble double_field_acquire(int offset) const; 96.78 + void release_double_field_put(int offset, jdouble contents); 96.79 + 96.80 + address address_field_acquire(int offset) const; 96.81 + void release_address_field_put(int offset, address contents); 96.82 }; 96.83 96.84 #endif // SHARE_VM_OOPS_KLASSOOP_HPP
97.1 --- a/src/share/vm/oops/klassVtable.cpp Fri Mar 25 11:29:30 2011 -0700 97.2 +++ b/src/share/vm/oops/klassVtable.cpp Fri Mar 25 18:19:22 2011 -0400 97.3 @@ -1095,7 +1095,7 @@ 97.4 itableOffsetEntry* ioe = (itableOffsetEntry*)klass->start_of_itable(); 97.5 itableMethodEntry* ime = (itableMethodEntry*)(ioe + nof_interfaces); 97.6 intptr_t* end = klass->end_of_itable(); 97.7 - assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_static_fields(), "wrong offset calculation (1)"); 97.8 + assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_nonstatic_oop_maps(), "wrong offset calculation (1)"); 97.9 assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)"); 97.10 97.11 // Visit all interfaces and initialize itable offset table
98.1 --- a/src/share/vm/oops/objArrayKlassKlass.cpp Fri Mar 25 11:29:30 2011 -0700 98.2 +++ b/src/share/vm/oops/objArrayKlassKlass.cpp Fri Mar 25 18:19:22 2011 -0400 98.3 @@ -31,6 +31,13 @@ 98.4 #include "oops/objArrayKlassKlass.hpp" 98.5 #include "oops/oop.inline.hpp" 98.6 #include "oops/oop.inline2.hpp" 98.7 +#ifndef SERIALGC 98.8 +#include "gc_implementation/parNew/parOopClosures.inline.hpp" 98.9 +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 98.10 +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" 98.11 +#include "memory/cardTableRS.hpp" 98.12 +#include "oops/oop.pcgc.inline.hpp" 98.13 +#endif 98.14 98.15 klassOop objArrayKlassKlass::create_klass(TRAPS) { 98.16 objArrayKlassKlass o; 98.17 @@ -236,12 +243,23 @@ 98.18 addr = oak->bottom_klass_addr(); 98.19 if (mr.contains(addr)) blk->do_oop(addr); 98.20 98.21 - return arrayKlassKlass::oop_oop_iterate(obj, blk); 98.22 + return arrayKlassKlass::oop_oop_iterate_m(obj, blk, mr); 98.23 } 98.24 98.25 #ifndef SERIALGC 98.26 void objArrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 98.27 assert(obj->blueprint()->oop_is_objArrayKlass(),"must be an obj array klass"); 98.28 + objArrayKlass* oak = objArrayKlass::cast((klassOop)obj); 98.29 + oop* p = oak->element_klass_addr(); 98.30 + if (PSScavenge::should_scavenge(p)) { 98.31 + pm->claim_or_forward_depth(p); 98.32 + } 98.33 + p = oak->bottom_klass_addr(); 98.34 + if (PSScavenge::should_scavenge(p)) { 98.35 + pm->claim_or_forward_depth(p); 98.36 + } 98.37 + 98.38 + arrayKlassKlass::oop_push_contents(pm, obj); 98.39 } 98.40 98.41 int objArrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { 98.42 @@ -287,7 +305,7 @@ 98.43 // Verification 98.44 98.45 void objArrayKlassKlass::oop_verify_on(oop obj, outputStream* st) { 98.46 - klassKlass::oop_verify_on(obj, st); 98.47 + arrayKlassKlass::oop_verify_on(obj, st); 98.48 objArrayKlass* oak = objArrayKlass::cast((klassOop)obj); 98.49 guarantee(oak->element_klass()->is_perm(), "should be in permspace"); 98.50 guarantee(oak->element_klass()->is_klass(), "should be klass");
99.1 --- a/src/share/vm/oops/oop.hpp Fri Mar 25 11:29:30 2011 -0700 99.2 +++ b/src/share/vm/oops/oop.hpp Fri Mar 25 18:19:22 2011 -0400 99.3 @@ -129,6 +129,7 @@ 99.4 99.5 // type test operations (inlined in oop.inline.h) 99.6 bool is_instance() const; 99.7 + bool is_instanceMirror() const; 99.8 bool is_instanceRef() const; 99.9 bool is_array() const; 99.10 bool is_objArray() const;
100.1 --- a/src/share/vm/oops/oop.inline.hpp Fri Mar 25 11:29:30 2011 -0700 100.2 +++ b/src/share/vm/oops/oop.inline.hpp Fri Mar 25 18:19:22 2011 -0400 100.3 @@ -1,5 +1,5 @@ 100.4 /* 100.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 100.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 100.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 100.8 * 100.9 * This code is free software; you can redistribute it and/or modify it 100.10 @@ -141,6 +141,7 @@ 100.11 inline bool oopDesc::is_a(klassOop k) const { return blueprint()->is_subtype_of(k); } 100.12 100.13 inline bool oopDesc::is_instance() const { return blueprint()->oop_is_instance(); } 100.14 +inline bool oopDesc::is_instanceMirror() const { return blueprint()->oop_is_instanceMirror(); } 100.15 inline bool oopDesc::is_instanceRef() const { return blueprint()->oop_is_instanceRef(); } 100.16 inline bool oopDesc::is_array() const { return blueprint()->oop_is_array(); } 100.17 inline bool oopDesc::is_objArray() const { return blueprint()->oop_is_objArray(); } 100.18 @@ -399,7 +400,7 @@ 100.19 100.20 inline int oopDesc::size_given_klass(Klass* klass) { 100.21 int lh = klass->layout_helper(); 100.22 - int s = lh >> LogHeapWordSize; // deliver size scaled by wordSize 100.23 + int s; 100.24 100.25 // lh is now a value computed at class initialization that may hint 100.26 // at the size. For instances, this is positive and equal to the 100.27 @@ -412,7 +413,13 @@ 100.28 // alive or dead. So the speed here is equal in importance to the 100.29 // speed of allocation. 100.30 100.31 - if (lh <= Klass::_lh_neutral_value) { 100.32 + if (lh > Klass::_lh_neutral_value) { 100.33 + if (!Klass::layout_helper_needs_slow_path(lh)) { 100.34 + s = lh >> LogHeapWordSize; // deliver size scaled by wordSize 100.35 + } else { 100.36 + s = klass->oop_size(this); 100.37 + } 100.38 + } else if (lh <= Klass::_lh_neutral_value) { 100.39 // The most common case is instances; fall through if so. 100.40 if (lh < Klass::_lh_neutral_value) { 100.41 // Second most common case is arrays. We have to fetch the
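The size_given_klass() change above makes the positive layout-helper case consult the klass's oop_size() when a slow-path bit is set, which is what lets variable-sized mirrors report their true size. A rough sketch of that dispatch, with hypothetical constants (kNeutralValue, kSlowPathBit) standing in for the real Klass layout-helper encoding:

    #include <iostream>

    static const int kLogHeapWordSize = 3;
    static const int kNeutralValue    = 0;   // assumption: stand-in for _lh_neutral_value
    static const int kSlowPathBit     = 1;   // assumption: low bit marks "ask the klass"

    struct FakeKlass {
      int layout_helper;                 // positive: size in bytes, possibly tagged
      int (*oop_size)(const void* obj);  // slow-path, per-object size computation
    };

    static int size_given_klass(const void* obj, const FakeKlass& k) {
      int lh = k.layout_helper;
      if (lh > kNeutralValue) {
        if ((lh & kSlowPathBit) == 0) {
          return lh >> kLogHeapWordSize;   // fast path: size encoded in the helper
        }
        return k.oop_size(obj);            // slow path: variable-sized instance
      }
      // arrays and other cases elided in this sketch
      return 0;
    }

    static int mirror_oop_size(const void*) { return 16; }

    int main() {
      FakeKlass plain  = {128, nullptr};              // 128 bytes -> 16 words, fast path
      FakeKlass mirror = {128 | kSlowPathBit, mirror_oop_size};
      std::cout << size_given_klass(nullptr, plain)  << "\n";   // 16
      std::cout << size_given_klass(nullptr, mirror) << "\n";   // 16, via oop_size()
      return 0;
    }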
101.1 --- a/src/share/vm/oops/oopsHierarchy.hpp Fri Mar 25 11:29:30 2011 -0700 101.2 +++ b/src/share/vm/oops/oopsHierarchy.hpp Fri Mar 25 18:19:22 2011 -0400 101.3 @@ -1,5 +1,5 @@ 101.4 /* 101.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 101.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 101.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 101.8 * 101.9 * This code is free software; you can redistribute it and/or modify it 101.10 @@ -174,6 +174,7 @@ 101.11 101.12 class Klass; 101.13 class instanceKlass; 101.14 +class instanceMirrorKlass; 101.15 class instanceRefKlass; 101.16 class methodKlass; 101.17 class constMethodKlass;
102.1 --- a/src/share/vm/opto/compile.cpp Fri Mar 25 11:29:30 2011 -0700 102.2 +++ b/src/share/vm/opto/compile.cpp Fri Mar 25 18:19:22 2011 -0400 102.3 @@ -1,5 +1,5 @@ 102.4 /* 102.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 102.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 102.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 102.8 * 102.9 * This code is free software; you can redistribute it and/or modify it 102.10 @@ -1202,11 +1202,15 @@ 102.11 // Oop pointers need some flattening 102.12 const TypeInstPtr *to = tj->isa_instptr(); 102.13 if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) { 102.14 + ciInstanceKlass *k = to->klass()->as_instance_klass(); 102.15 if( ptr == TypePtr::Constant ) { 102.16 - // No constant oop pointers (such as Strings); they alias with 102.17 - // unknown strings. 102.18 - assert(!is_known_inst, "not scalarizable allocation"); 102.19 - tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); 102.20 + if (to->klass() != ciEnv::current()->Class_klass() || 102.21 + offset < k->size_helper() * wordSize) { 102.22 + // No constant oop pointers (such as Strings); they alias with 102.23 + // unknown strings. 102.24 + assert(!is_known_inst, "not scalarizable allocation"); 102.25 + tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); 102.26 + } 102.27 } else if( is_known_inst ) { 102.28 tj = to; // Keep NotNull and klass_is_exact for instance type 102.29 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) { 102.30 @@ -1216,7 +1220,6 @@ 102.31 tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); 102.32 } 102.33 // Canonicalize the holder of this field 102.34 - ciInstanceKlass *k = to->klass()->as_instance_klass(); 102.35 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) { 102.36 // First handle header references such as a LoadKlassNode, even if the 102.37 // object's klass is unloaded at compile time (4965979). 102.38 @@ -1224,9 +1227,13 @@ 102.39 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); 102.40 } 102.41 } else if (offset < 0 || offset >= k->size_helper() * wordSize) { 102.42 - to = NULL; 102.43 - tj = TypeOopPtr::BOTTOM; 102.44 - offset = tj->offset(); 102.45 + // Static fields are in the space above the normal instance 102.46 + // fields in the java.lang.Class instance. 102.47 + if (to->klass() != ciEnv::current()->Class_klass()) { 102.48 + to = NULL; 102.49 + tj = TypeOopPtr::BOTTOM; 102.50 + offset = tj->offset(); 102.51 + } 102.52 } else { 102.53 ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); 102.54 if (!k->equals(canonical_holder) || tj->offset() != offset) { 102.55 @@ -1399,7 +1406,7 @@ 102.56 102.57 102.58 //--------------------------------find_alias_type------------------------------ 102.59 -Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create) { 102.60 +Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) { 102.61 if (_AliasLevel == 0) 102.62 return alias_type(AliasIdxBot); 102.63 102.64 @@ -1464,22 +1471,28 @@ 102.65 // but the base pointer type is not distinctive enough to identify 102.66 // references into JavaThread.) 102.67 102.68 - // Check for final instance fields. 102.69 + // Check for final fields. 
102.70 const TypeInstPtr* tinst = flat->isa_instptr(); 102.71 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) { 102.72 - ciInstanceKlass *k = tinst->klass()->as_instance_klass(); 102.73 - ciField* field = k->get_field_by_offset(tinst->offset(), false); 102.74 + ciField* field; 102.75 + if (tinst->const_oop() != NULL && 102.76 + tinst->klass() == ciEnv::current()->Class_klass() && 102.77 + tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) { 102.78 + // static field 102.79 + ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); 102.80 + field = k->get_field_by_offset(tinst->offset(), true); 102.81 + } else { 102.82 + ciInstanceKlass *k = tinst->klass()->as_instance_klass(); 102.83 + field = k->get_field_by_offset(tinst->offset(), false); 102.84 + } 102.85 + assert(field == NULL || 102.86 + original_field == NULL || 102.87 + (field->holder() == original_field->holder() && 102.88 + field->offset() == original_field->offset() && 102.89 + field->is_static() == original_field->is_static()), "wrong field?"); 102.90 // Set field() and is_rewritable() attributes. 102.91 if (field != NULL) alias_type(idx)->set_field(field); 102.92 } 102.93 - const TypeKlassPtr* tklass = flat->isa_klassptr(); 102.94 - // Check for final static fields. 102.95 - if (tklass && tklass->klass()->is_instance_klass()) { 102.96 - ciInstanceKlass *k = tklass->klass()->as_instance_klass(); 102.97 - ciField* field = k->get_field_by_offset(tklass->offset(), true); 102.98 - // Set field() and is_rewritable() attributes. 102.99 - if (field != NULL) alias_type(idx)->set_field(field); 102.100 - } 102.101 } 102.102 102.103 // Fill the cache for next time. 102.104 @@ -1502,10 +1515,10 @@ 102.105 Compile::AliasType* Compile::alias_type(ciField* field) { 102.106 const TypeOopPtr* t; 102.107 if (field->is_static()) 102.108 - t = TypeKlassPtr::make(field->holder()); 102.109 + t = TypeInstPtr::make(field->holder()->java_mirror()); 102.110 else 102.111 t = TypeOopPtr::make_from_klass_raw(field->holder()); 102.112 - AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes())); 102.113 + AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field); 102.114 assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct"); 102.115 return atp; 102.116 } 102.117 @@ -1522,7 +1535,7 @@ 102.118 if (adr_type == NULL) return true; 102.119 if (adr_type == TypePtr::BOTTOM) return true; 102.120 102.121 - return find_alias_type(adr_type, true) != NULL; 102.122 + return find_alias_type(adr_type, true, NULL) != NULL; 102.123 } 102.124 102.125 //-----------------------------must_alias--------------------------------------
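The compile.cpp hunks above repeatedly test whether an offset into a java.lang.Class constant lands at or beyond the Class instance's own fields, i.e. in the appended static-field area. An illustrative predicate capturing just that rule, with a hypothetical word size and no ci* types:

    #include <iostream>

    static const int kWordSize = 8;   // assumption: 64-bit word size

    // class_size_helper_words: size of java.lang.Class's own instance fields in words.
    static bool offset_is_static_field(bool is_class_mirror,
                                       int class_size_helper_words,
                                       int offset_bytes) {
      return is_class_mirror && offset_bytes >= class_size_helper_words * kWordSize;
    }

    int main() {
      std::cout << offset_is_static_field(true, 10, 64)  << "\n";  // 0: Class's own field
      std::cout << offset_is_static_field(true, 10, 96)  << "\n";  // 1: a static field
      std::cout << offset_is_static_field(false, 10, 96) << "\n";  // 0: not a mirror
      return 0;
    }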
103.1 --- a/src/share/vm/opto/compile.hpp Fri Mar 25 11:29:30 2011 -0700 103.2 +++ b/src/share/vm/opto/compile.hpp Fri Mar 25 18:19:22 2011 -0400 103.3 @@ -1,5 +1,5 @@ 103.4 /* 103.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 103.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 103.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 103.8 * 103.9 * This code is free software; you can redistribute it and/or modify it 103.10 @@ -596,7 +596,7 @@ 103.11 } 103.12 103.13 AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; } 103.14 - AliasType* alias_type(const TypePtr* adr_type) { return find_alias_type(adr_type, false); } 103.15 + AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); } 103.16 bool have_alias_type(const TypePtr* adr_type); 103.17 AliasType* alias_type(ciField* field); 103.18 103.19 @@ -835,7 +835,7 @@ 103.20 void grow_alias_types(); 103.21 AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type); 103.22 const TypePtr *flatten_alias_type(const TypePtr* adr_type) const; 103.23 - AliasType* find_alias_type(const TypePtr* adr_type, bool no_create); 103.24 + AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field); 103.25 103.26 void verify_top(Node*) const PRODUCT_RETURN; 103.27
104.1 --- a/src/share/vm/opto/library_call.cpp Fri Mar 25 11:29:30 2011 -0700 104.2 +++ b/src/share/vm/opto/library_call.cpp Fri Mar 25 18:19:22 2011 -0400 104.3 @@ -1118,7 +1118,7 @@ 104.4 Node* sourcea = basic_plus_adr(string_object, string_object, value_offset); 104.5 Node* source = make_load(no_ctrl, sourcea, source_type, T_OBJECT, string_type->add_offset(value_offset)); 104.6 104.7 - Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array)) ); 104.8 + Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) ); 104.9 jint target_length = target_array->length(); 104.10 const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); 104.11 const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
105.1 --- a/src/share/vm/opto/memnode.cpp Fri Mar 25 11:29:30 2011 -0700 105.2 +++ b/src/share/vm/opto/memnode.cpp Fri Mar 25 18:19:22 2011 -0400 105.3 @@ -1573,9 +1573,9 @@ 105.4 return TypeInt::make(constant.as_int()); 105.5 } else if (constant.basic_type() == T_ARRAY) { 105.6 if (adr->bottom_type()->is_ptr_to_narrowoop()) { 105.7 - return TypeNarrowOop::make_from_constant(constant.as_object()); 105.8 + return TypeNarrowOop::make_from_constant(constant.as_object(), true); 105.9 } else { 105.10 - return TypeOopPtr::make_from_constant(constant.as_object()); 105.11 + return TypeOopPtr::make_from_constant(constant.as_object(), true); 105.12 } 105.13 } 105.14 }
106.1 --- a/src/share/vm/opto/parse.hpp Fri Mar 25 11:29:30 2011 -0700 106.2 +++ b/src/share/vm/opto/parse.hpp Fri Mar 25 18:19:22 2011 -0400 106.3 @@ -1,5 +1,5 @@ 106.4 /* 106.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 106.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 106.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 106.8 * 106.9 * This code is free software; you can redistribute it and/or modify it 106.10 @@ -491,8 +491,8 @@ 106.11 bool static_field_ok_in_clinit(ciField *field, ciMethod *method); 106.12 106.13 // common code for actually performing the load or store 106.14 - void do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field); 106.15 - void do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field); 106.16 + void do_get_xxx(Node* obj, ciField* field, bool is_field); 106.17 + void do_put_xxx(Node* obj, ciField* field, bool is_field); 106.18 106.19 // loading from a constant field or the constant pool 106.20 // returns false if push failed (non-perm field constants only, not ldcs)
107.1 --- a/src/share/vm/opto/parse3.cpp Fri Mar 25 11:29:30 2011 -0700 107.2 +++ b/src/share/vm/opto/parse3.cpp Fri Mar 25 18:19:22 2011 -0400 107.3 @@ -112,29 +112,31 @@ 107.4 // Compile-time detect of null-exception? 107.5 if (stopped()) return; 107.6 107.7 +#ifdef ASSERT 107.8 const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder()); 107.9 assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed"); 107.10 +#endif 107.11 107.12 if (is_get) { 107.13 --_sp; // pop receiver before getting 107.14 - do_get_xxx(tjp, obj, field, is_field); 107.15 + do_get_xxx(obj, field, is_field); 107.16 } else { 107.17 - do_put_xxx(tjp, obj, field, is_field); 107.18 + do_put_xxx(obj, field, is_field); 107.19 --_sp; // pop receiver after putting 107.20 } 107.21 } else { 107.22 - const TypeKlassPtr* tkp = TypeKlassPtr::make(field_holder); 107.23 - obj = _gvn.makecon(tkp); 107.24 + const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror()); 107.25 + obj = _gvn.makecon(tip); 107.26 if (is_get) { 107.27 - do_get_xxx(tkp, obj, field, is_field); 107.28 + do_get_xxx(obj, field, is_field); 107.29 } else { 107.30 - do_put_xxx(tkp, obj, field, is_field); 107.31 + do_put_xxx(obj, field, is_field); 107.32 } 107.33 } 107.34 } 107.35 107.36 107.37 -void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) { 107.38 +void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { 107.39 // Does this field have a constant value? If so, just push the value. 107.40 if (field->is_constant()) { 107.41 if (field->is_static()) { 107.42 @@ -231,7 +233,7 @@ 107.43 } 107.44 } 107.45 107.46 -void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) { 107.47 +void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { 107.48 bool is_vol = field->is_volatile(); 107.49 // If reference is volatile, prevent following memory ops from 107.50 // floating down past the volatile write. Also prevents commoning
108.1 --- a/src/share/vm/opto/stringopts.cpp Fri Mar 25 11:29:30 2011 -0700 108.2 +++ b/src/share/vm/opto/stringopts.cpp Fri Mar 25 18:19:22 2011 -0400 108.3 @@ -910,7 +910,7 @@ 108.4 ciObject* con = field->constant_value().as_object(); 108.5 // Do not "join" in the previous type; it doesn't add value, 108.6 // and may yield a vacuous result if the field is of interface type. 108.7 - type = TypeOopPtr::make_from_constant(con)->isa_oopptr(); 108.8 + type = TypeOopPtr::make_from_constant(con, true)->isa_oopptr(); 108.9 assert(type != NULL, "field singleton type must be consistent"); 108.10 } else { 108.11 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
109.1 --- a/src/share/vm/opto/type.cpp Fri Mar 25 11:29:30 2011 -0700 109.2 +++ b/src/share/vm/opto/type.cpp Fri Mar 25 18:19:22 2011 -0400 109.3 @@ -32,6 +32,7 @@ 109.4 #include "memory/oopFactory.hpp" 109.5 #include "memory/resourceArea.hpp" 109.6 #include "oops/instanceKlass.hpp" 109.7 +#include "oops/instanceMirrorKlass.hpp" 109.8 #include "oops/klassKlass.hpp" 109.9 #include "oops/objArrayKlass.hpp" 109.10 #include "oops/typeArrayKlass.hpp" 109.11 @@ -2241,43 +2242,49 @@ 109.12 } else if (this->isa_aryptr()) { 109.13 _is_ptr_to_narrowoop = (klass()->is_obj_array_klass() && 109.14 _offset != arrayOopDesc::length_offset_in_bytes()); 109.15 - } else if (klass() == ciEnv::current()->Class_klass() && 109.16 - (_offset == java_lang_Class::klass_offset_in_bytes() || 109.17 - _offset == java_lang_Class::array_klass_offset_in_bytes())) { 109.18 - // Special hidden fields from the Class. 109.19 - assert(this->isa_instptr(), "must be an instance ptr."); 109.20 - _is_ptr_to_narrowoop = true; 109.21 } else if (klass()->is_instance_klass()) { 109.22 ciInstanceKlass* ik = klass()->as_instance_klass(); 109.23 ciField* field = NULL; 109.24 if (this->isa_klassptr()) { 109.25 - // Perm objects don't use compressed references, except for 109.26 - // static fields which are currently compressed. 109.27 - field = ik->get_field_by_offset(_offset, true); 109.28 - if (field != NULL) { 109.29 - BasicType basic_elem_type = field->layout_type(); 109.30 - _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || 109.31 - basic_elem_type == T_ARRAY); 109.32 - } 109.33 + // Perm objects don't use compressed references 109.34 } else if (_offset == OffsetBot || _offset == OffsetTop) { 109.35 // unsafe access 109.36 _is_ptr_to_narrowoop = true; 109.37 } else { // exclude unsafe ops 109.38 assert(this->isa_instptr(), "must be an instance ptr."); 109.39 - // Field which contains a compressed oop references. 109.40 - field = ik->get_field_by_offset(_offset, false); 109.41 - if (field != NULL) { 109.42 + 109.43 + if (klass() == ciEnv::current()->Class_klass() && 109.44 + (_offset == java_lang_Class::klass_offset_in_bytes() || 109.45 + _offset == java_lang_Class::array_klass_offset_in_bytes())) { 109.46 + // Special hidden fields from the Class. 109.47 + assert(this->isa_instptr(), "must be an instance ptr."); 109.48 + _is_ptr_to_narrowoop = true; 109.49 + } else if (klass() == ciEnv::current()->Class_klass() && 109.50 + _offset >= instanceMirrorKlass::offset_of_static_fields()) { 109.51 + // Static fields 109.52 + assert(o != NULL, "must be constant"); 109.53 + ciInstanceKlass* k = o->as_instance()->java_lang_Class_klass()->as_instance_klass(); 109.54 + ciField* field = k->get_field_by_offset(_offset, true); 109.55 + assert(field != NULL, "missing field"); 109.56 BasicType basic_elem_type = field->layout_type(); 109.57 _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || 109.58 basic_elem_type == T_ARRAY); 109.59 - } else if (klass()->equals(ciEnv::current()->Object_klass())) { 109.60 - // Compile::find_alias_type() cast exactness on all types to verify 109.61 - // that it does not affect alias type. 109.62 - _is_ptr_to_narrowoop = true; 109.63 } else { 109.64 - // Type for the copy start in LibraryCallKit::inline_native_clone(). 109.65 - assert(!klass_is_exact(), "only non-exact klass"); 109.66 - _is_ptr_to_narrowoop = true; 109.67 + // Instance fields which contains a compressed oop references. 
109.68 + field = ik->get_field_by_offset(_offset, false); 109.69 + if (field != NULL) { 109.70 + BasicType basic_elem_type = field->layout_type(); 109.71 + _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || 109.72 + basic_elem_type == T_ARRAY); 109.73 + } else if (klass()->equals(ciEnv::current()->Object_klass())) { 109.74 + // Compile::find_alias_type() cast exactness on all types to verify 109.75 + // that it does not affect alias type. 109.76 + _is_ptr_to_narrowoop = true; 109.77 + } else { 109.78 + // Type for the copy start in LibraryCallKit::inline_native_clone(). 109.79 + assert(!klass_is_exact(), "only non-exact klass"); 109.80 + _is_ptr_to_narrowoop = true; 109.81 + } 109.82 } 109.83 } 109.84 }
110.1 --- a/src/share/vm/opto/type.hpp Fri Mar 25 11:29:30 2011 -0700 110.2 +++ b/src/share/vm/opto/type.hpp Fri Mar 25 18:19:22 2011 -0400 110.3 @@ -988,8 +988,8 @@ 110.4 110.5 static const TypeNarrowOop *make( const TypePtr* type); 110.6 110.7 - static const TypeNarrowOop* make_from_constant(ciObject* con) { 110.8 - return make(TypeOopPtr::make_from_constant(con)); 110.9 + static const TypeNarrowOop* make_from_constant(ciObject* con, bool require_constant = false) { 110.10 + return make(TypeOopPtr::make_from_constant(con, require_constant)); 110.11 } 110.12 110.13 // returns the equivalent ptr type for this compressed pointer
111.1 --- a/src/share/vm/prims/jni.cpp Fri Mar 25 11:29:30 2011 -0700 111.2 +++ b/src/share/vm/prims/jni.cpp Fri Mar 25 18:19:22 2011 -0400 111.3 @@ -1,5 +1,5 @@ 111.4 /* 111.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 111.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 111.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 111.8 * 111.9 * This code is free software; you can redistribute it and/or modify it 111.10 @@ -1858,7 +1858,7 @@ 111.11 // Static field. The fieldID a JNIid specifying the field holder and the offset within the klassOop. 111.12 JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID); 111.13 assert(id->is_static_field_id(), "invalid static field id"); 111.14 - found = instanceKlass::cast(id->holder())->find_local_field_from_offset(id->offset(), true, &fd); 111.15 + found = id->find_local_field(&fd); 111.16 } else { 111.17 // Non-static field. The fieldID is really the offset of the field within the instanceOop. 111.18 int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID); 111.19 @@ -1906,9 +1906,7 @@ 111.20 JNIid* id = instanceKlass::cast(fd.field_holder())->jni_id_for(fd.offset()); 111.21 debug_only(id->set_is_static_field_id();) 111.22 111.23 - debug_only(int first_offset = instanceKlass::cast(fd.field_holder())->offset_of_static_fields();) 111.24 - debug_only(int end_offset = first_offset + (instanceKlass::cast(fd.field_holder())->static_field_size() * wordSize);) 111.25 - assert(id->offset() >= first_offset && id->offset() < end_offset, "invalid static field offset"); 111.26 + debug_only(id->verify(fd.field_holder())); 111.27 111.28 ret = jfieldIDWorkaround::to_static_jfieldID(id); 111.29 return ret; 111.30 @@ -1928,7 +1926,7 @@ 111.31 if (JvmtiExport::should_post_field_access()) { 111.32 JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true); 111.33 } 111.34 - jobject ret = JNIHandles::make_local(id->holder()->obj_field(id->offset())); 111.35 + jobject ret = JNIHandles::make_local(id->holder()->java_mirror()->obj_field(id->offset())); 111.36 DTRACE_PROBE1(hotspot_jni, GetStaticObjectField__return, ret); 111.37 return ret; 111.38 JNI_END 111.39 @@ -1950,7 +1948,7 @@ 111.40 if (JvmtiExport::should_post_field_access()) { \ 111.41 JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true); \ 111.42 } \ 111.43 - ret = id->holder()-> Fieldname##_field (id->offset()); \ 111.44 + ret = id->holder()->java_mirror()-> Fieldname##_field (id->offset()); \ 111.45 return ret;\ 111.46 JNI_END 111.47 111.48 @@ -1976,7 +1974,7 @@ 111.49 field_value.l = value; 111.50 JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, 'L', (jvalue *)&field_value); 111.51 } 111.52 - id->holder()->obj_field_put(id->offset(), JNIHandles::resolve(value)); 111.53 + id->holder()->java_mirror()->obj_field_put(id->offset(), JNIHandles::resolve(value)); 111.54 DTRACE_PROBE(hotspot_jni, SetStaticObjectField__return); 111.55 JNI_END 111.56 111.57 @@ -1999,7 +1997,7 @@ 111.58 field_value.unionType = value; \ 111.59 JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, SigType, (jvalue *)&field_value); \ 111.60 } \ 111.61 - id->holder()-> Fieldname##_field_put (id->offset(), value); \ 111.62 + id->holder()->java_mirror()-> Fieldname##_field_put (id->offset(), value); \ 111.63 DTRACE_PROBE(hotspot_jni, SetStatic##Result##Field__return);\ 111.64 JNI_END 111.65
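In the JNI hunks above, static field reads and writes now address the holder's java.lang.Class mirror at the JNIid's offset instead of the klassOop. A very small sketch of that addressing change, using hypothetical stand-in types rather than JNIid/klassOop:

    #include <cstring>
    #include <iostream>

    struct FakeMirror { char data[256]; };          // statics live after Class's fields
    struct FakeKlass  { FakeMirror* mirror; };      // holder klass -> its java mirror
    struct FakeJNIid  { FakeKlass* holder; int offset; };

    // Plays the role of id->holder()->java_mirror()->int_field(id->offset()).
    static int get_static_int_field(const FakeJNIid& id) {
      int v;
      std::memcpy(&v, id.holder->mirror->data + id.offset, sizeof v);
      return v;
    }

    static void set_static_int_field(const FakeJNIid& id, int v) {
      std::memcpy(id.holder->mirror->data + id.offset, &v, sizeof v);
    }

    int main() {
      FakeMirror m{};
      FakeKlass  k{&m};
      FakeJNIid  id{&k, 96};            // offset past Class's own fields
      set_static_int_field(id, 42);
      std::cout << get_static_int_field(id) << "\n";   // 42
      return 0;
    }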
112.1 --- a/src/share/vm/prims/jniCheck.cpp Fri Mar 25 11:29:30 2011 -0700 112.2 +++ b/src/share/vm/prims/jniCheck.cpp Fri Mar 25 18:19:22 2011 -0400 112.3 @@ -1,5 +1,5 @@ 112.4 /* 112.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 112.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 112.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 112.8 * 112.9 * This code is free software; you can redistribute it and/or modify it 112.10 @@ -224,8 +224,7 @@ 112.11 ReportJNIFatalError(thr, fatal_wrong_static_field); 112.12 112.13 /* check for proper field type */ 112.14 - if (!instanceKlass::cast(f_oop)->find_local_field_from_offset( 112.15 - id->offset(), true, &fd)) 112.16 + if (!id->find_local_field(&fd)) 112.17 ReportJNIFatalError(thr, fatal_static_field_not_found); 112.18 if ((fd.field_type() != ftype) && 112.19 !(fd.field_type() == T_ARRAY && ftype == T_OBJECT)) {
113.1 --- a/src/share/vm/prims/jvm.cpp Fri Mar 25 11:29:30 2011 -0700 113.2 +++ b/src/share/vm/prims/jvm.cpp Fri Mar 25 18:19:22 2011 -0400 113.3 @@ -1808,7 +1808,7 @@ 113.4 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Wrong type at constant pool index"); 113.5 } 113.6 klassOop k = cp->klass_at(index, CHECK_NULL); 113.7 - return (jclass) JNIHandles::make_local(k->klass_part()->java_mirror()); 113.8 + return (jclass) JNIHandles::make_local(k->java_mirror()); 113.9 } 113.10 JVM_END 113.11 113.12 @@ -1824,7 +1824,7 @@ 113.13 } 113.14 klassOop k = constantPoolOopDesc::klass_at_if_loaded(cp, index); 113.15 if (k == NULL) return NULL; 113.16 - return (jclass) JNIHandles::make_local(k->klass_part()->java_mirror()); 113.17 + return (jclass) JNIHandles::make_local(k->java_mirror()); 113.18 } 113.19 JVM_END 113.20
114.1 --- a/src/share/vm/prims/jvmtiEnvBase.cpp Fri Mar 25 11:29:30 2011 -0700 114.2 +++ b/src/share/vm/prims/jvmtiEnvBase.cpp Fri Mar 25 18:19:22 2011 -0400 114.3 @@ -1,5 +1,5 @@ 114.4 /* 114.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 114.6 + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 114.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 114.8 * 114.9 * This code is free software; you can redistribute it and/or modify it 114.10 @@ -616,9 +616,7 @@ 114.11 bool found = false; 114.12 if (jfieldIDWorkaround::is_static_jfieldID(field)) { 114.13 JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field); 114.14 - int offset = id->offset(); 114.15 - klassOop holder = id->holder(); 114.16 - found = instanceKlass::cast(holder)->find_local_field_from_offset(offset, true, fd); 114.17 + found = id->find_local_field(fd); 114.18 } else { 114.19 // Non-static field. The fieldID is really the offset of the field within the object. 114.20 int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
115.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Mar 25 11:29:30 2011 -0700 115.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Mar 25 18:19:22 2011 -0400 115.3 @@ -3350,11 +3350,12 @@ 115.4 115.5 for (Klass *subk = ik->subklass(); subk != NULL; 115.6 subk = subk->next_sibling()) { 115.7 - klassOop sub = subk->as_klassOop(); 115.8 - instanceKlass *subik = (instanceKlass *)sub->klass_part(); 115.9 - 115.10 - // recursively do subclasses of the current subclass 115.11 - increment_class_counter(subik, THREAD); 115.12 + if (subk->oop_is_instance()) { 115.13 + // Only update instanceKlasses 115.14 + instanceKlass *subik = (instanceKlass*)subk; 115.15 + // recursively do subclasses of the current subclass 115.16 + increment_class_counter(subik, THREAD); 115.17 + } 115.18 } 115.19 } 115.20
116.1 --- a/src/share/vm/prims/jvmtiTagMap.cpp Fri Mar 25 11:29:30 2011 -0700 116.2 +++ b/src/share/vm/prims/jvmtiTagMap.cpp Fri Mar 25 18:19:22 2011 -0400 116.3 @@ -1,5 +1,5 @@ 116.4 /* 116.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 116.6 + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 116.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 116.8 * 116.9 * This code is free software; you can redistribute it and/or modify it 116.10 @@ -27,6 +27,7 @@ 116.11 #include "classfile/systemDictionary.hpp" 116.12 #include "classfile/vmSymbols.hpp" 116.13 #include "jvmtifiles/jvmtiEnv.hpp" 116.14 +#include "oops/instanceMirrorKlass.hpp" 116.15 #include "oops/objArrayKlass.hpp" 116.16 #include "oops/oop.inline2.hpp" 116.17 #include "prims/jvmtiEventController.hpp" 116.18 @@ -2594,6 +2595,11 @@ 116.19 if (o->is_klass()) { 116.20 klassOop k = (klassOop)o; 116.21 o = Klass::cast(k)->java_mirror(); 116.22 + if (o == NULL) { 116.23 + // Classes without mirrors don't correspond to real Java 116.24 + // classes so just ignore them. 116.25 + return; 116.26 + } 116.27 } else { 116.28 116.29 // SystemDictionary::always_strong_oops_do reports the application 116.30 @@ -2834,10 +2840,10 @@ 116.31 116.32 // verify that a static oop field is in range 116.33 static inline bool verify_static_oop(instanceKlass* ik, 116.34 - klassOop k, int offset) { 116.35 - address obj_p = (address)k + offset; 116.36 - address start = (address)ik->start_of_static_fields(); 116.37 - address end = start + (ik->static_oop_field_size() * heapOopSize); 116.38 + oop mirror, int offset) { 116.39 + address obj_p = (address)mirror + offset; 116.40 + address start = (address)instanceMirrorKlass::start_of_static_fields(mirror); 116.41 + address end = start + (java_lang_Class::static_oop_field_count(mirror) * heapOopSize); 116.42 assert(end >= start, "sanity check"); 116.43 116.44 if (obj_p >= start && obj_p < end) { 116.45 @@ -2938,8 +2944,8 @@ 116.46 ClassFieldDescriptor* field = field_map->field_at(i); 116.47 char type = field->field_type(); 116.48 if (!is_primitive_field_type(type)) { 116.49 - oop fld_o = k->obj_field(field->field_offset()); 116.50 - assert(verify_static_oop(ik, k, field->field_offset()), "sanity check"); 116.51 + oop fld_o = mirror->obj_field(field->field_offset()); 116.52 + assert(verify_static_oop(ik, mirror, field->field_offset()), "sanity check"); 116.53 if (fld_o != NULL) { 116.54 int slot = field->field_index(); 116.55 if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) { 116.56 @@ -2949,7 +2955,7 @@ 116.57 } 116.58 } else { 116.59 if (is_reporting_primitive_fields()) { 116.60 - address addr = (address)k + field->field_offset(); 116.61 + address addr = (address)mirror + field->field_offset(); 116.62 int slot = field->field_index(); 116.63 if (!CallbackInvoker::report_primitive_static_field(mirror, slot, addr, type)) { 116.64 delete field_map;
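The revised verify_static_oop() above bounds-checks a static oop field against the static-field area of the mirror, derived from the mirror itself rather than from the klassOop. A simplified, self-contained version of that range check with hypothetical layout numbers:

    #include <iostream>

    static bool verify_static_oop(const char* mirror,
                                  int offset_of_static_fields,   // bytes into the mirror
                                  int static_oop_field_count,
                                  int heap_oop_size,
                                  int field_offset) {
      const char* obj_p = mirror + field_offset;
      const char* start = mirror + offset_of_static_fields;
      const char* end   = start + static_oop_field_count * heap_oop_size;
      return obj_p >= start && obj_p < end;
    }

    int main() {
      char mirror[256];
      std::cout << verify_static_oop(mirror, 80, 3, 8, 88) << "\n";  // 1: inside statics
      std::cout << verify_static_oop(mirror, 80, 3, 8, 40) << "\n";  // 0: a Class field
      return 0;
    }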
117.1 --- a/src/share/vm/prims/unsafe.cpp Fri Mar 25 11:29:30 2011 -0700 117.2 +++ b/src/share/vm/prims/unsafe.cpp Fri Mar 25 18:19:22 2011 -0400 117.3 @@ -688,7 +688,7 @@ 117.4 THROW_0(vmSymbols::java_lang_IllegalArgumentException()); 117.5 } 117.6 117.7 - return JNIHandles::make_local(env, java_lang_Class::as_klassOop(mirror)); 117.8 + return JNIHandles::make_local(env, mirror); 117.9 UNSAFE_END 117.10 117.11 //@deprecated 117.12 @@ -706,7 +706,7 @@ 117.13 if (clazz == NULL) { 117.14 THROW_0(vmSymbols::java_lang_NullPointerException()); 117.15 } 117.16 - return JNIHandles::make_local(env, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz))); 117.17 + return JNIHandles::make_local(env, JNIHandles::resolve_non_null(clazz)); 117.18 UNSAFE_END 117.19 117.20 UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized(JNIEnv *env, jobject unsafe, jobject clazz))
118.1 --- a/src/share/vm/runtime/arguments.cpp Fri Mar 25 11:29:30 2011 -0700 118.2 +++ b/src/share/vm/runtime/arguments.cpp Fri Mar 25 18:19:22 2011 -0400 118.3 @@ -242,6 +242,7 @@ 118.4 JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) }, 118.5 { "MaxLiveObjectEvacuationRatio", 118.6 JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) }, 118.7 + { "ForceSharedSpaces", JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) }, 118.8 { NULL, JDK_Version(0), JDK_Version(0) } 118.9 }; 118.10 118.11 @@ -1003,28 +1004,6 @@ 118.12 } 118.13 } 118.14 118.15 -void Arguments::check_compressed_oops_compat() { 118.16 -#ifdef _LP64 118.17 - assert(UseCompressedOops, "Precondition"); 118.18 - // Is it on by default or set on ergonomically 118.19 - bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops); 118.20 - 118.21 - // If dumping an archive or forcing its use, disable compressed oops if possible 118.22 - if (DumpSharedSpaces || RequireSharedSpaces) { 118.23 - if (is_on_by_default) { 118.24 - FLAG_SET_DEFAULT(UseCompressedOops, false); 118.25 - return; 118.26 - } else { 118.27 - vm_exit_during_initialization( 118.28 - "Class Data Sharing is not supported with compressed oops yet", NULL); 118.29 - } 118.30 - } else if (UseSharedSpaces) { 118.31 - // UseSharedSpaces is on by default. With compressed oops, we turn it off. 118.32 - FLAG_SET_DEFAULT(UseSharedSpaces, false); 118.33 - } 118.34 -#endif 118.35 -} 118.36 - 118.37 void Arguments::set_tiered_flags() { 118.38 // With tiered, set default policy to AdvancedThresholdPolicy, which is 3. 118.39 if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) { 118.40 @@ -1123,40 +1102,28 @@ 118.41 set_parnew_gc_flags(); 118.42 } 118.43 118.44 + // MaxHeapSize is aligned down in collectorPolicy 118.45 + size_t max_heap = align_size_down(MaxHeapSize, 118.46 + CardTableRS::ct_max_alignment_constraint()); 118.47 + 118.48 // Now make adjustments for CMS 118.49 - size_t young_gen_per_worker; 118.50 - intx new_ratio; 118.51 - size_t min_new_default; 118.52 - intx tenuring_default; 118.53 - if (CMSUseOldDefaults) { // old defaults: "old" as of 6.0 118.54 - if FLAG_IS_DEFAULT(CMSYoungGenPerWorker) { 118.55 - FLAG_SET_ERGO(intx, CMSYoungGenPerWorker, 4*M); 118.56 - } 118.57 - young_gen_per_worker = 4*M; 118.58 - new_ratio = (intx)15; 118.59 - min_new_default = 4*M; 118.60 - tenuring_default = (intx)0; 118.61 - } else { // new defaults: "new" as of 6.0 118.62 - young_gen_per_worker = CMSYoungGenPerWorker; 118.63 - new_ratio = (intx)7; 118.64 - min_new_default = 16*M; 118.65 - tenuring_default = (intx)4; 118.66 - } 118.67 - 118.68 - // Preferred young gen size for "short" pauses 118.69 + intx tenuring_default = (intx)6; 118.70 + size_t young_gen_per_worker = CMSYoungGenPerWorker; 118.71 + 118.72 + // Preferred young gen size for "short" pauses: 118.73 + // upper bound depends on # of threads and NewRatio. 118.74 const uintx parallel_gc_threads = 118.75 (ParallelGCThreads == 0 ? 
1 : ParallelGCThreads); 118.76 const size_t preferred_max_new_size_unaligned = 118.77 - ScaleForWordSize(young_gen_per_worker * parallel_gc_threads); 118.78 - const size_t preferred_max_new_size = 118.79 + MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads)); 118.80 + size_t preferred_max_new_size = 118.81 align_size_up(preferred_max_new_size_unaligned, os::vm_page_size()); 118.82 118.83 // Unless explicitly requested otherwise, size young gen 118.84 - // for "short" pauses ~ 4M*ParallelGCThreads 118.85 + // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads 118.86 118.87 // If either MaxNewSize or NewRatio is set on the command line, 118.88 // assume the user is trying to set the size of the young gen. 118.89 - 118.90 if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { 118.91 118.92 // Set MaxNewSize to our calculated preferred_max_new_size unless 118.93 @@ -1169,49 +1136,13 @@ 118.94 } 118.95 if (PrintGCDetails && Verbose) { 118.96 // Too early to use gclog_or_tty 118.97 - tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize); 118.98 + tty->print_cr("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize); 118.99 } 118.100 118.101 - // Unless explicitly requested otherwise, prefer a large 118.102 - // Old to Young gen size so as to shift the collection load 118.103 - // to the old generation concurrent collector 118.104 - 118.105 - // If this is only guarded by FLAG_IS_DEFAULT(NewRatio) 118.106 - // then NewSize and OldSize may be calculated. That would 118.107 - // generally lead to some differences with ParNewGC for which 118.108 - // there was no obvious reason. Also limit to the case where 118.109 - // MaxNewSize has not been set. 118.110 - 118.111 - FLAG_SET_ERGO(intx, NewRatio, MAX2(NewRatio, new_ratio)); 118.112 - 118.113 // Code along this path potentially sets NewSize and OldSize 118.114 118.115 - // Calculate the desired minimum size of the young gen but if 118.116 - // NewSize has been set on the command line, use it here since 118.117 - // it should be the final value. 118.118 - size_t min_new; 118.119 - if (FLAG_IS_DEFAULT(NewSize)) { 118.120 - min_new = align_size_up(ScaleForWordSize(min_new_default), 118.121 - os::vm_page_size()); 118.122 - } else { 118.123 - min_new = NewSize; 118.124 - } 118.125 - size_t prev_initial_size = InitialHeapSize; 118.126 - if (prev_initial_size != 0 && prev_initial_size < min_new + OldSize) { 118.127 - FLAG_SET_ERGO(uintx, InitialHeapSize, min_new + OldSize); 118.128 - // Currently minimum size and the initial heap sizes are the same. 118.129 - set_min_heap_size(InitialHeapSize); 118.130 - if (PrintGCDetails && Verbose) { 118.131 - warning("Initial heap size increased to " SIZE_FORMAT " M from " 118.132 - SIZE_FORMAT " M; use -XX:NewSize=... 
for finer control.", 118.133 - InitialHeapSize/M, prev_initial_size/M); 118.134 - } 118.135 - } 118.136 - 118.137 - // MaxHeapSize is aligned down in collectorPolicy 118.138 - size_t max_heap = 118.139 - align_size_down(MaxHeapSize, 118.140 - CardTableRS::ct_max_alignment_constraint()); 118.141 + assert(max_heap >= InitialHeapSize, "Error"); 118.142 + assert(max_heap >= NewSize, "Error"); 118.143 118.144 if (PrintGCDetails && Verbose) { 118.145 // Too early to use gclog_or_tty 118.146 @@ -1220,7 +1151,11 @@ 118.147 " max_heap: " SIZE_FORMAT, 118.148 min_heap_size(), InitialHeapSize, max_heap); 118.149 } 118.150 - if (max_heap > min_new) { 118.151 + size_t min_new = preferred_max_new_size; 118.152 + if (FLAG_IS_CMDLINE(NewSize)) { 118.153 + min_new = NewSize; 118.154 + } 118.155 + if (max_heap > min_new && min_heap_size() > min_new) { 118.156 // Unless explicitly requested otherwise, make young gen 118.157 // at least min_new, and at most preferred_max_new_size. 118.158 if (FLAG_IS_DEFAULT(NewSize)) { 118.159 @@ -1228,18 +1163,17 @@ 118.160 FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize)); 118.161 if (PrintGCDetails && Verbose) { 118.162 // Too early to use gclog_or_tty 118.163 - tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize); 118.164 + tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize); 118.165 } 118.166 } 118.167 // Unless explicitly requested otherwise, size old gen 118.168 - // so that it's at least 3X of NewSize to begin with; 118.169 - // later NewRatio will decide how it grows; see above. 118.170 + // so it's NewRatio x of NewSize. 118.171 if (FLAG_IS_DEFAULT(OldSize)) { 118.172 if (max_heap > NewSize) { 118.173 - FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize)); 118.174 + FLAG_SET_ERGO(uintx, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize)); 118.175 if (PrintGCDetails && Verbose) { 118.176 // Too early to use gclog_or_tty 118.177 - tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize); 118.178 + tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize); 118.179 } 118.180 } 118.181 } 118.182 @@ -1383,7 +1317,7 @@ 118.183 void Arguments::set_ergonomics_flags() { 118.184 // Parallel GC is not compatible with sharing. If one specifies 118.185 // that they want sharing explicitly, do not set ergonomics flags. 118.186 - if (DumpSharedSpaces || ForceSharedSpaces) { 118.187 + if (DumpSharedSpaces || RequireSharedSpaces) { 118.188 return; 118.189 } 118.190 118.191 @@ -1690,13 +1624,13 @@ 118.192 } 118.193 118.194 bool Arguments::verify_min_value(intx val, intx min, const char* name) { 118.195 - // Returns true if given value is greater than specified min threshold 118.196 + // Returns true if given value is at least specified min threshold 118.197 // false, otherwise. 118.198 if (val >= min ) { 118.199 return true; 118.200 } 118.201 jio_fprintf(defaultStream::error_stream(), 118.202 - "%s of " INTX_FORMAT " is invalid; must be greater than " INTX_FORMAT "\n", 118.203 + "%s of " INTX_FORMAT " is invalid; must be at least " INTX_FORMAT "\n", 118.204 name, val, min); 118.205 return false; 118.206 } 118.207 @@ -1846,33 +1780,6 @@ 118.208 118.209 status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); 118.210 118.211 - // Check whether user-specified sharing option conflicts with GC or page size. 118.212 - // Both sharing and large pages are enabled by default on some platforms; 118.213 - // large pages override sharing only if explicitly set on the command line. 
118.214 - const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode || 118.215 - UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC || 118.216 - UseLargePages && FLAG_IS_CMDLINE(UseLargePages); 118.217 - if (cannot_share) { 118.218 - // Either force sharing on by forcing the other options off, or 118.219 - // force sharing off. 118.220 - if (DumpSharedSpaces || ForceSharedSpaces) { 118.221 - jio_fprintf(defaultStream::error_stream(), 118.222 - "Using Serial GC and default page size because of %s\n", 118.223 - ForceSharedSpaces ? "-Xshare:on" : "-Xshare:dump"); 118.224 - force_serial_gc(); 118.225 - FLAG_SET_DEFAULT(UseLargePages, false); 118.226 - } else { 118.227 - if (UseSharedSpaces && Verbose) { 118.228 - jio_fprintf(defaultStream::error_stream(), 118.229 - "Turning off use of shared archive because of " 118.230 - "choice of garbage collector or large pages\n"); 118.231 - } 118.232 - no_shared_spaces(); 118.233 - } 118.234 - } else if (UseLargePages && (UseSharedSpaces || DumpSharedSpaces)) { 118.235 - FLAG_SET_DEFAULT(UseLargePages, false); 118.236 - } 118.237 - 118.238 status = status && check_gc_consistency(); 118.239 status = status && check_stack_pages(); 118.240 118.241 @@ -1950,6 +1857,8 @@ 118.242 status = false; 118.243 } 118.244 118.245 + status = status && verify_min_value(ParGCArrayScanChunk, 1, "ParGCArrayScanChunk"); 118.246 + 118.247 #ifndef SERIALGC 118.248 if (UseG1GC) { 118.249 status = status && verify_percentage(InitiatingHeapOccupancyPercent, 118.250 @@ -2413,9 +2322,6 @@ 118.251 } else if (match_option(option, "-Xshare:on", &tail)) { 118.252 FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); 118.253 FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true); 118.254 -#ifdef TIERED 118.255 - FLAG_SET_CMDLINE(bool, ForceSharedSpaces, true); 118.256 -#endif // TIERED 118.257 // -Xshare:auto 118.258 } else if (match_option(option, "-Xshare:auto", &tail)) { 118.259 FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); 118.260 @@ -2912,6 +2818,52 @@ 118.261 return JNI_OK; 118.262 } 118.263 118.264 +void Arguments::set_shared_spaces_flags() { 118.265 + const bool must_share = DumpSharedSpaces || RequireSharedSpaces; 118.266 + const bool might_share = must_share || UseSharedSpaces; 118.267 + 118.268 + // The string table is part of the shared archive so the size must match. 118.269 + if (!FLAG_IS_DEFAULT(StringTableSize)) { 118.270 + // Disable sharing. 118.271 + if (must_share) { 118.272 + warning("disabling shared archive %s because of non-default " 118.273 + "StringTableSize", DumpSharedSpaces ? "creation" : "use"); 118.274 + } 118.275 + if (might_share) { 118.276 + FLAG_SET_DEFAULT(DumpSharedSpaces, false); 118.277 + FLAG_SET_DEFAULT(RequireSharedSpaces, false); 118.278 + FLAG_SET_DEFAULT(UseSharedSpaces, false); 118.279 + } 118.280 + return; 118.281 + } 118.282 + 118.283 + // Check whether class data sharing settings conflict with GC, compressed oops 118.284 + // or page size, and fix them up. Explicit sharing options override other 118.285 + // settings. 118.286 + const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode || 118.287 + UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC || 118.288 + UseCompressedOops || UseLargePages && FLAG_IS_CMDLINE(UseLargePages); 118.289 + if (cannot_share) { 118.290 + if (must_share) { 118.291 + warning("selecting serial gc and disabling large pages %s" 118.292 + "because of %s", "" LP64_ONLY("and compressed oops "), 118.293 + DumpSharedSpaces ? 
"-Xshare:dump" : "-Xshare:on"); 118.294 + force_serial_gc(); 118.295 + FLAG_SET_CMDLINE(bool, UseLargePages, false); 118.296 + LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedOops, false)); 118.297 + } else { 118.298 + if (UseSharedSpaces && Verbose) { 118.299 + warning("turning off use of shared archive because of " 118.300 + "choice of garbage collector or large pages"); 118.301 + } 118.302 + no_shared_spaces(); 118.303 + } 118.304 + } else if (UseLargePages && might_share) { 118.305 + // Disable large pages to allow shared spaces. This is sub-optimal, since 118.306 + // there may not even be a shared archive to use. 118.307 + FLAG_SET_DEFAULT(UseLargePages, false); 118.308 + } 118.309 +} 118.310 118.311 // Parse entry point called from JNI_CreateJavaVM 118.312 118.313 @@ -3040,6 +2992,12 @@ 118.314 } 118.315 ScavengeRootsInCode = 1; 118.316 } 118.317 + if (!JavaObjectsInPerm && ScavengeRootsInCode == 0) { 118.318 + if (!FLAG_IS_DEFAULT(ScavengeRootsInCode)) { 118.319 + warning("forcing ScavengeRootsInCode non-zero because JavaObjectsInPerm is false"); 118.320 + } 118.321 + ScavengeRootsInCode = 1; 118.322 + } 118.323 118.324 if (PrintGCDetails) { 118.325 // Turn on -verbose:gc options as well 118.326 @@ -3059,9 +3017,7 @@ 118.327 // Set flags based on ergonomics. 118.328 set_ergonomics_flags(); 118.329 118.330 - if (UseCompressedOops) { 118.331 - check_compressed_oops_compat(); 118.332 - } 118.333 + set_shared_spaces_flags(); 118.334 118.335 // Check the GC selections again. 118.336 if (!check_gc_consistency()) { 118.337 @@ -3079,22 +3035,17 @@ 118.338 } 118.339 118.340 #ifndef KERNEL 118.341 - if (UseConcMarkSweepGC) { 118.342 - // Set flags for CMS and ParNew. Check UseConcMarkSweep first 118.343 - // to ensure that when both UseConcMarkSweepGC and UseParNewGC 118.344 - // are true, we don't call set_parnew_gc_flags() as well. 118.345 + // Set heap size based on available physical memory 118.346 + set_heap_size(); 118.347 + // Set per-collector flags 118.348 + if (UseParallelGC || UseParallelOldGC) { 118.349 + set_parallel_gc_flags(); 118.350 + } else if (UseConcMarkSweepGC) { // should be done before ParNew check below 118.351 set_cms_and_parnew_gc_flags(); 118.352 - } else { 118.353 - // Set heap size based on available physical memory 118.354 - set_heap_size(); 118.355 - // Set per-collector flags 118.356 - if (UseParallelGC || UseParallelOldGC) { 118.357 - set_parallel_gc_flags(); 118.358 - } else if (UseParNewGC) { 118.359 - set_parnew_gc_flags(); 118.360 - } else if (UseG1GC) { 118.361 - set_g1_gc_flags(); 118.362 - } 118.363 + } else if (UseParNewGC) { // skipped if CMS is set above 118.364 + set_parnew_gc_flags(); 118.365 + } else if (UseG1GC) { 118.366 + set_g1_gc_flags(); 118.367 } 118.368 #endif // KERNEL 118.369
119.1 --- a/src/share/vm/runtime/arguments.hpp Fri Mar 25 11:29:30 2011 -0700 119.2 +++ b/src/share/vm/runtime/arguments.hpp Fri Mar 25 18:19:22 2011 -0400 119.3 @@ -301,8 +301,6 @@ 119.4 119.5 // Tiered 119.6 static void set_tiered_flags(); 119.7 - // Check compressed oops compatibility with other flags 119.8 - static void check_compressed_oops_compat(); 119.9 // CMS/ParNew garbage collectors 119.10 static void set_parnew_gc_flags(); 119.11 static void set_cms_and_parnew_gc_flags(); 119.12 @@ -312,6 +310,7 @@ 119.13 static void set_g1_gc_flags(); 119.14 // GC ergonomics 119.15 static void set_ergonomics_flags(); 119.16 + static void set_shared_spaces_flags(); 119.17 // Setup heap size 119.18 static void set_heap_size(); 119.19 // Based on automatic selection criteria, should the
120.1 --- a/src/share/vm/runtime/globals.hpp Fri Mar 25 11:29:30 2011 -0700 120.2 +++ b/src/share/vm/runtime/globals.hpp Fri Mar 25 18:19:22 2011 -0400 120.3 @@ -851,7 +851,7 @@ 120.4 diagnostic(bool, TraceNMethodInstalls, false, \ 120.5 "Trace nmethod intallation") \ 120.6 \ 120.7 - diagnostic(intx, ScavengeRootsInCode, 0, \ 120.8 + diagnostic(intx, ScavengeRootsInCode, 1, \ 120.9 "0: do not allow scavengable oops in the code cache; " \ 120.10 "1: allow scavenging from the code cache; " \ 120.11 "2: emit as many constants as the compiler can see") \ 120.12 @@ -1221,6 +1221,11 @@ 120.13 "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ 120.14 "type after previous bulk rebias") \ 120.15 \ 120.16 + develop(bool, JavaObjectsInPerm, false, \ 120.17 + "controls whether Classes and interned Strings are allocated" \ 120.18 + "in perm. This purely intended to allow debugging issues" \ 120.19 + "in production.") \ 120.20 + \ 120.21 /* tracing */ \ 120.22 \ 120.23 notproduct(bool, TraceRuntimeCalls, false, \ 120.24 @@ -1540,12 +1545,8 @@ 120.25 product(bool, AlwaysPreTouch, false, \ 120.26 "It forces all freshly committed pages to be pre-touched.") \ 120.27 \ 120.28 - product(bool, CMSUseOldDefaults, false, \ 120.29 - "A flag temporarily introduced to allow reverting to some " \ 120.30 - "older default settings; older as of 6.0") \ 120.31 - \ 120.32 - product(intx, CMSYoungGenPerWorker, 16*M, \ 120.33 - "The amount of young gen chosen by default per GC worker " \ 120.34 + product_pd(intx, CMSYoungGenPerWorker, \ 120.35 + "The maximum size of young gen chosen by default per GC worker " \ 120.36 "thread available") \ 120.37 \ 120.38 product(bool, GCOverheadReporting, false, \ 120.39 @@ -3653,9 +3654,6 @@ 120.40 product(bool, RequireSharedSpaces, false, \ 120.41 "Require shared spaces in the permanent generation") \ 120.42 \ 120.43 - product(bool, ForceSharedSpaces, false, \ 120.44 - "Require shared spaces in the permanent generation") \ 120.45 - \ 120.46 product(bool, DumpSharedSpaces, false, \ 120.47 "Special mode: JVM reads a class list, loads classes, builds " \ 120.48 "shared spaces, and dumps the shared spaces to a file to be " \ 120.49 @@ -3758,6 +3756,9 @@ 120.50 diagnostic(bool, PrintDTraceDOF, false, \ 120.51 "Print the DTrace DOF passed to the system for JSDT probes") \ 120.52 \ 120.53 + product(uintx, StringTableSize, 1009, \ 120.54 + "Number of buckets in the interned String table") \ 120.55 + \ 120.56 product(bool, UseVMInterruptibleIO, false, \ 120.57 "(Unstable, Solaris-specific) Thread interrupt before or with " \ 120.58 "EINTR for I/O operations results in OS_INTRPT. The default value"\
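The globals.hpp hunks introduce the knobs used above: ScavengeRootsInCode now defaults to 1, the develop flag JavaObjectsInPerm controls whether classes and interned strings still live in the permanent generation, ForceSharedSpaces is removed (RequireSharedSpaces remains), and StringTableSize exposes the interned-string table's bucket count (default 1009) as a product flag, settable in the usual way, e.g. -XX:StringTableSize=60013 (the value here is just an example). A toy sketch of what a tunable bucket count means for a chained hash table (this is not HotSpot's StringTable):

    #include <cstddef>
    #include <functional>
    #include <list>
    #include <string>
    #include <vector>

    // Toy chained hash table; the tunable bucket count is the only point here.
    class TinyInternTable {
     public:
      explicit TinyInternTable(std::size_t buckets) : _table(buckets) {}

      // Fewer buckets than strings means longer chains and slower interning,
      // which is why the bucket count is worth exposing as a flag.
      const std::string& intern(const std::string& s) {
        std::list<std::string>& bucket =
            _table[std::hash<std::string>{}(s) % _table.size()];
        for (const std::string& e : bucket) {
          if (e == s) return e;      // already interned
        }
        bucket.push_back(s);
        return bucket.back();
      }

     private:
      std::vector<std::list<std::string>> _table;
    };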
121.1 --- a/src/share/vm/runtime/os.cpp Fri Mar 25 11:29:30 2011 -0700 121.2 +++ b/src/share/vm/runtime/os.cpp Fri Mar 25 18:19:22 2011 -0400 121.3 @@ -1079,11 +1079,6 @@ 121.4 "%/lib/jsse.jar:" 121.5 "%/lib/jce.jar:" 121.6 "%/lib/charsets.jar:" 121.7 - 121.8 - // ## TEMPORARY hack to keep the legacy launcher working when 121.9 - // ## only the boot module is installed (cf. j.l.ClassLoader) 121.10 - "%/lib/modules/jdk.boot.jar:" 121.11 - 121.12 "%/classes"; 121.13 char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep); 121.14 if (sysclasspath == NULL) return false;
122.1 --- a/src/share/vm/runtime/osThread.hpp Fri Mar 25 11:29:30 2011 -0700 122.2 +++ b/src/share/vm/runtime/osThread.hpp Fri Mar 25 18:19:22 2011 -0400 122.3 @@ -1,5 +1,5 @@ 122.4 /* 122.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 122.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 122.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 122.8 * 122.9 * This code is free software; you can redistribute it and/or modify it 122.10 @@ -65,7 +65,7 @@ 122.11 OSThreadStartFunc _start_proc; // Thread start routine 122.12 void* _start_parm; // Thread start routine parameter 122.13 volatile ThreadState _state; // Thread state *hint* 122.14 - jint _interrupted; // Thread.isInterrupted state 122.15 + volatile jint _interrupted; // Thread.isInterrupted state 122.16 122.17 // Note: _interrupted must be jint, so that Java intrinsics can access it. 122.18 // The value stored there must be either 0 or 1. It must be possible 122.19 @@ -89,7 +89,7 @@ 122.20 void* start_parm() const { return _start_parm; } 122.21 void set_start_parm(void* start_parm) { _start_parm = start_parm; } 122.22 122.23 - bool interrupted() const { return _interrupted != 0; } 122.24 + volatile bool interrupted() const { return _interrupted != 0; } 122.25 void set_interrupted(bool z) { _interrupted = z ? 1 : 0; } 122.26 122.27 // Printing
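Marking OSThread::_interrupted volatile (and registering it as a volatile field in vmStructs.cpp below) keeps the compiler from caching the value across the polling sites behind Thread.isInterrupted; the field stays a jint because compiled-code intrinsics read it directly. The volatile qualifier on the interrupted() return type has no effect on the returned bool. In portable user-level C++ the same polled-flag pattern would normally be written with std::atomic, roughly as in this sketch (not OSThread itself):

    #include <atomic>

    // Sketch only: every check re-reads memory, mirroring what the volatile
    // jint field is meant to guarantee inside HotSpot.
    struct PolledInterruptFlag {
      std::atomic<int> interrupted{0};

      bool is_interrupted() const {
        return interrupted.load(std::memory_order_relaxed) != 0;
      }
      void set_interrupted(bool z) {
        interrupted.store(z ? 1 : 0, std::memory_order_relaxed);
      }
    };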
123.1 --- a/src/share/vm/runtime/reflection.cpp Fri Mar 25 11:29:30 2011 -0700 123.2 +++ b/src/share/vm/runtime/reflection.cpp Fri Mar 25 18:19:22 2011 -0400 123.3 @@ -1,5 +1,5 @@ 123.4 /* 123.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 123.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 123.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 123.8 * 123.9 * This code is free software; you can redistribute it and/or modify it 123.10 @@ -649,7 +649,7 @@ 123.11 if (TraceClassResolution) { 123.12 trace_class_resolution(k); 123.13 } 123.14 - return k->klass_part()->java_mirror(); 123.15 + return k->java_mirror(); 123.16 }; 123.17 } 123.18
124.1 --- a/src/share/vm/runtime/thread.cpp Fri Mar 25 11:29:30 2011 -0700 124.2 +++ b/src/share/vm/runtime/thread.cpp Fri Mar 25 18:19:22 2011 -0400 124.3 @@ -3166,7 +3166,7 @@ 124.4 fieldDescriptor fd; 124.5 // Possible we might not find this field; if so, don't break 124.6 if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) { 124.7 - k()->bool_field_put(fd.offset(), true); 124.8 + k()->java_mirror()->bool_field_put(fd.offset(), true); 124.9 } 124.10 } 124.11 124.12 @@ -3182,7 +3182,7 @@ 124.13 fieldDescriptor fd; 124.14 // Possible we might not find this field: if so, silently don't break 124.15 if (ik->find_local_field(vmSymbols::stringCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) { 124.16 - k()->bool_field_put(fd.offset(), true); 124.17 + k()->java_mirror()->bool_field_put(fd.offset(), true); 124.18 } 124.19 } 124.20 }
125.1 --- a/src/share/vm/runtime/vmStructs.cpp Fri Mar 25 11:29:30 2011 -0700 125.2 +++ b/src/share/vm/runtime/vmStructs.cpp Fri Mar 25 18:19:22 2011 -0400 125.3 @@ -269,7 +269,7 @@ 125.4 nonstatic_field(instanceKlass, _inner_classes, typeArrayOop) \ 125.5 nonstatic_field(instanceKlass, _nonstatic_field_size, int) \ 125.6 nonstatic_field(instanceKlass, _static_field_size, int) \ 125.7 - nonstatic_field(instanceKlass, _static_oop_field_size, int) \ 125.8 + nonstatic_field(instanceKlass, _static_oop_field_count, int) \ 125.9 nonstatic_field(instanceKlass, _nonstatic_oop_map_size, int) \ 125.10 nonstatic_field(instanceKlass, _is_marked_dependent, bool) \ 125.11 nonstatic_field(instanceKlass, _minor_version, u2) \ 125.12 @@ -840,7 +840,7 @@ 125.13 /* OSThread */ \ 125.14 /************/ \ 125.15 \ 125.16 - nonstatic_field(OSThread, _interrupted, jint) \ 125.17 + volatile_nonstatic_field(OSThread, _interrupted, jint) \ 125.18 \ 125.19 /************************/ \ 125.20 /* OopMap and OopMapSet */ \ 125.21 @@ -945,6 +945,15 @@ 125.22 static_field(Arguments, _num_jvm_args, int) \ 125.23 static_field(Arguments, _java_command, char*) \ 125.24 \ 125.25 + /*********************************/ \ 125.26 + /* java_lang_Class fields */ \ 125.27 + /*********************************/ \ 125.28 + \ 125.29 + static_field(java_lang_Class, klass_offset, int) \ 125.30 + static_field(java_lang_Class, resolved_constructor_offset, int) \ 125.31 + static_field(java_lang_Class, array_klass_offset, int) \ 125.32 + static_field(java_lang_Class, oop_size_offset, int) \ 125.33 + static_field(java_lang_Class, static_oop_field_count_offset, int) \ 125.34 \ 125.35 /************************/ \ 125.36 /* Miscellaneous fields */ \ 125.37 @@ -1414,6 +1423,7 @@ 125.38 declare_toplevel_type(intptr_t*) \ 125.39 declare_unsigned_integer_type(InvocationCounter) /* FIXME: wrong type (not integer) */ \ 125.40 declare_toplevel_type(JavaThread*) \ 125.41 + declare_toplevel_type(java_lang_Class) \ 125.42 declare_toplevel_type(jbyte*) \ 125.43 declare_toplevel_type(jbyte**) \ 125.44 declare_toplevel_type(jint*) \ 125.45 @@ -1543,12 +1553,6 @@ 125.46 \ 125.47 declare_constant(SymbolTable::symbol_table_size) \ 125.48 \ 125.49 - /***************/ \ 125.50 - /* StringTable */ \ 125.51 - /***************/ \ 125.52 - \ 125.53 - declare_constant(StringTable::string_table_size) \ 125.54 - \ 125.55 /********************/ \ 125.56 /* SystemDictionary */ \ 125.57 /********************/ \ 125.58 @@ -1700,15 +1704,6 @@ 125.59 \ 125.60 declare_constant(ConstantPoolCacheEntry::tosBits) \ 125.61 \ 125.62 - /*********************************/ \ 125.63 - /* java_lang_Class field offsets */ \ 125.64 - /*********************************/ \ 125.65 - \ 125.66 - declare_constant(java_lang_Class::hc_klass_offset) \ 125.67 - declare_constant(java_lang_Class::hc_array_klass_offset) \ 125.68 - declare_constant(java_lang_Class::hc_resolved_constructor_offset) \ 125.69 - declare_constant(java_lang_Class::hc_number_of_fake_oop_fields) \ 125.70 - \ 125.71 /***************************************/ \ 125.72 /* java_lang_Thread::ThreadStatus enum */ \ 125.73 /***************************************/ \
126.1 --- a/src/share/vm/services/heapDumper.cpp Fri Mar 25 11:29:30 2011 -0700 126.2 +++ b/src/share/vm/services/heapDumper.cpp Fri Mar 25 18:19:22 2011 -0400 126.3 @@ -832,7 +832,7 @@ 126.4 126.5 // value 126.6 int offset = fld.offset(); 126.7 - address addr = (address)k + offset; 126.8 + address addr = (address)ikh->java_mirror() + offset; 126.9 126.10 dump_field_value(writer, sig->byte_at(0), addr); 126.11 }
127.1 --- a/src/share/vm/shark/sharkNativeWrapper.cpp Fri Mar 25 11:29:30 2011 -0700 127.2 +++ b/src/share/vm/shark/sharkNativeWrapper.cpp Fri Mar 25 18:19:22 2011 -0400 127.3 @@ -101,7 +101,7 @@ 127.4 builder()->CreateStore( 127.5 builder()->CreateInlineOop( 127.6 JNIHandles::make_local( 127.7 - target()->method_holder()->klass_part()->java_mirror())), 127.8 + target()->method_holder()->java_mirror())), 127.9 oop_tmp_slot()); 127.10 127.11 param_types.push_back(box_type);
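The reflection.cpp, thread.cpp, heapDumper.cpp, and sharkNativeWrapper.cpp hunks are the same relocation seen from different callers: the java.lang.Class mirror, rather than the klass oop, is now where a class's Java-visible state and static field storage live, so code that used to address the klass (or go through klass_part()) now goes through java_mirror() first. A rough self-contained illustration of the addressing change visible in heapDumper.cpp (all types below are hypothetical stand-ins, not HotSpot declarations):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Mirror {                       // plays the role of a java.lang.Class instance
      std::int32_t some_static_field;
    };
    struct Klass {                        // plays the role of the klass
      Mirror* mirror;
      Mirror* java_mirror() const { return mirror; }
    };

    // After this changeset, static field storage is addressed off the mirror:
    //   address addr = (address)ikh->java_mirror() + offset;   // was (address)k + offset
    std::int32_t read_static_int(const Klass* k, std::size_t offset) {
      const char* base = reinterpret_cast<const char*>(k->java_mirror());
      return *reinterpret_cast<const std::int32_t*>(base + offset);
    }

    int main() {
      Mirror m{7};
      Klass k{&m};
      std::printf("%d\n", read_static_int(&k, offsetof(Mirror, some_static_field)));
      return 0;
    }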
128.1 --- a/src/share/vm/utilities/debug.hpp Fri Mar 25 11:29:30 2011 -0700 128.2 +++ b/src/share/vm/utilities/debug.hpp Fri Mar 25 18:19:22 2011 -0400 128.3 @@ -25,6 +25,7 @@ 128.4 #ifndef SHARE_VM_UTILITIES_DEBUG_HPP 128.5 #define SHARE_VM_UTILITIES_DEBUG_HPP 128.6 128.7 +#include "prims/jvm.h" 128.8 #include "utilities/globalDefinitions.hpp" 128.9 128.10 #include <stdarg.h> 128.11 @@ -48,7 +49,7 @@ 128.12 FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) { 128.13 va_list argp; 128.14 va_start(argp, format); 128.15 - vsnprintf(_buf, bufsz, format, argp); 128.16 + jio_vsnprintf(_buf, bufsz, format, argp); 128.17 va_end(argp); 128.18 } 128.19 128.20 @@ -61,7 +62,7 @@ 128.21 128.22 va_list argp; 128.23 va_start(argp, format); 128.24 - vsnprintf(buf_end, bufsz - len, format, argp); 128.25 + jio_vsnprintf(buf_end, bufsz - len, format, argp); 128.26 va_end(argp); 128.27 } 128.28
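Routing FormatBuffer through jio_vsnprintf (declared via the newly included prims/jvm.h) replaces the raw libc call with HotSpot's portability wrapper; the usual motivation for such wrappers is consistent behavior across platforms, e.g. guaranteed NUL termination on truncation, which _vsnprintf on Windows does not provide. FormatBuffer is still used the same way afterwards, e.g. FormatBuffer<256> msg("bad state %d", s). A sketch of the kind of guarantee a wrapper in this style typically adds (this is not HotSpot's jio_vsnprintf):

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>

    // Sketch only: always NUL-terminate, even when the formatted text is truncated.
    int terminated_vsnprintf(char* buf, std::size_t len, const char* fmt, va_list args) {
      if (len == 0) return -1;
      int written = std::vsnprintf(buf, len, fmt, args);
      buf[len - 1] = '\0';   // defensive: std::vsnprintf terminates, _vsnprintf may not
      return written;
    }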
129.1 --- a/src/share/vm/utilities/globalDefinitions.hpp Fri Mar 25 11:29:30 2011 -0700 129.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp Fri Mar 25 18:19:22 2011 -0400 129.3 @@ -1185,7 +1185,7 @@ 129.4 // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll" 129.5 // (in ILP32). 129.6 129.7 -#define BOOL_TO_STR(__b) (__b) ? "true" : "false" 129.8 +#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false") 129.9 129.10 // Format 32-bit quantities. 129.11 #define INT32_FORMAT "%d"
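The BOOL_TO_STR change is macro hygiene: the added outer parentheses keep an adjacent operator from binding to only part of the expansion, and renaming the parameter to _b_ avoids the reserved double-underscore identifier __b. A contrived but compilable illustration:

    #include <cstdio>

    #define BOOL_TO_STR_OLD(__b) (__b) ? "true" : "false"
    #define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false")

    int main() {
      bool ok = true;
      // The new macro expands to one parenthesized expression, so adjacent
      // operators see the whole conditional:
      std::printf("%c\n", *BOOL_TO_STR(ok));          // prints 't'
      // With the old macro the '*' would bind to (__b) alone:
      //   *BOOL_TO_STR_OLD(ok)  ==>  *(ok) ? "true" : "false"   // does not compile
      return 0;
    }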
130.1 --- a/src/share/vm/utilities/workgroup.cpp Fri Mar 25 11:29:30 2011 -0700 130.2 +++ b/src/share/vm/utilities/workgroup.cpp Fri Mar 25 18:19:22 2011 -0400 130.3 @@ -1,5 +1,5 @@ 130.4 /* 130.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 130.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 130.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 130.8 * 130.9 * This code is free software; you can redistribute it and/or modify it 130.10 @@ -156,7 +156,7 @@ 130.11 tty->print_cr("/nFinished work gang %s: %d/%d sequence %d", 130.12 name(), finished_workers(), total_workers(), 130.13 _sequence_number); 130.14 - } 130.15 + } 130.16 } 130.17 130.18 void AbstractWorkGang::stop() {
131.1 --- a/src/share/vm/utilities/workgroup.hpp Fri Mar 25 11:29:30 2011 -0700 131.2 +++ b/src/share/vm/utilities/workgroup.hpp Fri Mar 25 18:19:22 2011 -0400 131.3 @@ -1,5 +1,5 @@ 131.4 /* 131.5 - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 131.6 + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. 131.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 131.8 * 131.9 * This code is free software; you can redistribute it and/or modify it 131.10 @@ -36,6 +36,20 @@ 131.11 # include "thread_windows.inline.hpp" 131.12 #endif 131.13 131.14 +// Task class hierarchy: 131.15 +// AbstractGangTask 131.16 +// AbstractGangTaskWOopQueues 131.17 +// 131.18 +// Gang/Group class hierarchy: 131.19 +// AbstractWorkGang 131.20 +// WorkGang 131.21 +// FlexibleWorkGang 131.22 +// YieldingFlexibleWorkGang (defined in another file) 131.23 +// 131.24 +// Worker class hierarchy: 131.25 +// GangWorker (subclass of WorkerThread) 131.26 +// YieldingFlexibleGangWorker (defined in another file) 131.27 + 131.28 // Forward declarations of classes defined here 131.29 131.30 class WorkGang;