changeset:   3183:fd65bc7c09b6
parent:      3167:95607b70acb5
parent:      3182:65a8ff39a6da
child:       3184:246daf2c601d
child:       3185:b9390528617c
user:        tonyp
date:        Thu, 06 Oct 2011 13:28:09 -0400
description: Merge
files:
  agent/make/Makefile
  make/sa.files
  src/share/vm/runtime/thread.cpp
  src/share/vm/runtime/vmStructs.cpp

     1.1 --- a/agent/make/Makefile	Fri Sep 30 22:54:43 2011 -0700
     1.2 +++ b/agent/make/Makefile	Thu Oct 06 13:28:09 2011 -0400
     1.3 @@ -1,5 +1,5 @@
     1.4  #
     1.5 -# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
     1.6 +# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
     1.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8  #
     1.9  # This code is free software; you can redistribute it and/or modify it
    1.10 @@ -84,6 +84,7 @@
    1.11  sun.jvm.hotspot.debugger.windbg.x86 \
    1.12  sun.jvm.hotspot.debugger.x86 \
    1.13  sun.jvm.hotspot.gc_implementation \
    1.14 +sun.jvm.hotspot.gc_implementation.g1 \
    1.15  sun.jvm.hotspot.gc_implementation.parallelScavenge \
    1.16  sun.jvm.hotspot.gc_implementation.shared \
    1.17  sun.jvm.hotspot.gc_interface \
    1.18 @@ -176,6 +177,9 @@
    1.19  sun/jvm/hotspot/debugger/windbg/ia64/*.java \
    1.20  sun/jvm/hotspot/debugger/windbg/x86/*.java \
    1.21  sun/jvm/hotspot/debugger/x86/*.java \
    1.22 +sun/jvm/hotspot/gc_implementation/g1/*.java \
    1.23 +sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
    1.24 +sun/jvm/hotspot/gc_implementation/shared/*.java \
    1.25  sun/jvm/hotspot/interpreter/*.java \
    1.26  sun/jvm/hotspot/jdi/*.java \
    1.27  sun/jvm/hotspot/livejvm/*.java \
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Thu Oct 06 13:28:09 2011 -0400
     2.3 @@ -0,0 +1,116 @@
     2.4 +/*
     2.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     2.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.7 + *
     2.8 + * This code is free software; you can redistribute it and/or modify it
     2.9 + * under the terms of the GNU General Public License version 2 only, as
    2.10 + * published by the Free Software Foundation.
    2.11 + *
    2.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    2.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    2.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    2.15 + * version 2 for more details (a copy is included in the LICENSE file that
    2.16 + * accompanied this code).
    2.17 + *
    2.18 + * You should have received a copy of the GNU General Public License version
    2.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    2.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    2.21 + *
    2.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    2.23 + * or visit www.oracle.com if you need additional information or have any
    2.24 + * questions.
    2.25 + *
    2.26 + */
    2.27 +
    2.28 +package sun.jvm.hotspot.gc_implementation.g1;
    2.29 +
    2.30 +import java.util.Iterator;
    2.31 +import java.util.Observable;
    2.32 +import java.util.Observer;
    2.33 +
    2.34 +import sun.jvm.hotspot.debugger.Address;
    2.35 +import sun.jvm.hotspot.gc_interface.CollectedHeapName;
    2.36 +import sun.jvm.hotspot.memory.MemRegion;
    2.37 +import sun.jvm.hotspot.memory.SharedHeap;
    2.38 +import sun.jvm.hotspot.memory.SpaceClosure;
    2.39 +import sun.jvm.hotspot.runtime.VM;
    2.40 +import sun.jvm.hotspot.runtime.VMObjectFactory;
    2.41 +import sun.jvm.hotspot.types.AddressField;
    2.42 +import sun.jvm.hotspot.types.CIntegerField;
    2.43 +import sun.jvm.hotspot.types.Type;
    2.44 +import sun.jvm.hotspot.types.TypeDataBase;
    2.45 +
    2.46 +// Mirror class for G1CollectedHeap.
    2.47 +
    2.48 +public class G1CollectedHeap extends SharedHeap {
    2.49 +    // HeapRegionSeq _seq;
    2.50 +    static private long hrsFieldOffset;
    2.51 +    // MemRegion _g1_committed;
    2.52 +    static private long g1CommittedFieldOffset;
    2.53 +    // size_t _summary_bytes_used;
    2.54 +    static private CIntegerField summaryBytesUsedField;
    2.55 +    // G1MonitoringSupport* _g1mm
    2.56 +    static private AddressField g1mmField;
    2.57 +
    2.58 +    static {
    2.59 +        VM.registerVMInitializedObserver(new Observer() {
    2.60 +                public void update(Observable o, Object data) {
    2.61 +                    initialize(VM.getVM().getTypeDataBase());
    2.62 +                }
    2.63 +            });
    2.64 +    }
    2.65 +
    2.66 +    static private synchronized void initialize(TypeDataBase db) {
    2.67 +        Type type = db.lookupType("G1CollectedHeap");
    2.68 +
    2.69 +        hrsFieldOffset = type.getField("_hrs").getOffset();
    2.70 +        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
    2.71 +        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
    2.72 +        g1mmField = type.getAddressField("_g1mm");
    2.73 +    }
    2.74 +
    2.75 +    public long capacity() {
    2.76 +        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
    2.77 +        MemRegion g1_committed = new MemRegion(g1CommittedAddr);
    2.78 +        return g1_committed.byteSize();
    2.79 +    }
    2.80 +
    2.81 +    public long used() {
    2.82 +        return summaryBytesUsedField.getValue(addr);
    2.83 +    }
    2.84 +
    2.85 +    public long n_regions() {
    2.86 +        return hrs().length();
    2.87 +    }
    2.88 +
    2.89 +    private HeapRegionSeq hrs() {
    2.90 +        Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
    2.91 +        return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
    2.92 +                                                         hrsAddr);
    2.93 +    }
    2.94 +
    2.95 +    public G1MonitoringSupport g1mm() {
    2.96 +        Address g1mmAddr = g1mmField.getValue(addr);
    2.97 +        return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
    2.98 +    }
    2.99 +
   2.100 +    private Iterator<HeapRegion> heapRegionIterator() {
   2.101 +        return hrs().heapRegionIterator();
   2.102 +    }
   2.103 +
   2.104 +    public void heapRegionIterate(SpaceClosure scl) {
   2.105 +        Iterator<HeapRegion> iter = heapRegionIterator();
   2.106 +        while (iter.hasNext()) {
   2.107 +            HeapRegion hr = iter.next();
   2.108 +            scl.doSpace(hr);
   2.109 +        }
   2.110 +    }
   2.111 +
   2.112 +    public CollectedHeapName kind() {
   2.113 +        return CollectedHeapName.G1_COLLECTED_HEAP;
   2.114 +    }
   2.115 +
   2.116 +    public G1CollectedHeap(Address addr) {
   2.117 +        super(addr);
   2.118 +    }
   2.119 +}
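
Note: the mirror class above only exposes read-only views of the target VM's G1 heap. The following is a minimal usage sketch, illustrative only and not part of this changeset; it assumes the SA has already attached to a target process (so VM.getVM() is initialized, e.g. from inside a Tool subclass), and G1HeapWalker is a made-up name.

    // Hypothetical usage sketch -- not part of this changeset. Assumes an
    // attached target VM so that VM.getVM() is initialized.
    import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    import sun.jvm.hotspot.gc_interface.CollectedHeap;
    import sun.jvm.hotspot.memory.Space;
    import sun.jvm.hotspot.memory.SpaceClosure;
    import sun.jvm.hotspot.runtime.VM;

    public class G1HeapWalker {
        public static void walk() {
            CollectedHeap heap = VM.getVM().getUniverse().heap();
            if (!(heap instanceof G1CollectedHeap)) {
                System.out.println("target VM is not using G1");
                return;
            }
            G1CollectedHeap g1h = (G1CollectedHeap) heap;
            System.out.println("regions  = " + g1h.n_regions());
            System.out.println("capacity = " + g1h.capacity() + " bytes");
            System.out.println("used     = " + g1h.used() + " bytes");
            // Visit every committed heap region via the new iteration support.
            g1h.heapRegionIterate(new SpaceClosure() {
                public void doSpace(Space region) {
                    System.out.println("  region [" + region.bottom() + ", " + region.end() + ")");
                }
            });
        }
    }
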
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1MonitoringSupport.java	Thu Oct 06 13:28:09 2011 -0400
     3.3 @@ -0,0 +1,99 @@
     3.4 +/*
     3.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     3.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.7 + *
     3.8 + * This code is free software; you can redistribute it and/or modify it
     3.9 + * under the terms of the GNU General Public License version 2 only, as
    3.10 + * published by the Free Software Foundation.
    3.11 + *
    3.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    3.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    3.15 + * version 2 for more details (a copy is included in the LICENSE file that
    3.16 + * accompanied this code).
    3.17 + *
    3.18 + * You should have received a copy of the GNU General Public License version
    3.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    3.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    3.21 + *
    3.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    3.23 + * or visit www.oracle.com if you need additional information or have any
    3.24 + * questions.
    3.25 + *
    3.26 + */
    3.27 +
    3.28 +package sun.jvm.hotspot.gc_implementation.g1;
    3.29 +
    3.30 +import java.util.Observable;
    3.31 +import java.util.Observer;
    3.32 +
    3.33 +import sun.jvm.hotspot.debugger.Address;
    3.34 +import sun.jvm.hotspot.runtime.VM;
    3.35 +import sun.jvm.hotspot.runtime.VMObject;
    3.36 +import sun.jvm.hotspot.types.CIntegerField;
    3.37 +import sun.jvm.hotspot.types.Type;
    3.38 +import sun.jvm.hotspot.types.TypeDataBase;
    3.39 +
    3.40 +// Mirror class for G1MonitoringSupport.
    3.41 +
    3.42 +public class G1MonitoringSupport extends VMObject {
    3.43 +    // size_t _eden_committed;
    3.44 +    static private CIntegerField edenCommittedField;
    3.45 +    // size_t _eden_used;
    3.46 +    static private CIntegerField edenUsedField;
    3.47 +    // size_t _survivor_committed;
    3.48 +    static private CIntegerField survivorCommittedField;
    3.49 +    // size_t _survivor_used;
    3.50 +    static private CIntegerField survivorUsedField;
    3.51 +    // size_t _old_committed;
    3.52 +    static private CIntegerField oldCommittedField;
    3.53 +    // size_t _old_used;
    3.54 +    static private CIntegerField oldUsedField;
    3.55 +
    3.56 +    static {
    3.57 +        VM.registerVMInitializedObserver(new Observer() {
    3.58 +                public void update(Observable o, Object data) {
    3.59 +                    initialize(VM.getVM().getTypeDataBase());
    3.60 +                }
    3.61 +            });
    3.62 +    }
    3.63 +
    3.64 +    static private synchronized void initialize(TypeDataBase db) {
    3.65 +        Type type = db.lookupType("G1MonitoringSupport");
    3.66 +
    3.67 +        edenCommittedField = type.getCIntegerField("_eden_committed");
    3.68 +        edenUsedField = type.getCIntegerField("_eden_used");
    3.69 +        survivorCommittedField = type.getCIntegerField("_survivor_committed");
    3.70 +        survivorUsedField = type.getCIntegerField("_survivor_used");
    3.71 +        oldCommittedField = type.getCIntegerField("_old_committed");
    3.72 +        oldUsedField = type.getCIntegerField("_old_used");
    3.73 +    }
    3.74 +
    3.75 +    public long edenCommitted() {
    3.76 +        return edenCommittedField.getValue(addr);
    3.77 +    }
    3.78 +
    3.79 +    public long edenUsed() {
    3.80 +        return edenUsedField.getValue(addr);
    3.81 +    }
    3.82 +
    3.83 +    public long survivorCommitted() {
    3.84 +        return survivorCommittedField.getValue(addr);
    3.85 +    }
    3.86 +
    3.87 +    public long survivorUsed() {
    3.88 +        return survivorUsedField.getValue(addr);
    3.89 +    }
    3.90 +
    3.91 +    public long oldCommitted() {
    3.92 +        return oldCommittedField.getValue(addr);
    3.93 +    }
    3.94 +
    3.95 +    public long oldUsed() {
    3.96 +        return oldUsedField.getValue(addr);
    3.97 +    }
    3.98 +
    3.99 +    public G1MonitoringSupport(Address addr) {
   3.100 +        super(addr);
   3.101 +    }
   3.102 +}
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Thu Oct 06 13:28:09 2011 -0400
     4.3 @@ -0,0 +1,66 @@
     4.4 +/*
     4.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     4.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.7 + *
     4.8 + * This code is free software; you can redistribute it and/or modify it
     4.9 + * under the terms of the GNU General Public License version 2 only, as
    4.10 + * published by the Free Software Foundation.
    4.11 + *
    4.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    4.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    4.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    4.15 + * version 2 for more details (a copy is included in the LICENSE file that
    4.16 + * accompanied this code).
    4.17 + *
    4.18 + * You should have received a copy of the GNU General Public License version
    4.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    4.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    4.21 + *
    4.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    4.23 + * or visit www.oracle.com if you need additional information or have any
    4.24 + * questions.
    4.25 + *
    4.26 + */
    4.27 +
    4.28 +package sun.jvm.hotspot.gc_implementation.g1;
    4.29 +
    4.30 +import java.util.Observable;
    4.31 +import java.util.Observer;
    4.32 +
    4.33 +import sun.jvm.hotspot.debugger.Address;
    4.34 +import sun.jvm.hotspot.memory.ContiguousSpace;
    4.35 +import sun.jvm.hotspot.runtime.VM;
    4.36 +import sun.jvm.hotspot.types.CIntegerField;
    4.37 +import sun.jvm.hotspot.types.Type;
    4.38 +import sun.jvm.hotspot.types.TypeDataBase;
    4.39 +
    4.40 +// Mirror class for HeapRegion. Currently we don't actually include
    4.41 +// any of its fields but only iterate over it (which we get "for free"
    4.42 +// as HeapRegion ultimately inherits from ContiguousSpace).
    4.43 +
    4.44 +public class HeapRegion extends ContiguousSpace {
    4.45 +    // static int GrainBytes;
    4.46 +    static private CIntegerField grainBytesField;
    4.47 +
    4.48 +    static {
    4.49 +        VM.registerVMInitializedObserver(new Observer() {
    4.50 +                public void update(Observable o, Object data) {
    4.51 +                    initialize(VM.getVM().getTypeDataBase());
    4.52 +                }
    4.53 +            });
    4.54 +    }
    4.55 +
    4.56 +    static private synchronized void initialize(TypeDataBase db) {
    4.57 +        Type type = db.lookupType("HeapRegion");
    4.58 +
    4.59 +        grainBytesField = type.getCIntegerField("GrainBytes");
    4.60 +    }
    4.61 +
    4.62 +    static public long grainBytes() {
    4.63 +        return grainBytesField.getValue();
    4.64 +    }
    4.65 +
    4.66 +    public HeapRegion(Address addr) {
    4.67 +        super(addr);
    4.68 +    }
    4.69 +}
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Thu Oct 06 13:28:09 2011 -0400
     5.3 @@ -0,0 +1,102 @@
     5.4 +/*
     5.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     5.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.7 + *
     5.8 + * This code is free software; you can redistribute it and/or modify it
     5.9 + * under the terms of the GNU General Public License version 2 only, as
    5.10 + * published by the Free Software Foundation.
    5.11 + *
    5.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    5.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    5.15 + * version 2 for more details (a copy is included in the LICENSE file that
    5.16 + * accompanied this code).
    5.17 + *
    5.18 + * You should have received a copy of the GNU General Public License version
    5.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    5.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    5.21 + *
    5.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    5.23 + * or visit www.oracle.com if you need additional information or have any
    5.24 + * questions.
    5.25 + *
    5.26 + */
    5.27 +
    5.28 +package sun.jvm.hotspot.gc_implementation.g1;
    5.29 +
    5.30 +import java.util.Iterator;
    5.31 +import java.util.Observable;
    5.32 +import java.util.Observer;
    5.33 +
    5.34 +import sun.jvm.hotspot.debugger.Address;
    5.35 +import sun.jvm.hotspot.runtime.VM;
    5.36 +import sun.jvm.hotspot.runtime.VMObject;
    5.37 +import sun.jvm.hotspot.runtime.VMObjectFactory;
    5.38 +import sun.jvm.hotspot.types.AddressField;
    5.39 +import sun.jvm.hotspot.types.CIntegerField;
    5.40 +import sun.jvm.hotspot.types.Type;
    5.41 +import sun.jvm.hotspot.types.TypeDataBase;
    5.42 +
    5.43 +// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
    5.44 +
    5.45 +public class HeapRegionSeq extends VMObject {
    5.46 +    // HeapRegion** _regions;
    5.47 +    static private AddressField regionsField;
    5.48 +    // size_t _length;
    5.49 +    static private CIntegerField lengthField;
    5.50 +
    5.51 +    static {
    5.52 +        VM.registerVMInitializedObserver(new Observer() {
    5.53 +                public void update(Observable o, Object data) {
    5.54 +                    initialize(VM.getVM().getTypeDataBase());
    5.55 +                }
    5.56 +            });
    5.57 +    }
    5.58 +
    5.59 +    static private synchronized void initialize(TypeDataBase db) {
    5.60 +        Type type = db.lookupType("HeapRegionSeq");
    5.61 +
    5.62 +        regionsField = type.getAddressField("_regions");
    5.63 +        lengthField = type.getCIntegerField("_length");
    5.64 +    }
    5.65 +
    5.66 +    private HeapRegion at(long index) {
    5.67 +        Address arrayAddr = regionsField.getValue(addr);
    5.68 +        // Offset of &_region[index]
    5.69 +        long offset = index * VM.getVM().getAddressSize();
    5.70 +        Address regionAddr = arrayAddr.getAddressAt(offset);
    5.71 +        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
    5.72 +                                                      regionAddr);
    5.73 +    }
    5.74 +
    5.75 +    public long length() {
    5.76 +        return lengthField.getValue(addr);
    5.77 +    }
    5.78 +
    5.79 +    private class HeapRegionIterator implements Iterator<HeapRegion> {
    5.80 +        private long index;
    5.81 +        private long length;
    5.82 +
    5.83 +        @Override
    5.84 +        public boolean hasNext() { return index < length; }
    5.85 +
    5.86 +        @Override
    5.87 +        public HeapRegion next() { return at(index++);    }
    5.88 +
    5.89 +        @Override
    5.90 +        public void remove()     { /* not supported */    }
    5.91 +
    5.92 +        HeapRegionIterator(Address addr) {
    5.93 +            index = 0;
    5.94 +            length = length();
    5.95 +        }
    5.96 +    }
    5.97 +
    5.98 +    public Iterator<HeapRegion> heapRegionIterator() {
    5.99 +        return new HeapRegionIterator(addr);
   5.100 +    }
   5.101 +
   5.102 +    public HeapRegionSeq(Address addr) {
   5.103 +        super(addr);
   5.104 +    }
   5.105 +}
     6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Fri Sep 30 22:54:43 2011 -0700
     6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java	Thu Oct 06 13:28:09 2011 -0400
     6.3 @@ -1,5 +1,5 @@
     6.4  /*
     6.5 - * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
     6.6 + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
     6.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8   *
     6.9   * This code is free software; you can redistribute it and/or modify it
    6.10 @@ -34,6 +34,7 @@
    6.11    public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
    6.12    public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
    6.13    public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
    6.14 +  public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
    6.15    public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
    6.16  
    6.17    public String toString() {
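
Note: the new constant is what G1CollectedHeap.kind() (added above) returns. A hypothetical fragment, not part of this changeset, showing how it could be matched by identity, since CollectedHeapName instances are canonical singletons; HeapKindCheck is a made-up name and an attached target VM is assumed.

    // Hypothetical fragment -- not part of this changeset.
    import sun.jvm.hotspot.gc_interface.CollectedHeap;
    import sun.jvm.hotspot.gc_interface.CollectedHeapName;
    import sun.jvm.hotspot.runtime.VM;

    public class HeapKindCheck {
        public static boolean isG1() {
            CollectedHeap heap = VM.getVM().getUniverse().heap();
            // CollectedHeapName constants are singletons, so identity comparison is safe.
            return heap.kind() == CollectedHeapName.G1_COLLECTED_HEAP;
        }
    }
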
     7.1 --- a/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Fri Sep 30 22:54:43 2011 -0700
     7.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Thu Oct 06 13:28:09 2011 -0400
     7.3 @@ -1,5 +1,5 @@
     7.4  /*
     7.5 - * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
     7.6 + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
     7.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8   *
     7.9   * This code is free software; you can redistribute it and/or modify it
    7.10 @@ -28,6 +28,7 @@
    7.11  import java.util.*;
    7.12  import sun.jvm.hotspot.debugger.*;
    7.13  import sun.jvm.hotspot.gc_interface.*;
    7.14 +import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
    7.15  import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
    7.16  import sun.jvm.hotspot.oops.*;
    7.17  import sun.jvm.hotspot.types.*;
    7.18 @@ -72,6 +73,7 @@
    7.19      heapConstructor = new VirtualConstructor(db);
    7.20      heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
    7.21      heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
    7.22 +    heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
    7.23  
    7.24      mainThreadGroupField   = type.getOopField("_main_thread_group");
    7.25      systemThreadGroupField = type.getOopField("_system_thread_group");
     8.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Fri Sep 30 22:54:43 2011 -0700
     8.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Thu Oct 06 13:28:09 2011 -0400
     8.3 @@ -33,6 +33,7 @@
     8.4  
     8.5  import sun.jvm.hotspot.debugger.*;
     8.6  import sun.jvm.hotspot.gc_interface.*;
     8.7 +import sun.jvm.hotspot.gc_implementation.g1.*;
     8.8  import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
     8.9  import sun.jvm.hotspot.memory.*;
    8.10  import sun.jvm.hotspot.runtime.*;
    8.11 @@ -514,9 +515,16 @@
    8.12  
    8.13    private void addPermGenLiveRegions(List output, CollectedHeap heap) {
    8.14      LiveRegionsCollector lrc = new LiveRegionsCollector(output);
    8.15 -    if (heap instanceof GenCollectedHeap) {
    8.16 -       GenCollectedHeap genHeap = (GenCollectedHeap) heap;
    8.17 -       Generation gen = genHeap.permGen();
    8.18 +    if (heap instanceof SharedHeap) {
    8.19 +       if (Assert.ASSERTS_ENABLED) {
    8.20 +          Assert.that(heap instanceof GenCollectedHeap ||
    8.21 +                      heap instanceof G1CollectedHeap,
    8.22 +                      "Expecting GenCollectedHeap or G1CollectedHeap, " +
    8.23 +                      "but got " + heap.getClass().getName());
    8.24 +       }
    8.25 +       // Handles both GenCollectedHeap and G1CollectedHeap
    8.26 +       SharedHeap sharedHeap = (SharedHeap) heap;
    8.27 +       Generation gen = sharedHeap.permGen();
    8.28         gen.spaceIterate(lrc, true);
    8.29      } else if (heap instanceof ParallelScavengeHeap) {
    8.30         ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
    8.31 @@ -524,8 +532,9 @@
    8.32         addLiveRegions(permGen.objectSpace().getLiveRegions(), output);
    8.33      } else {
    8.34         if (Assert.ASSERTS_ENABLED) {
    8.35 -          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
    8.36 -                             heap.getClass().getName());
    8.37 +          Assert.that(false,
    8.38 +                      "Expecting SharedHeap or ParallelScavengeHeap, " +
    8.39 +                      "but got " + heap.getClass().getName());
    8.40         }
    8.41      }
    8.42    }
    8.43 @@ -588,10 +597,14 @@
    8.44         addLiveRegions(youngGen.fromSpace().getLiveRegions(), liveRegions);
    8.45         PSOldGen oldGen = psh.oldGen();
    8.46         addLiveRegions(oldGen.objectSpace().getLiveRegions(), liveRegions);
    8.47 +    } else if (heap instanceof G1CollectedHeap) {
    8.48 +        G1CollectedHeap g1h = (G1CollectedHeap) heap;
    8.49 +        g1h.heapRegionIterate(lrc);
    8.50      } else {
    8.51         if (Assert.ASSERTS_ENABLED) {
    8.52 -          Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
    8.53 -                              heap.getClass().getName());
    8.54 +          Assert.that(false, "Expecting GenCollectedHeap, G1CollectedHeap, " +
    8.55 +                      "or ParallelScavengeHeap, but got " +
    8.56 +                      heap.getClass().getName());
    8.57         }
    8.58      }
    8.59  
     9.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Fri Sep 30 22:54:43 2011 -0700
     9.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Oct 06 13:28:09 2011 -0400
     9.3 @@ -1,5 +1,5 @@
     9.4  /*
     9.5 - * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
     9.6 + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
     9.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.8   *
     9.9   * This code is free software; you can redistribute it and/or modify it
    9.10 @@ -26,11 +26,11 @@
    9.11  
    9.12  import java.util.*;
    9.13  import sun.jvm.hotspot.gc_interface.*;
    9.14 +import sun.jvm.hotspot.gc_implementation.g1.*;
    9.15  import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
    9.16  import sun.jvm.hotspot.gc_implementation.shared.*;
    9.17  import sun.jvm.hotspot.memory.*;
    9.18  import sun.jvm.hotspot.runtime.*;
    9.19 -import sun.jvm.hotspot.tools.*;
    9.20  
    9.21  public class HeapSummary extends Tool {
    9.22  
    9.23 @@ -70,32 +70,45 @@
    9.24        System.out.println();
    9.25        System.out.println("Heap Usage:");
    9.26  
    9.27 -      if (heap instanceof GenCollectedHeap) {
    9.28 -         GenCollectedHeap genHeap = (GenCollectedHeap) heap;
    9.29 -         for (int n = 0; n < genHeap.nGens(); n++) {
    9.30 -            Generation gen = genHeap.getGen(n);
    9.31 -            if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
    9.32 -               System.out.println("New Generation (Eden + 1 Survivor Space):");
    9.33 -               printGen(gen);
    9.34 +      if (heap instanceof SharedHeap) {
    9.35 +         SharedHeap sharedHeap = (SharedHeap) heap;
    9.36 +         if (sharedHeap instanceof GenCollectedHeap) {
    9.37 +            GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
    9.38 +            for (int n = 0; n < genHeap.nGens(); n++) {
    9.39 +               Generation gen = genHeap.getGen(n);
    9.40 +               if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
    9.41 +                  System.out.println("New Generation (Eden + 1 Survivor Space):");
    9.42 +                  printGen(gen);
    9.43  
    9.44 -               ContiguousSpace eden = ((DefNewGeneration)gen).eden();
    9.45 -               System.out.println("Eden Space:");
    9.46 -               printSpace(eden);
    9.47 +                  ContiguousSpace eden = ((DefNewGeneration)gen).eden();
    9.48 +                  System.out.println("Eden Space:");
    9.49 +                  printSpace(eden);
    9.50  
    9.51 -               ContiguousSpace from = ((DefNewGeneration)gen).from();
    9.52 -               System.out.println("From Space:");
    9.53 -               printSpace(from);
    9.54 +                  ContiguousSpace from = ((DefNewGeneration)gen).from();
    9.55 +                  System.out.println("From Space:");
    9.56 +                  printSpace(from);
    9.57  
    9.58 -               ContiguousSpace to = ((DefNewGeneration)gen).to();
    9.59 -               System.out.println("To Space:");
    9.60 -               printSpace(to);
    9.61 -            } else {
    9.62 -               System.out.println(gen.name() + ":");
    9.63 -               printGen(gen);
    9.64 +                  ContiguousSpace to = ((DefNewGeneration)gen).to();
    9.65 +                  System.out.println("To Space:");
    9.66 +                  printSpace(to);
    9.67 +               } else {
    9.68 +                  System.out.println(gen.name() + ":");
    9.69 +                  printGen(gen);
    9.70 +               }
    9.71              }
    9.72 +         } else if (sharedHeap instanceof G1CollectedHeap) {
    9.73 +             G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
    9.74 +             G1MonitoringSupport g1mm = g1h.g1mm();
    9.75 +             System.out.println("G1 Young Generation");
    9.76 +             printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
    9.77 +             printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
    9.78 +             printG1Space("To Space:", 0, 0);
    9.79 +             printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
    9.80 +         } else {
    9.81 +             throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
    9.82           }
    9.83 -         // Perm generation
    9.84 -         Generation permGen = genHeap.permGen();
    9.85 +         // Perm generation shared by the above
    9.86 +         Generation permGen = sharedHeap.permGen();
    9.87           System.out.println("Perm Generation:");
    9.88           printGen(permGen);
    9.89        } else if (heap instanceof ParallelScavengeHeap) {
    9.90 @@ -119,7 +132,7 @@
    9.91           printValMB("free     = ", permFree);
    9.92           System.out.println(alignment + (double)permGen.used() * 100.0 / permGen.capacity() + "% used");
    9.93        } else {
    9.94 -         throw new RuntimeException("unknown heap type : " + heap.getClass());
    9.95 +         throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass());
    9.96        }
    9.97     }
    9.98  
    9.99 @@ -151,6 +164,14 @@
   9.100            return;
   9.101         }
   9.102  
   9.103 +       l = getFlagValue("UseG1GC", flagMap);
   9.104 +       if (l == 1L) {
   9.105 +           System.out.print("Garbage-First (G1) GC ");
   9.106 +           l = getFlagValue("ParallelGCThreads", flagMap);
   9.107 +           System.out.println("with " + l + " thread(s)");
   9.108 +           return;
   9.109 +       }
   9.110 +
   9.111         System.out.println("Mark Sweep Compact GC");
   9.112     }
   9.113  
   9.114 @@ -191,6 +212,16 @@
   9.115        System.out.println(alignment +  (double)space.used() * 100.0 / space.capacity() + "% used");
   9.116     }
   9.117  
   9.118 +   private void printG1Space(String spaceName, long used, long capacity) {
   9.119 +      long free = capacity - used;
   9.120 +      System.out.println(spaceName);
   9.121 +      printValMB("capacity = ", capacity);
   9.122 +      printValMB("used     = ", used);
   9.123 +      printValMB("free     = ", free);
   9.124 +      double occPerc = (capacity > 0) ? (double) used * 100.0 / capacity : 0.0;
   9.125 +      System.out.println(alignment + occPerc + "% used");
   9.126 +   }
   9.127 +
   9.128     private static final double FACTOR = 1024*1024;
   9.129     private void printValMB(String title, long value) {
   9.130        if (value < 0) {
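
Note (for context, not part of this diff): HeapSummary is typically launched against a live process roughly as below, with the sa-jdi.jar location depending on the JDK layout. When the target VM runs with -XX:+UseG1GC, the new branch above reports the G1 young generation from the G1MonitoringSupport counters (Eden and From spaces, plus a To space shown as zero), then the G1 old generation, followed by the shared perm generation.

    java -cp $JAVA_HOME/lib/sa-jdi.jar sun.jvm.hotspot.tools.HeapSummary <pid>
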
    10.1 --- a/make/sa.files	Fri Sep 30 22:54:43 2011 -0700
    10.2 +++ b/make/sa.files	Thu Oct 06 13:28:09 2011 -0400
    10.3 @@ -1,5 +1,5 @@
    10.4  #
    10.5 -# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
    10.6 +# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
    10.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8  #
    10.9  # This code is free software; you can redistribute it and/or modify it
   10.10 @@ -82,6 +82,7 @@
   10.11  $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \
   10.12  $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \
   10.13  $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \
   10.14 +$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/g1/*.java \
   10.15  $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
   10.16  $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \
   10.17  $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_interface/*.java \
    11.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Sep 30 22:54:43 2011 -0700
    11.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Oct 06 13:28:09 2011 -0400
    11.3 @@ -2004,7 +2004,7 @@
    11.4    ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
    11.5  
    11.6    ref_processor()->set_enqueuing_is_done(false);
    11.7 -  ref_processor()->enable_discovery();
    11.8 +  ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
    11.9    ref_processor()->setup_policy(clear_all_soft_refs);
   11.10    // If an asynchronous collection finishes, the _modUnionTable is
   11.11    // all clear.  If we are assuming the collection from an asynchronous
   11.12 @@ -3490,8 +3490,8 @@
   11.13      MutexLockerEx x(bitMapLock(),
   11.14                      Mutex::_no_safepoint_check_flag);
   11.15      checkpointRootsInitialWork(asynch);
   11.16 -    rp->verify_no_references_recorded();
   11.17 -    rp->enable_discovery(); // enable ("weak") refs discovery
   11.18 +    // enable ("weak") refs discovery
   11.19 +    rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
   11.20      _collectorState = Marking;
   11.21    } else {
   11.22      // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
   11.23 @@ -3503,7 +3503,8 @@
   11.24             "ref discovery for this generation kind");
   11.25      // already have locks
   11.26      checkpointRootsInitialWork(asynch);
   11.27 -    rp->enable_discovery(); // now enable ("weak") refs discovery
   11.28 +    // now enable ("weak") refs discovery
   11.29 +    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
   11.30      _collectorState = Marking;
   11.31    }
   11.32    SpecializationStats::print();
    12.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Sep 30 22:54:43 2011 -0700
    12.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Oct 06 13:28:09 2011 -0400
    12.3 @@ -818,10 +818,10 @@
    12.4    NoteStartOfMarkHRClosure startcl;
    12.5    g1h->heap_region_iterate(&startcl);
    12.6  
    12.7 -  // Start weak-reference discovery.
    12.8 -  ReferenceProcessor* rp = g1h->ref_processor();
    12.9 -  rp->verify_no_references_recorded();
   12.10 -  rp->enable_discovery(); // enable ("weak") refs discovery
   12.11 +  // Start Concurrent Marking weak-reference discovery.
   12.12 +  ReferenceProcessor* rp = g1h->ref_processor_cm();
   12.13 +  // enable ("weak") refs discovery
   12.14 +  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   12.15    rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
   12.16  
   12.17    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   12.18 @@ -1133,6 +1133,7 @@
   12.19    // world is stopped at this checkpoint
   12.20    assert(SafepointSynchronize::is_at_safepoint(),
   12.21           "world should be stopped");
   12.22 +
   12.23    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   12.24  
   12.25    // If a full collection has happened, we shouldn't do this.
   12.26 @@ -1837,6 +1838,10 @@
   12.27    size_t cleaned_up_bytes = start_used_bytes - g1h->used();
   12.28    g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
   12.29  
   12.30 +  // Clean up will have freed any regions completely full of garbage.
   12.31 +  // Update the soft reference policy with the new heap occupancy.
   12.32 +  Universe::update_heap_info_at_gc();
   12.33 +
   12.34    // We need to make this be a "collection" so any collection pause that
   12.35    // races with it goes around and waits for completeCleanup to finish.
   12.36    g1h->increment_total_collections();
   12.37 @@ -2072,8 +2077,10 @@
   12.38    }
   12.39  };
   12.40  
   12.41 -// Implementation of AbstractRefProcTaskExecutor for G1
   12.42 -class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
   12.43 +// Implementation of AbstractRefProcTaskExecutor for parallel
   12.44 +// reference processing at the end of G1 concurrent marking
   12.45 +
   12.46 +class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
   12.47  private:
   12.48    G1CollectedHeap* _g1h;
   12.49    ConcurrentMark*  _cm;
   12.50 @@ -2082,7 +2089,7 @@
   12.51    int              _active_workers;
   12.52  
   12.53  public:
   12.54 -  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
   12.55 +  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
   12.56                          ConcurrentMark* cm,
   12.57                          CMBitMap* bitmap,
   12.58                          WorkGang* workers,
   12.59 @@ -2096,7 +2103,7 @@
   12.60    virtual void execute(EnqueueTask& task);
   12.61  };
   12.62  
   12.63 -class G1RefProcTaskProxy: public AbstractGangTask {
   12.64 +class G1CMRefProcTaskProxy: public AbstractGangTask {
   12.65    typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   12.66    ProcessTask&     _proc_task;
   12.67    G1CollectedHeap* _g1h;
   12.68 @@ -2104,7 +2111,7 @@
   12.69    CMBitMap*        _bitmap;
   12.70  
   12.71  public:
   12.72 -  G1RefProcTaskProxy(ProcessTask& proc_task,
   12.73 +  G1CMRefProcTaskProxy(ProcessTask& proc_task,
   12.74                       G1CollectedHeap* g1h,
   12.75                       ConcurrentMark* cm,
   12.76                       CMBitMap* bitmap) :
   12.77 @@ -2122,10 +2129,10 @@
   12.78    }
   12.79  };
   12.80  
   12.81 -void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
   12.82 +void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   12.83    assert(_workers != NULL, "Need parallel worker threads.");
   12.84  
   12.85 -  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
   12.86 +  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
   12.87  
   12.88    // We need to reset the phase for each task execution so that
   12.89    // the termination protocol of CMTask::do_marking_step works.
   12.90 @@ -2135,12 +2142,12 @@
   12.91    _g1h->set_par_threads(0);
   12.92  }
   12.93  
   12.94 -class G1RefEnqueueTaskProxy: public AbstractGangTask {
   12.95 +class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
   12.96    typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   12.97    EnqueueTask& _enq_task;
   12.98  
   12.99  public:
  12.100 -  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
  12.101 +  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
  12.102      AbstractGangTask("Enqueue reference objects in parallel"),
  12.103      _enq_task(enq_task)
  12.104    { }
  12.105 @@ -2150,10 +2157,10 @@
  12.106    }
  12.107  };
  12.108  
  12.109 -void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  12.110 +void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  12.111    assert(_workers != NULL, "Need parallel worker threads.");
  12.112  
  12.113 -  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
  12.114 +  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
  12.115  
  12.116    _g1h->set_par_threads(_active_workers);
  12.117    _workers->run_task(&enq_task_proxy);
  12.118 @@ -2163,71 +2170,84 @@
  12.119  void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  12.120    ResourceMark rm;
  12.121    HandleMark   hm;
  12.122 -  G1CollectedHeap* g1h   = G1CollectedHeap::heap();
  12.123 -  ReferenceProcessor* rp = g1h->ref_processor();
  12.124 -
  12.125 -  // See the comment in G1CollectedHeap::ref_processing_init()
  12.126 -  // about how reference processing currently works in G1.
  12.127 -
  12.128 -  // Process weak references.
  12.129 -  rp->setup_policy(clear_all_soft_refs);
  12.130 -  assert(_markStack.isEmpty(), "mark stack should be empty");
  12.131 -
  12.132 -  G1CMIsAliveClosure   g1_is_alive(g1h);
  12.133 -  G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
  12.134 -  G1CMDrainMarkingStackClosure
  12.135 -    g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
  12.136 -  // We use the work gang from the G1CollectedHeap and we utilize all
  12.137 -  // the worker threads.
  12.138 -  int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
  12.139 -  active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
  12.140 -
  12.141 -  G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
  12.142 -                                          g1h->workers(), active_workers);
  12.143 -
  12.144 -
  12.145 -  if (rp->processing_is_mt()) {
  12.146 -    // Set the degree of MT here.  If the discovery is done MT, there
  12.147 -    // may have been a different number of threads doing the discovery
  12.148 -    // and a different number of discovered lists may have Ref objects.
  12.149 -    // That is OK as long as the Reference lists are balanced (see
  12.150 -    // balance_all_queues() and balance_queues()).
  12.151 -    rp->set_active_mt_degree(active_workers);
  12.152 -
  12.153 -    rp->process_discovered_references(&g1_is_alive,
  12.154 +
  12.155 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  12.156 +
  12.157 +  // Is alive closure.
  12.158 +  G1CMIsAliveClosure g1_is_alive(g1h);
  12.159 +
  12.160 +  // Inner scope to exclude the cleaning of the string and symbol
  12.161 +  // tables from the displayed time.
  12.162 +  {
  12.163 +    bool verbose = PrintGC && PrintGCDetails;
  12.164 +    if (verbose) {
  12.165 +      gclog_or_tty->put(' ');
  12.166 +    }
  12.167 +    TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
  12.168 +
  12.169 +    ReferenceProcessor* rp = g1h->ref_processor_cm();
  12.170 +
  12.171 +    // See the comment in G1CollectedHeap::ref_processing_init()
  12.172 +    // about how reference processing currently works in G1.
  12.173 +
  12.174 +    // Process weak references.
  12.175 +    rp->setup_policy(clear_all_soft_refs);
  12.176 +    assert(_markStack.isEmpty(), "mark stack should be empty");
  12.177 +
  12.178 +    G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
  12.179 +    G1CMDrainMarkingStackClosure
  12.180 +      g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
  12.181 +
  12.182 +    // We use the work gang from the G1CollectedHeap and we utilize all
  12.183 +    // the worker threads.
  12.184 +    int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
  12.185 +    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
  12.186 +
  12.187 +    G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
  12.188 +                                              g1h->workers(), active_workers);
  12.189 +
  12.190 +    if (rp->processing_is_mt()) {
  12.191 +      // Set the degree of MT here.  If the discovery is done MT, there
  12.192 +      // may have been a different number of threads doing the discovery
  12.193 +      // and a different number of discovered lists may have Ref objects.
  12.194 +      // That is OK as long as the Reference lists are balanced (see
  12.195 +      // balance_all_queues() and balance_queues()).
  12.196 +      rp->set_active_mt_degree(active_workers);
  12.197 +
  12.198 +      rp->process_discovered_references(&g1_is_alive,
  12.199                                        &g1_keep_alive,
  12.200                                        &g1_drain_mark_stack,
  12.201                                        &par_task_executor);
  12.202  
  12.203 -    // The work routines of the parallel keep_alive and drain_marking_stack
  12.204 -    // will set the has_overflown flag if we overflow the global marking
  12.205 -    // stack.
  12.206 -  } else {
  12.207 -    rp->process_discovered_references(&g1_is_alive,
  12.208 -                                      &g1_keep_alive,
  12.209 -                                      &g1_drain_mark_stack,
  12.210 -                                      NULL);
  12.211 -
  12.212 +      // The work routines of the parallel keep_alive and drain_marking_stack
  12.213 +      // will set the has_overflown flag if we overflow the global marking
  12.214 +      // stack.
  12.215 +    } else {
  12.216 +      rp->process_discovered_references(&g1_is_alive,
  12.217 +                                        &g1_keep_alive,
  12.218 +                                        &g1_drain_mark_stack,
  12.219 +                                        NULL);
  12.220 +    }
  12.221 +
  12.222 +    assert(_markStack.overflow() || _markStack.isEmpty(),
  12.223 +            "mark stack should be empty (unless it overflowed)");
  12.224 +    if (_markStack.overflow()) {
  12.225 +      // Should have been done already when we tried to push an
  12.226 +      // entry on to the global mark stack. But let's do it again.
  12.227 +      set_has_overflown();
  12.228 +    }
  12.229 +
  12.230 +    if (rp->processing_is_mt()) {
  12.231 +      assert(rp->num_q() == active_workers, "why not");
  12.232 +      rp->enqueue_discovered_references(&par_task_executor);
  12.233 +    } else {
  12.234 +      rp->enqueue_discovered_references();
  12.235 +    }
  12.236 +
  12.237 +    rp->verify_no_references_recorded();
  12.238 +    assert(!rp->discovery_enabled(), "Post condition");
  12.239    }
  12.240  
  12.241 -  assert(_markStack.overflow() || _markStack.isEmpty(),
  12.242 -      "mark stack should be empty (unless it overflowed)");
  12.243 -  if (_markStack.overflow()) {
  12.244 -    // Should have been done already when we tried to push an
  12.245 -    // entry on to the global mark stack. But let's do it again.
  12.246 -    set_has_overflown();
  12.247 -  }
  12.248 -
  12.249 -  if (rp->processing_is_mt()) {
  12.250 -    assert(rp->num_q() == active_workers, "why not");
  12.251 -    rp->enqueue_discovered_references(&par_task_executor);
  12.252 -  } else {
  12.253 -    rp->enqueue_discovered_references();
  12.254 -  }
  12.255 -
  12.256 -  rp->verify_no_references_recorded();
  12.257 -  assert(!rp->discovery_enabled(), "should have been disabled");
  12.258 -
  12.259    // Now clean up stale oops in StringTable
  12.260    StringTable::unlink(&g1_is_alive);
  12.261    // Clean up unreferenced symbols in symbol table.
  12.262 @@ -3329,7 +3349,7 @@
  12.263    assert(_ref_processor == NULL, "should be initialized to NULL");
  12.264  
  12.265    if (G1UseConcMarkReferenceProcessing) {
  12.266 -    _ref_processor = g1h->ref_processor();
  12.267 +    _ref_processor = g1h->ref_processor_cm();
  12.268      assert(_ref_processor != NULL, "should not be NULL");
  12.269    }
  12.270  }
  12.271 @@ -4564,6 +4584,15 @@
  12.272                   G1PPRL_DOUBLE_H_FORMAT,
  12.273                   "type", "address-range",
  12.274                   "used", "prev-live", "next-live", "gc-eff");
  12.275 +  _out->print_cr(G1PPRL_LINE_PREFIX
  12.276 +                 G1PPRL_TYPE_H_FORMAT
  12.277 +                 G1PPRL_ADDR_BASE_H_FORMAT
  12.278 +                 G1PPRL_BYTE_H_FORMAT
  12.279 +                 G1PPRL_BYTE_H_FORMAT
  12.280 +                 G1PPRL_BYTE_H_FORMAT
  12.281 +                 G1PPRL_DOUBLE_H_FORMAT,
  12.282 +                 "", "",
  12.283 +                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
  12.284  }
  12.285  
  12.286  // It takes as a parameter a reference to one of the _hum_* fields, it
  12.287 @@ -4575,7 +4604,7 @@
  12.288    // The > 0 check is to deal with the prev and next live bytes which
  12.289    // could be 0.
  12.290    if (*hum_bytes > 0) {
  12.291 -    bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes);
  12.292 +    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
  12.293      *hum_bytes -= bytes;
  12.294    }
  12.295    return bytes;
    13.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Sep 30 22:54:43 2011 -0700
    13.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Oct 06 13:28:09 2011 -0400
    13.3 @@ -366,8 +366,8 @@
    13.4    friend class CMConcurrentMarkingTask;
    13.5    friend class G1ParNoteEndTask;
    13.6    friend class CalcLiveObjectsClosure;
    13.7 -  friend class G1RefProcTaskProxy;
    13.8 -  friend class G1RefProcTaskExecutor;
    13.9 +  friend class G1CMRefProcTaskProxy;
   13.10 +  friend class G1CMRefProcTaskExecutor;
   13.11    friend class G1CMParKeepAliveAndDrainClosure;
   13.12    friend class G1CMParDrainMarkingStackClosure;
   13.13  
    14.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 30 22:54:43 2011 -0700
    14.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 06 13:28:09 2011 -0400
    14.3 @@ -42,6 +42,7 @@
    14.4  #include "memory/gcLocker.inline.hpp"
    14.5  #include "memory/genOopClosures.inline.hpp"
    14.6  #include "memory/generationSpec.hpp"
    14.7 +#include "memory/referenceProcessor.hpp"
    14.8  #include "oops/oop.inline.hpp"
    14.9  #include "oops/oop.pcgc.inline.hpp"
   14.10  #include "runtime/aprofiler.hpp"
   14.11 @@ -551,8 +552,7 @@
   14.12  }
   14.13  
   14.14  HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
   14.15 -  assert(!isHumongous(word_size) ||
   14.16 -                                  word_size <= (size_t) HeapRegion::GrainWords,
   14.17 +  assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
   14.18           "the only time we use this to allocate a humongous region is "
   14.19           "when we are allocating a single humongous region");
   14.20  
   14.21 @@ -815,6 +815,11 @@
   14.22      result =
   14.23        humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
   14.24      assert(result != NULL, "it should always return a valid result");
   14.25 +
   14.26 +    // A successful humongous object allocation changes the used space
   14.27 +    // information of the old generation so we need to recalculate the
   14.28 +    // sizes and update the jstat counters here.
   14.29 +    g1mm()->update_sizes();
   14.30    }
   14.31  
   14.32    verify_region_sets_optional();
   14.33 @@ -1164,7 +1169,7 @@
   14.34        if (!hr->isHumongous()) {
   14.35          _hr_printer->post_compaction(hr, G1HRPrinter::Old);
   14.36        } else if (hr->startsHumongous()) {
   14.37 -        if (hr->capacity() == (size_t) HeapRegion::GrainBytes) {
   14.38 +        if (hr->capacity() == HeapRegion::GrainBytes) {
   14.39            // single humongous region
   14.40            _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
   14.41          } else {
   14.42 @@ -1244,15 +1249,11 @@
   14.43  
   14.44      COMPILER2_PRESENT(DerivedPointerTable::clear());
   14.45  
   14.46 -    // We want to discover references, but not process them yet.
   14.47 -    // This mode is disabled in
   14.48 -    // instanceRefKlass::process_discovered_references if the
   14.49 -    // generation does some collection work, or
   14.50 -    // instanceRefKlass::enqueue_discovered_references if the
   14.51 -    // generation returns without doing any work.
   14.52 -    ref_processor()->disable_discovery();
   14.53 -    ref_processor()->abandon_partial_discovery();
   14.54 -    ref_processor()->verify_no_references_recorded();
   14.55 +    // Disable discovery and empty the discovered lists
   14.56 +    // for the CM ref processor.
   14.57 +    ref_processor_cm()->disable_discovery();
   14.58 +    ref_processor_cm()->abandon_partial_discovery();
   14.59 +    ref_processor_cm()->verify_no_references_recorded();
   14.60  
   14.61      // Abandon current iterations of concurrent marking and concurrent
   14.62      // refinement, if any are in progress.
   14.63 @@ -1280,31 +1281,33 @@
   14.64      empty_young_list();
   14.65      g1_policy()->set_full_young_gcs(true);
   14.66  
   14.67 -    // See the comment in G1CollectedHeap::ref_processing_init() about
   14.68 +    // See the comments in g1CollectedHeap.hpp and
   14.69 +    // G1CollectedHeap::ref_processing_init() about
   14.70      // how reference processing currently works in G1.
   14.71  
   14.72 -    // Temporarily make reference _discovery_ single threaded (non-MT).
   14.73 -    ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
   14.74 -
   14.75 -    // Temporarily make refs discovery atomic
   14.76 -    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
   14.77 -
   14.78 -    // Temporarily clear _is_alive_non_header
   14.79 -    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
   14.80 -
   14.81 -    ref_processor()->enable_discovery();
   14.82 -    ref_processor()->setup_policy(do_clear_all_soft_refs);
   14.83 +    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
   14.84 +    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
   14.85 +
   14.86 +    // Temporarily clear the STW ref processor's _is_alive_non_header field.
   14.87 +    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
   14.88 +
   14.89 +    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   14.90 +    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
   14.91 +
   14.92      // Do collection work
   14.93      {
   14.94        HandleMark hm;  // Discard invalid handles created during gc
   14.95 -      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
   14.96 +      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
   14.97      }
   14.98 +
   14.99      assert(free_regions() == 0, "we should not have added any free regions");
  14.100      rebuild_region_lists();
  14.101  
  14.102      _summary_bytes_used = recalculate_used();
  14.103  
  14.104 -    ref_processor()->enqueue_discovered_references();
  14.105 +    // Enqueue any discovered reference objects that have
  14.106 +    // not been removed from the discovered lists.
  14.107 +    ref_processor_stw()->enqueue_discovered_references();
  14.108  
  14.109      COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  14.110  
  14.111 @@ -1319,7 +1322,16 @@
  14.112                         /* option      */ VerifyOption_G1UsePrevMarking);
  14.113  
  14.114      }
  14.115 -    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  14.116 +
  14.117 +    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  14.118 +    ref_processor_stw()->verify_no_references_recorded();
  14.119 +
  14.120 +    // Note: since we've just done a full GC, concurrent
  14.121 +    // marking is no longer active. Therefore we need not
  14.122 +    // re-enable reference discovery for the CM ref processor.
  14.123 +    // That will be done at the start of the next marking cycle.
  14.124 +    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
  14.125 +    ref_processor_cm()->verify_no_references_recorded();
  14.126  
  14.127      reset_gc_time_stamp();
  14.128      // Since everything potentially moved, we will clear all remembered
  14.129 @@ -1414,7 +1426,7 @@
  14.130    if (PrintHeapAtGC) {
  14.131      Universe::print_heap_after_gc();
  14.132    }
  14.133 -  g1mm()->update_counters();
  14.134 +  g1mm()->update_sizes();
  14.135    post_full_gc_dump();
  14.136  
  14.137    return true;
  14.138 @@ -1772,14 +1784,17 @@
  14.139    _g1_policy(policy_),
  14.140    _dirty_card_queue_set(false),
  14.141    _into_cset_dirty_card_queue_set(false),
  14.142 -  _is_alive_closure(this),
  14.143 -  _ref_processor(NULL),
  14.144 +  _is_alive_closure_cm(this),
  14.145 +  _is_alive_closure_stw(this),
  14.146 +  _ref_processor_cm(NULL),
  14.147 +  _ref_processor_stw(NULL),
  14.148    _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  14.149    _bot_shared(NULL),
  14.150    _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  14.151    _evac_failure_scan_stack(NULL) ,
  14.152    _mark_in_progress(false),
  14.153    _cg1r(NULL), _summary_bytes_used(0),
  14.154 +  _g1mm(NULL),
  14.155    _refine_cte_cl(NULL),
  14.156    _full_collection(false),
  14.157    _free_list("Master Free List"),
  14.158 @@ -1955,7 +1970,7 @@
  14.159  
  14.160    size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  14.161    guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  14.162 -  guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
  14.163 +  guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
  14.164              "too many cards per region");
  14.165  
  14.166    HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
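The guarantee above bounds HeapRegion::CardsPerRegion by the largest value a signed CardIdx_t can hold. A minimal standalone sketch of that arithmetic, assuming for illustration that CardIdx_t is a 16-bit signed type (the real width comes from the HotSpot headers):

#include <cstdint>
#include <cstdio>

int main() {
  typedef int16_t CardIdx_t;          // assumption for this sketch only
  const size_t BitsPerByte = 8;
  // Largest positive value representable by CardIdx_t: 2^(bits-1) - 1.
  size_t max_cards_per_region =
      ((size_t)1 << (sizeof(CardIdx_t) * BitsPerByte - 1)) - 1;
  std::printf("max cards per region = %zu\n", max_cards_per_region);  // 32767
  return 0;
}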
  14.167 @@ -2059,7 +2074,7 @@
  14.168  
  14.169    // Do create of the monitoring and management support so that
  14.170    // values in the heap have been properly initialized.
  14.171 -  _g1mm = new G1MonitoringSupport(this, &_g1_storage);
  14.172 +  _g1mm = new G1MonitoringSupport(this);
  14.173  
  14.174    return JNI_OK;
  14.175  }
  14.176 @@ -2067,34 +2082,81 @@
  14.177  void G1CollectedHeap::ref_processing_init() {
  14.178    // Reference processing in G1 currently works as follows:
  14.179    //
  14.180 -  // * There is only one reference processor instance that
  14.181 -  //   'spans' the entire heap. It is created by the code
  14.182 -  //   below.
  14.183 -  // * Reference discovery is not enabled during an incremental
  14.184 -  //   pause (see 6484982).
  14.185 -  // * Discoverered refs are not enqueued nor are they processed
  14.186 -  //   during an incremental pause (see 6484982).
  14.187 -  // * Reference discovery is enabled at initial marking.
  14.188 -  // * Reference discovery is disabled and the discovered
  14.189 -  //   references processed etc during remarking.
  14.190 -  // * Reference discovery is MT (see below).
  14.191 -  // * Reference discovery requires a barrier (see below).
  14.192 -  // * Reference processing is currently not MT (see 6608385).
  14.193 -  // * A full GC enables (non-MT) reference discovery and
  14.194 -  //   processes any discovered references.
  14.195 +  // * There are two reference processor instances. One is
  14.196 +  //   used to record and process discovered references
  14.197 +  //   during concurrent marking; the other is used to
  14.198 +  //   record and process references during STW pauses
  14.199 +  //   (both full and incremental).
  14.200 +  // * Both ref processors need to 'span' the entire heap as
  14.201 +  //   the regions in the collection set may be dotted around.
  14.202 +  //
  14.203 +  // * For the concurrent marking ref processor:
  14.204 +  //   * Reference discovery is enabled at initial marking.
  14.205 +  //   * Reference discovery is disabled and the discovered
  14.206 +  //     references processed etc during remarking.
  14.207 +  //   * Reference discovery is MT (see below).
  14.208 +  //   * Reference discovery requires a barrier (see below).
  14.209 +  //   * Reference processing may or may not be MT
  14.210 +  //     (depending on the value of ParallelRefProcEnabled
  14.211 +  //     and ParallelGCThreads).
  14.212 +  //   * A full GC disables reference discovery by the CM
  14.213 +  //     ref processor and abandons any entries on it's
   14.214 +  //     ref processor and abandons any entries on its
  14.215 +  //
  14.216 +  // * For the STW processor:
  14.217 +  //   * Non MT discovery is enabled at the start of a full GC.
  14.218 +  //   * Processing and enqueueing during a full GC is non-MT.
  14.219 +  //   * During a full GC, references are processed after marking.
  14.220 +  //
  14.221 +  //   * Discovery (may or may not be MT) is enabled at the start
  14.222 +  //     of an incremental evacuation pause.
  14.223 +  //   * References are processed near the end of a STW evacuation pause.
  14.224 +  //   * For both types of GC:
  14.225 +  //     * Discovery is atomic - i.e. not concurrent.
  14.226 +  //     * Reference discovery will not need a barrier.
  14.227  
  14.228    SharedHeap::ref_processing_init();
  14.229    MemRegion mr = reserved_region();
  14.230 -  _ref_processor =
  14.231 +
  14.232 +  // Concurrent Mark ref processor
  14.233 +  _ref_processor_cm =
  14.234      new ReferenceProcessor(mr,    // span
  14.235 -                           ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
  14.236 -                           (int) ParallelGCThreads,   // degree of mt processing
  14.237 -                           ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
  14.238 -                           (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
  14.239 -                           false,                     // Reference discovery is not atomic
  14.240 -                           &_is_alive_closure,        // is alive closure for efficiency
  14.241 -                           true);                     // Setting next fields of discovered
  14.242 -                                                      // lists requires a barrier.
  14.243 +                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
  14.244 +                                // mt processing
  14.245 +                           (int) ParallelGCThreads,
  14.246 +                                // degree of mt processing
  14.247 +                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
  14.248 +                                // mt discovery
  14.249 +                           (int) MAX2(ParallelGCThreads, ConcGCThreads),
  14.250 +                                // degree of mt discovery
  14.251 +                           false,
  14.252 +                                // Reference discovery is not atomic
  14.253 +                           &_is_alive_closure_cm,
  14.254 +                                // is alive closure
  14.255 +                                // (for efficiency/performance)
  14.256 +                           true);
  14.257 +                                // Setting next fields of discovered
  14.258 +                                // lists requires a barrier.
  14.259 +
  14.260 +  // STW ref processor
  14.261 +  _ref_processor_stw =
  14.262 +    new ReferenceProcessor(mr,    // span
  14.263 +                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
  14.264 +                                // mt processing
  14.265 +                           MAX2((int)ParallelGCThreads, 1),
  14.266 +                                // degree of mt processing
  14.267 +                           (ParallelGCThreads > 1),
  14.268 +                                // mt discovery
  14.269 +                           MAX2((int)ParallelGCThreads, 1),
  14.270 +                                // degree of mt discovery
  14.271 +                           true,
  14.272 +                                // Reference discovery is atomic
  14.273 +                           &_is_alive_closure_stw,
  14.274 +                                // is alive closure
  14.275 +                                // (for efficiency/performance)
  14.276 +                           false);
   14.277 +                                // Setting next fields of discovered
   14.278 +                                // lists does not require a barrier.
  14.279  }
  14.280  
  14.281  size_t G1CollectedHeap::capacity() const {
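To make the split between the two processors easier to see at a glance, here is a minimal, self-contained sketch (not HotSpot code; the struct and function names are invented for illustration) of how their MT and atomicity settings differ, following the constructor arguments in the hunk above:

#include <algorithm>

// Hypothetical summary of the per-processor settings described above.
struct RefProcConfig {
  bool     mt_processing;
  unsigned processing_degree;
  bool     mt_discovery;
  unsigned discovery_degree;
  bool     atomic_discovery;
  bool     discovered_list_needs_barrier;
};

RefProcConfig cm_config(unsigned parallel_gc_threads,
                        unsigned conc_gc_threads,
                        bool     parallel_ref_proc_enabled) {
  // Concurrent marking processor: discovery races with the mutator,
  // so it is non-atomic and needs a barrier on the discovered lists.
  return { parallel_ref_proc_enabled && parallel_gc_threads > 1,
           parallel_gc_threads,
           parallel_gc_threads > 1 || conc_gc_threads > 1,
           std::max(parallel_gc_threads, conc_gc_threads),
           /* atomic_discovery */ false,
           /* needs_barrier    */ true };
}

RefProcConfig stw_config(unsigned parallel_gc_threads,
                         bool     parallel_ref_proc_enabled) {
  // STW processor: discovery happens inside a safepoint, so it is
  // atomic and no barrier on the discovered lists is needed.
  return { parallel_ref_proc_enabled && parallel_gc_threads > 1,
           std::max(parallel_gc_threads, 1u),
           parallel_gc_threads > 1,
           std::max(parallel_gc_threads, 1u),
           /* atomic_discovery */ true,
           /* needs_barrier    */ false };
}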
  14.282 @@ -2988,8 +3050,7 @@
  14.283              _g1_storage.high(),
  14.284              _g1_storage.high_boundary());
  14.285    st->cr();
  14.286 -  st->print("  region size " SIZE_FORMAT "K, ",
  14.287 -            HeapRegion::GrainBytes/K);
  14.288 +  st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  14.289    size_t young_regions = _young_list->length();
  14.290    st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
  14.291              young_regions, young_regions * HeapRegion::GrainBytes / K);
  14.292 @@ -3117,6 +3178,10 @@
  14.293    COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  14.294                          "derived pointer present"));
  14.295    // always_do_update_barrier = true;
  14.296 +
  14.297 +  // We have just completed a GC. Update the soft reference
  14.298 +  // policy with the new heap occupancy
  14.299 +  Universe::update_heap_info_at_gc();
  14.300  }
  14.301  
  14.302  HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
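The update_heap_info_at_gc() call added above refreshes the heap occupancy figures that the soft reference clearing policy reads. As a rough sketch of the rule those figures feed, assuming the usual HotSpot behaviour where a softly reachable object survives roughly SoftRefLRUPolicyMSPerMB milliseconds per megabyte of free heap (names and types here are illustrative, not the VM's):

#include <cstdint>

// Illustrative only: decide whether a soft reference is stale enough to clear.
bool should_clear_soft_ref(uint64_t now_ms,
                           uint64_t ref_last_accessed_ms,
                           uint64_t free_heap_mb,
                           uint64_t ms_per_mb /* e.g. SoftRefLRUPolicyMSPerMB */) {
  // The allowed idle interval grows with the amount of free heap,
  // so a roomy heap keeps soft references around longer.
  uint64_t max_idle_ms = free_heap_mb * ms_per_mb;
  return (now_ms - ref_last_accessed_ms) > max_idle_ms;
}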
  14.303 @@ -3298,6 +3363,14 @@
  14.304      // for the duration of this pause.
  14.305      g1_policy()->decide_on_conc_mark_initiation();
  14.306  
  14.307 +    // We do not allow initial-mark to be piggy-backed on a
  14.308 +    // partially-young GC.
  14.309 +    assert(!g1_policy()->during_initial_mark_pause() ||
  14.310 +            g1_policy()->full_young_gcs(), "sanity");
  14.311 +
  14.312 +    // We also do not allow partially-young GCs during marking.
  14.313 +    assert(!mark_in_progress() || g1_policy()->full_young_gcs(), "sanity");
  14.314 +
  14.315      char verbose_str[128];
  14.316      sprintf(verbose_str, "GC pause ");
  14.317      if (g1_policy()->full_young_gcs()) {
  14.318 @@ -3354,231 +3427,242 @@
  14.319  
  14.320        COMPILER2_PRESENT(DerivedPointerTable::clear());
  14.321  
  14.322 -      // Please see comment in G1CollectedHeap::ref_processing_init()
  14.323 -      // to see how reference processing currently works in G1.
  14.324 -      //
  14.325 -      // We want to turn off ref discovery, if necessary, and turn it back on
  14.326 -      // on again later if we do. XXX Dubious: why is discovery disabled?
  14.327 -      bool was_enabled = ref_processor()->discovery_enabled();
  14.328 -      if (was_enabled) ref_processor()->disable_discovery();
  14.329 -
  14.330 -      // Forget the current alloc region (we might even choose it to be part
  14.331 -      // of the collection set!).
  14.332 -      release_mutator_alloc_region();
  14.333 -
  14.334 -      // We should call this after we retire the mutator alloc
  14.335 -      // region(s) so that all the ALLOC / RETIRE events are generated
  14.336 -      // before the start GC event.
  14.337 -      _hr_printer.start_gc(false /* full */, (size_t) total_collections());
  14.338 -
  14.339 -      // The elapsed time induced by the start time below deliberately elides
  14.340 -      // the possible verification above.
  14.341 -      double start_time_sec = os::elapsedTime();
  14.342 -      size_t start_used_bytes = used();
  14.343 +      // Please see comment in g1CollectedHeap.hpp and
  14.344 +      // G1CollectedHeap::ref_processing_init() to see how
  14.345 +      // reference processing currently works in G1.
  14.346 +
  14.347 +      // Enable discovery in the STW reference processor
  14.348 +      ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
  14.349 +                                            true /*verify_no_refs*/);
  14.350 +
  14.351 +      {
  14.352 +        // We want to temporarily turn off discovery by the
   14.353 +        // CM ref processor, if necessary, and turn it back
   14.354 +        // on again later if we do. Using a scoped
  14.355 +        // NoRefDiscovery object will do this.
  14.356 +        NoRefDiscovery no_cm_discovery(ref_processor_cm());
  14.357 +
  14.358 +        // Forget the current alloc region (we might even choose it to be part
  14.359 +        // of the collection set!).
  14.360 +        release_mutator_alloc_region();
  14.361 +
  14.362 +        // We should call this after we retire the mutator alloc
  14.363 +        // region(s) so that all the ALLOC / RETIRE events are generated
  14.364 +        // before the start GC event.
  14.365 +        _hr_printer.start_gc(false /* full */, (size_t) total_collections());
  14.366 +
  14.367 +        // The elapsed time induced by the start time below deliberately elides
  14.368 +        // the possible verification above.
  14.369 +        double start_time_sec = os::elapsedTime();
  14.370 +        size_t start_used_bytes = used();
  14.371  
  14.372  #if YOUNG_LIST_VERBOSE
  14.373 -      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  14.374 -      _young_list->print();
  14.375 -      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  14.376 +        gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  14.377 +        _young_list->print();
  14.378 +        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  14.379  #endif // YOUNG_LIST_VERBOSE
  14.380  
  14.381 -      g1_policy()->record_collection_pause_start(start_time_sec,
  14.382 -                                                 start_used_bytes);
  14.383 +        g1_policy()->record_collection_pause_start(start_time_sec,
  14.384 +                                                   start_used_bytes);
  14.385  
  14.386  #if YOUNG_LIST_VERBOSE
  14.387 -      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  14.388 -      _young_list->print();
  14.389 +        gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  14.390 +        _young_list->print();
  14.391  #endif // YOUNG_LIST_VERBOSE
  14.392  
  14.393 -      if (g1_policy()->during_initial_mark_pause()) {
  14.394 -        concurrent_mark()->checkpointRootsInitialPre();
  14.395 -      }
  14.396 -      perm_gen()->save_marks();
  14.397 -
  14.398 -      // We must do this before any possible evacuation that should propagate
  14.399 -      // marks.
  14.400 -      if (mark_in_progress()) {
  14.401 -        double start_time_sec = os::elapsedTime();
  14.402 -
  14.403 -        _cm->drainAllSATBBuffers();
  14.404 -        double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  14.405 -        g1_policy()->record_satb_drain_time(finish_mark_ms);
  14.406 -      }
  14.407 -      // Record the number of elements currently on the mark stack, so we
  14.408 -      // only iterate over these.  (Since evacuation may add to the mark
  14.409 -      // stack, doing more exposes race conditions.)  If no mark is in
  14.410 -      // progress, this will be zero.
  14.411 -      _cm->set_oops_do_bound();
  14.412 -
  14.413 -      if (mark_in_progress()) {
  14.414 -        concurrent_mark()->newCSet();
  14.415 -      }
  14.416 +        if (g1_policy()->during_initial_mark_pause()) {
  14.417 +          concurrent_mark()->checkpointRootsInitialPre();
  14.418 +        }
  14.419 +        perm_gen()->save_marks();
  14.420 +
  14.421 +        // We must do this before any possible evacuation that should propagate
  14.422 +        // marks.
  14.423 +        if (mark_in_progress()) {
  14.424 +          double start_time_sec = os::elapsedTime();
  14.425 +
  14.426 +          _cm->drainAllSATBBuffers();
  14.427 +          double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  14.428 +          g1_policy()->record_satb_drain_time(finish_mark_ms);
  14.429 +        }
  14.430 +        // Record the number of elements currently on the mark stack, so we
  14.431 +        // only iterate over these.  (Since evacuation may add to the mark
  14.432 +        // stack, doing more exposes race conditions.)  If no mark is in
  14.433 +        // progress, this will be zero.
  14.434 +        _cm->set_oops_do_bound();
  14.435 +
  14.436 +        if (mark_in_progress()) {
  14.437 +          concurrent_mark()->newCSet();
  14.438 +        }
  14.439  
  14.440  #if YOUNG_LIST_VERBOSE
  14.441 -      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  14.442 -      _young_list->print();
  14.443 -      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  14.444 +        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  14.445 +        _young_list->print();
  14.446 +        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  14.447  #endif // YOUNG_LIST_VERBOSE
  14.448  
  14.449 -      g1_policy()->choose_collection_set(target_pause_time_ms);
  14.450 -
  14.451 -      if (_hr_printer.is_active()) {
  14.452 -        HeapRegion* hr = g1_policy()->collection_set();
  14.453 -        while (hr != NULL) {
  14.454 -          G1HRPrinter::RegionType type;
  14.455 -          if (!hr->is_young()) {
  14.456 -            type = G1HRPrinter::Old;
  14.457 -          } else if (hr->is_survivor()) {
  14.458 -            type = G1HRPrinter::Survivor;
  14.459 -          } else {
  14.460 -            type = G1HRPrinter::Eden;
  14.461 -          }
  14.462 -          _hr_printer.cset(hr);
  14.463 -          hr = hr->next_in_collection_set();
  14.464 -        }
  14.465 -      }
  14.466 -
  14.467 -      // We have chosen the complete collection set. If marking is
  14.468 -      // active then, we clear the region fields of any of the
  14.469 -      // concurrent marking tasks whose region fields point into
  14.470 -      // the collection set as these values will become stale. This
  14.471 -      // will cause the owning marking threads to claim a new region
  14.472 -      // when marking restarts.
  14.473 -      if (mark_in_progress()) {
  14.474 -        concurrent_mark()->reset_active_task_region_fields_in_cset();
  14.475 -      }
  14.476 -
  14.477 -#ifdef ASSERT
  14.478 -      VerifyCSetClosure cl;
  14.479 -      collection_set_iterate(&cl);
  14.480 -#endif // ASSERT
  14.481 -
  14.482 -      setup_surviving_young_words();
  14.483 -
  14.484 -      // Initialize the GC alloc regions.
  14.485 -      init_gc_alloc_regions();
  14.486 -
  14.487 -      // Actually do the work...
  14.488 -      evacuate_collection_set();
  14.489 -
  14.490 -      free_collection_set(g1_policy()->collection_set());
  14.491 -      g1_policy()->clear_collection_set();
  14.492 -
  14.493 -      cleanup_surviving_young_words();
  14.494 -
  14.495 -      // Start a new incremental collection set for the next pause.
  14.496 -      g1_policy()->start_incremental_cset_building();
  14.497 -
  14.498 -      // Clear the _cset_fast_test bitmap in anticipation of adding
  14.499 -      // regions to the incremental collection set for the next
  14.500 -      // evacuation pause.
  14.501 -      clear_cset_fast_test();
  14.502 -
  14.503 -      _young_list->reset_sampled_info();
  14.504 -
  14.505 -      // Don't check the whole heap at this point as the
  14.506 -      // GC alloc regions from this pause have been tagged
  14.507 -      // as survivors and moved on to the survivor list.
  14.508 -      // Survivor regions will fail the !is_young() check.
  14.509 -      assert(check_young_list_empty(false /* check_heap */),
  14.510 -        "young list should be empty");
  14.511 -
  14.512 -#if YOUNG_LIST_VERBOSE
  14.513 -      gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  14.514 -      _young_list->print();
  14.515 -#endif // YOUNG_LIST_VERBOSE
  14.516 -
  14.517 -      g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  14.518 -        _young_list->first_survivor_region(),
  14.519 -        _young_list->last_survivor_region());
  14.520 -
  14.521 -      _young_list->reset_auxilary_lists();
  14.522 -
  14.523 -      if (evacuation_failed()) {
  14.524 -        _summary_bytes_used = recalculate_used();
  14.525 -      } else {
  14.526 -        // The "used" of the the collection set have already been subtracted
  14.527 -        // when they were freed.  Add in the bytes evacuated.
  14.528 -        _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
  14.529 -      }
  14.530 -
  14.531 -      if (g1_policy()->during_initial_mark_pause()) {
  14.532 -        concurrent_mark()->checkpointRootsInitialPost();
  14.533 -        set_marking_started();
  14.534 -        // CAUTION: after the doConcurrentMark() call below,
  14.535 -        // the concurrent marking thread(s) could be running
  14.536 -        // concurrently with us. Make sure that anything after
  14.537 -        // this point does not assume that we are the only GC thread
  14.538 -        // running. Note: of course, the actual marking work will
  14.539 -        // not start until the safepoint itself is released in
  14.540 -        // ConcurrentGCThread::safepoint_desynchronize().
  14.541 -        doConcurrentMark();
  14.542 -      }
  14.543 -
  14.544 -      allocate_dummy_regions();
  14.545 -
  14.546 -#if YOUNG_LIST_VERBOSE
  14.547 -      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  14.548 -      _young_list->print();
  14.549 -      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  14.550 -#endif // YOUNG_LIST_VERBOSE
  14.551 -
  14.552 -      init_mutator_alloc_region();
  14.553 -
  14.554 -      {
  14.555 -        size_t expand_bytes = g1_policy()->expansion_amount();
  14.556 -        if (expand_bytes > 0) {
  14.557 -          size_t bytes_before = capacity();
  14.558 -          if (!expand(expand_bytes)) {
  14.559 -            // We failed to expand the heap so let's verify that
  14.560 -            // committed/uncommitted amount match the backing store
  14.561 -            assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  14.562 -            assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  14.563 +        g1_policy()->choose_collection_set(target_pause_time_ms);
  14.564 +
  14.565 +        if (_hr_printer.is_active()) {
  14.566 +          HeapRegion* hr = g1_policy()->collection_set();
  14.567 +          while (hr != NULL) {
  14.568 +            G1HRPrinter::RegionType type;
  14.569 +            if (!hr->is_young()) {
  14.570 +              type = G1HRPrinter::Old;
  14.571 +            } else if (hr->is_survivor()) {
  14.572 +              type = G1HRPrinter::Survivor;
  14.573 +            } else {
  14.574 +              type = G1HRPrinter::Eden;
  14.575 +            }
  14.576 +            _hr_printer.cset(hr);
  14.577 +            hr = hr->next_in_collection_set();
  14.578            }
  14.579          }
  14.580 +
  14.581 +        // We have chosen the complete collection set. If marking is
   14.582 +        // active, then we clear the region fields of any of the
  14.583 +        // concurrent marking tasks whose region fields point into
  14.584 +        // the collection set as these values will become stale. This
  14.585 +        // will cause the owning marking threads to claim a new region
  14.586 +        // when marking restarts.
  14.587 +        if (mark_in_progress()) {
  14.588 +          concurrent_mark()->reset_active_task_region_fields_in_cset();
  14.589 +        }
  14.590 +
  14.591 +#ifdef ASSERT
  14.592 +        VerifyCSetClosure cl;
  14.593 +        collection_set_iterate(&cl);
  14.594 +#endif // ASSERT
  14.595 +
  14.596 +        setup_surviving_young_words();
  14.597 +
  14.598 +        // Initialize the GC alloc regions.
  14.599 +        init_gc_alloc_regions();
  14.600 +
  14.601 +        // Actually do the work...
  14.602 +        evacuate_collection_set();
  14.603 +
  14.604 +        free_collection_set(g1_policy()->collection_set());
  14.605 +        g1_policy()->clear_collection_set();
  14.606 +
  14.607 +        cleanup_surviving_young_words();
  14.608 +
  14.609 +        // Start a new incremental collection set for the next pause.
  14.610 +        g1_policy()->start_incremental_cset_building();
  14.611 +
  14.612 +        // Clear the _cset_fast_test bitmap in anticipation of adding
  14.613 +        // regions to the incremental collection set for the next
  14.614 +        // evacuation pause.
  14.615 +        clear_cset_fast_test();
  14.616 +
  14.617 +        _young_list->reset_sampled_info();
  14.618 +
  14.619 +        // Don't check the whole heap at this point as the
  14.620 +        // GC alloc regions from this pause have been tagged
  14.621 +        // as survivors and moved on to the survivor list.
  14.622 +        // Survivor regions will fail the !is_young() check.
  14.623 +        assert(check_young_list_empty(false /* check_heap */),
  14.624 +          "young list should be empty");
  14.625 +
  14.626 +#if YOUNG_LIST_VERBOSE
  14.627 +        gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  14.628 +        _young_list->print();
  14.629 +#endif // YOUNG_LIST_VERBOSE
  14.630 +
  14.631 +        g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  14.632 +                                            _young_list->first_survivor_region(),
  14.633 +                                            _young_list->last_survivor_region());
  14.634 +
  14.635 +        _young_list->reset_auxilary_lists();
  14.636 +
  14.637 +        if (evacuation_failed()) {
  14.638 +          _summary_bytes_used = recalculate_used();
  14.639 +        } else {
   14.640 +          // The "used" of the collection set have already been subtracted
  14.641 +          // when they were freed.  Add in the bytes evacuated.
  14.642 +          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
  14.643 +        }
  14.644 +
  14.645 +        if (g1_policy()->during_initial_mark_pause()) {
  14.646 +          concurrent_mark()->checkpointRootsInitialPost();
  14.647 +          set_marking_started();
  14.648 +          // CAUTION: after the doConcurrentMark() call below,
  14.649 +          // the concurrent marking thread(s) could be running
  14.650 +          // concurrently with us. Make sure that anything after
  14.651 +          // this point does not assume that we are the only GC thread
  14.652 +          // running. Note: of course, the actual marking work will
  14.653 +          // not start until the safepoint itself is released in
  14.654 +          // ConcurrentGCThread::safepoint_desynchronize().
  14.655 +          doConcurrentMark();
  14.656 +        }
  14.657 +
  14.658 +        allocate_dummy_regions();
  14.659 +
  14.660 +#if YOUNG_LIST_VERBOSE
  14.661 +        gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  14.662 +        _young_list->print();
  14.663 +        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  14.664 +#endif // YOUNG_LIST_VERBOSE
  14.665 +
  14.666 +        init_mutator_alloc_region();
  14.667 +
  14.668 +        {
  14.669 +          size_t expand_bytes = g1_policy()->expansion_amount();
  14.670 +          if (expand_bytes > 0) {
  14.671 +            size_t bytes_before = capacity();
  14.672 +            if (!expand(expand_bytes)) {
  14.673 +              // We failed to expand the heap so let's verify that
  14.674 +              // committed/uncommitted amount match the backing store
  14.675 +              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  14.676 +              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  14.677 +            }
  14.678 +          }
  14.679 +        }
  14.680 +
  14.681 +        double end_time_sec = os::elapsedTime();
  14.682 +        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  14.683 +        g1_policy()->record_pause_time_ms(pause_time_ms);
  14.684 +        g1_policy()->record_collection_pause_end();
  14.685 +
  14.686 +        MemoryService::track_memory_usage();
  14.687 +
  14.688 +        // In prepare_for_verify() below we'll need to scan the deferred
  14.689 +        // update buffers to bring the RSets up-to-date if
  14.690 +        // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
  14.691 +        // the update buffers we'll probably need to scan cards on the
  14.692 +        // regions we just allocated to (i.e., the GC alloc
  14.693 +        // regions). However, during the last GC we called
  14.694 +        // set_saved_mark() on all the GC alloc regions, so card
  14.695 +        // scanning might skip the [saved_mark_word()...top()] area of
  14.696 +        // those regions (i.e., the area we allocated objects into
  14.697 +        // during the last GC). But it shouldn't. Given that
  14.698 +        // saved_mark_word() is conditional on whether the GC time stamp
  14.699 +        // on the region is current or not, by incrementing the GC time
  14.700 +        // stamp here we invalidate all the GC time stamps on all the
  14.701 +        // regions and saved_mark_word() will simply return top() for
  14.702 +        // all the regions. This is a nicer way of ensuring this rather
  14.703 +        // than iterating over the regions and fixing them. In fact, the
  14.704 +        // GC time stamp increment here also ensures that
  14.705 +        // saved_mark_word() will return top() between pauses, i.e.,
  14.706 +        // during concurrent refinement. So we don't need the
   14.707 +        // is_gc_active() check to decide which top to use when
  14.708 +        // scanning cards (see CR 7039627).
  14.709 +        increment_gc_time_stamp();
  14.710 +
  14.711 +        if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  14.712 +          HandleMark hm;  // Discard invalid handles created during verification
  14.713 +          gclog_or_tty->print(" VerifyAfterGC:");
  14.714 +          prepare_for_verify();
  14.715 +          Universe::verify(/* allow dirty */ true,
  14.716 +                           /* silent      */ false,
  14.717 +                           /* option      */ VerifyOption_G1UsePrevMarking);
  14.718 +        }
  14.719 +
  14.720 +        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  14.721 +        ref_processor_stw()->verify_no_references_recorded();
  14.722 +
  14.723 +        // CM reference discovery will be re-enabled if necessary.
  14.724        }
  14.725  
  14.726 -      double end_time_sec = os::elapsedTime();
  14.727 -      double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  14.728 -      g1_policy()->record_pause_time_ms(pause_time_ms);
  14.729 -      g1_policy()->record_collection_pause_end();
  14.730 -
  14.731 -      MemoryService::track_memory_usage();
  14.732 -
  14.733 -      // In prepare_for_verify() below we'll need to scan the deferred
  14.734 -      // update buffers to bring the RSets up-to-date if
  14.735 -      // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
  14.736 -      // the update buffers we'll probably need to scan cards on the
  14.737 -      // regions we just allocated to (i.e., the GC alloc
  14.738 -      // regions). However, during the last GC we called
  14.739 -      // set_saved_mark() on all the GC alloc regions, so card
  14.740 -      // scanning might skip the [saved_mark_word()...top()] area of
  14.741 -      // those regions (i.e., the area we allocated objects into
  14.742 -      // during the last GC). But it shouldn't. Given that
  14.743 -      // saved_mark_word() is conditional on whether the GC time stamp
  14.744 -      // on the region is current or not, by incrementing the GC time
  14.745 -      // stamp here we invalidate all the GC time stamps on all the
  14.746 -      // regions and saved_mark_word() will simply return top() for
  14.747 -      // all the regions. This is a nicer way of ensuring this rather
  14.748 -      // than iterating over the regions and fixing them. In fact, the
  14.749 -      // GC time stamp increment here also ensures that
  14.750 -      // saved_mark_word() will return top() between pauses, i.e.,
  14.751 -      // during concurrent refinement. So we don't need the
  14.752 -      // is_gc_active() check to decided which top to use when
  14.753 -      // scanning cards (see CR 7039627).
  14.754 -      increment_gc_time_stamp();
  14.755 -
  14.756 -      if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  14.757 -        HandleMark hm;  // Discard invalid handles created during verification
  14.758 -        gclog_or_tty->print(" VerifyAfterGC:");
  14.759 -        prepare_for_verify();
  14.760 -        Universe::verify(/* allow dirty */ true,
  14.761 -                         /* silent      */ false,
  14.762 -                         /* option      */ VerifyOption_G1UsePrevMarking);
  14.763 -      }
  14.764 -
  14.765 -      if (was_enabled) ref_processor()->enable_discovery();
  14.766 -
  14.767        {
  14.768          size_t expand_bytes = g1_policy()->expansion_amount();
  14.769          if (expand_bytes > 0) {
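One structural change in the hunk above is that the pause body now sits inside a scope guarded by a NoRefDiscovery object, which turns CM reference discovery off on entry and restores the previous state on exit. A minimal sketch of that RAII pattern, with a hypothetical processor type standing in for the real one:

// Illustrative RAII guard; Processor is a stand-in, not the HotSpot class.
class Processor {
  bool _discovery_enabled;
public:
  Processor() : _discovery_enabled(false) {}
  bool discovery_enabled() const { return _discovery_enabled; }
  void enable_discovery()        { _discovery_enabled = true; }
  void disable_discovery()       { _discovery_enabled = false; }
};

class ScopedNoDiscovery {
  Processor* _rp;
  bool       _was_enabled;
public:
  explicit ScopedNoDiscovery(Processor* rp)
    : _rp(rp), _was_enabled(rp->discovery_enabled()) {
    // Turn discovery off for the duration of the scope, if it was on.
    if (_was_enabled) {
      _rp->disable_discovery();
    }
  }
  ~ScopedNoDiscovery() {
    // Restore the previous state when the scope is left.
    if (_was_enabled) {
      _rp->enable_discovery();
    }
  }
};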
  14.770 @@ -3630,7 +3714,7 @@
  14.771    if (PrintHeapAtGC) {
  14.772      Universe::print_heap_after_gc();
  14.773    }
  14.774 -  g1mm()->update_counters();
  14.775 +  g1mm()->update_sizes();
  14.776  
  14.777    if (G1SummarizeRSetStats &&
  14.778        (G1SummarizeRSetStatsPeriod > 0) &&
  14.779 @@ -3728,34 +3812,6 @@
  14.780    _evac_failure_scan_stack = NULL;
  14.781  }
  14.782  
  14.783 -// *** Sequential G1 Evacuation
  14.784 -
  14.785 -class G1IsAliveClosure: public BoolObjectClosure {
  14.786 -  G1CollectedHeap* _g1;
  14.787 -public:
  14.788 -  G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  14.789 -  void do_object(oop p) { assert(false, "Do not call."); }
  14.790 -  bool do_object_b(oop p) {
  14.791 -    // It is reachable if it is outside the collection set, or is inside
  14.792 -    // and forwarded.
  14.793 -    return !_g1->obj_in_cs(p) || p->is_forwarded();
  14.794 -  }
  14.795 -};
  14.796 -
  14.797 -class G1KeepAliveClosure: public OopClosure {
  14.798 -  G1CollectedHeap* _g1;
  14.799 -public:
  14.800 -  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  14.801 -  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  14.802 -  void do_oop(      oop* p) {
  14.803 -    oop obj = *p;
  14.804 -    if (_g1->obj_in_cs(obj)) {
  14.805 -      assert( obj->is_forwarded(), "invariant" );
  14.806 -      *p = obj->forwardee();
  14.807 -    }
  14.808 -  }
  14.809 -};
  14.810 -
  14.811  class UpdateRSetDeferred : public OopsInHeapRegionClosure {
  14.812  private:
  14.813    G1CollectedHeap* _g1;
  14.814 @@ -3946,7 +4002,8 @@
  14.815  
  14.816  oop
  14.817  G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  14.818 -                                               oop old) {
  14.819 +                                               oop old,
  14.820 +                                               bool should_mark_root) {
  14.821    assert(obj_in_cs(old),
  14.822           err_msg("obj: "PTR_FORMAT" should still be in the CSet",
  14.823                   (HeapWord*) old));
  14.824 @@ -3954,6 +4011,16 @@
  14.825    oop forward_ptr = old->forward_to_atomic(old);
  14.826    if (forward_ptr == NULL) {
  14.827      // Forward-to-self succeeded.
  14.828 +
  14.829 +    // should_mark_root will be true when this routine is called
  14.830 +    // from a root scanning closure during an initial mark pause.
  14.831 +    // In this case the thread that succeeds in self-forwarding the
  14.832 +    // object is also responsible for marking the object.
  14.833 +    if (should_mark_root) {
  14.834 +      assert(!oopDesc::is_null(old), "shouldn't be");
  14.835 +      _cm->grayRoot(old);
  14.836 +    }
  14.837 +
  14.838      if (_evac_failure_closure != cl) {
  14.839        MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  14.840        assert(!_drain_in_progress,
  14.841 @@ -4175,12 +4242,17 @@
  14.842  #endif // ASSERT
  14.843  
  14.844  void G1ParScanThreadState::trim_queue() {
  14.845 +  assert(_evac_cl != NULL, "not set");
  14.846 +  assert(_evac_failure_cl != NULL, "not set");
  14.847 +  assert(_partial_scan_cl != NULL, "not set");
  14.848 +
  14.849    StarTask ref;
  14.850    do {
  14.851      // Drain the overflow stack first, so other threads can steal.
  14.852      while (refs()->pop_overflow(ref)) {
  14.853        deal_with_reference(ref);
  14.854      }
  14.855 +
  14.856      while (refs()->pop_local(ref)) {
  14.857        deal_with_reference(ref);
  14.858      }
  14.859 @@ -4208,7 +4280,8 @@
  14.860    }
  14.861  }
  14.862  
  14.863 -oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_copy) {
  14.864 +oop G1ParCopyHelper::copy_to_survivor_space(oop old, bool should_mark_root,
  14.865 +                                                     bool should_mark_copy) {
  14.866    size_t    word_sz = old->size();
  14.867    HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  14.868    // +1 to make the -1 indexes valid...
  14.869 @@ -4228,7 +4301,7 @@
  14.870      // This will either forward-to-self, or detect that someone else has
  14.871      // installed a forwarding pointer.
  14.872      OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  14.873 -    return _g1->handle_evacuation_failure_par(cl, old);
  14.874 +    return _g1->handle_evacuation_failure_par(cl, old, should_mark_root);
  14.875    }
  14.876  
  14.877    // We're going to allocate linearly, so might as well prefetch ahead.
  14.878 @@ -4330,11 +4403,26 @@
  14.879        // we also need to handle marking of roots in the
  14.880        // event of an evacuation failure. In the event of an
  14.881        // evacuation failure, the object is forwarded to itself
  14.882 -      // and not copied so let's mark it here.
  14.883 +      // and not copied. For root-scanning closures, the
  14.884 +      // object would be marked after a successful self-forward
  14.885 +      // but an object could be pointed to by both a root and non
  14.886 +      // root location and be self-forwarded by a non-root-scanning
  14.887 +      // closure. Therefore we also have to attempt to mark the
  14.888 +      // self-forwarded root object here.
  14.889        if (do_mark_object && obj->forwardee() == obj) {
  14.890          mark_object(p);
  14.891        }
  14.892      } else {
  14.893 +      // During an initial mark pause, objects that are pointed to
  14.894 +      // by the roots need to be marked - even in the event of an
  14.895 +      // evacuation failure. We pass the template parameter
  14.896 +      // do_mark_object (which is true for root scanning closures
  14.897 +      // during an initial mark pause) to copy_to_survivor_space
  14.898 +      // which will pass it on to the evacuation failure handling
  14.899 +      // code. The thread that successfully self-forwards a root
  14.900 +      // object to itself is responsible for marking the object.
  14.901 +      bool should_mark_root = do_mark_object;
  14.902 +
  14.903        // We need to mark the copied object if we're a root scanning
  14.904        // closure during an initial mark pause (i.e. do_mark_object
  14.905        // will be true), or the object is already marked and we need
  14.906 @@ -4343,7 +4431,8 @@
  14.907                                _during_initial_mark ||
  14.908                                (_mark_in_progress && !_g1->is_obj_ill(obj));
  14.909  
  14.910 -      oop copy_oop = copy_to_survivor_space(obj, should_mark_copy);
  14.911 +      oop copy_oop = copy_to_survivor_space(obj, should_mark_root,
  14.912 +                                                 should_mark_copy);
  14.913        oopDesc::encode_store_heap_oop(p, copy_oop);
  14.914      }
  14.915      // When scanning the RS, we only care about objs in CS.
  14.916 @@ -4501,35 +4590,34 @@
  14.917      ResourceMark rm;
  14.918      HandleMark   hm;
  14.919  
  14.920 +    ReferenceProcessor*             rp = _g1h->ref_processor_stw();
  14.921 +
  14.922      G1ParScanThreadState            pss(_g1h, i);
  14.923 -    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
  14.924 -    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
  14.925 -    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
  14.926 +    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
  14.927 +    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
  14.928 +    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
  14.929  
  14.930      pss.set_evac_closure(&scan_evac_cl);
  14.931      pss.set_evac_failure_closure(&evac_failure_cl);
  14.932      pss.set_partial_scan_closure(&partial_scan_cl);
  14.933  
  14.934 -    G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
  14.935 -    G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
  14.936 -    G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
  14.937 -    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  14.938 -
  14.939 -    G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
  14.940 -    G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
  14.941 -    G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
  14.942 -
  14.943 -    OopsInHeapRegionClosure        *scan_root_cl;
  14.944 -    OopsInHeapRegionClosure        *scan_perm_cl;
  14.945 +    G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
  14.946 +    G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
  14.947 +
  14.948 +    G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
  14.949 +    G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
  14.950 +
  14.951 +    OopClosure*                    scan_root_cl = &only_scan_root_cl;
  14.952 +    OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
  14.953  
  14.954      if (_g1h->g1_policy()->during_initial_mark_pause()) {
  14.955 +      // We also need to mark copied objects.
  14.956        scan_root_cl = &scan_mark_root_cl;
  14.957        scan_perm_cl = &scan_mark_perm_cl;
  14.958 -    } else {
  14.959 -      scan_root_cl = &only_scan_root_cl;
  14.960 -      scan_perm_cl = &only_scan_perm_cl;
  14.961      }
  14.962  
  14.963 +    G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  14.964 +
  14.965      pss.start_strong_roots();
  14.966      _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  14.967                                    SharedHeap::SO_AllClasses,
  14.968 @@ -4577,6 +4665,7 @@
  14.969                          OopsInHeapRegionClosure* scan_rs,
  14.970                          OopsInGenClosure* scan_perm,
  14.971                          int worker_i) {
  14.972 +
  14.973    // First scan the strong roots, including the perm gen.
  14.974    double ext_roots_start = os::elapsedTime();
  14.975    double closure_app_time_sec = 0.0;
  14.976 @@ -4595,12 +4684,13 @@
  14.977                         &eager_scan_code_roots,
  14.978                         &buf_scan_perm);
  14.979  
  14.980 -  // Now the ref_processor roots.
  14.981 +  // Now the CM ref_processor roots.
  14.982    if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  14.983 -    // We need to treat the discovered reference lists as roots and
  14.984 -    // keep entries (which are added by the marking threads) on them
  14.985 -    // live until they can be processed at the end of marking.
  14.986 -    ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
  14.987 +    // We need to treat the discovered reference lists of the
  14.988 +    // concurrent mark ref processor as roots and keep entries
  14.989 +    // (which are added by the marking threads) on them live
  14.990 +    // until they can be processed at the end of marking.
  14.991 +    ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  14.992    }
  14.993  
  14.994    // Finish up any enqueued closure apps (attributed as object copy time).
  14.995 @@ -4641,6 +4731,524 @@
  14.996    SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  14.997  }
  14.998  
  14.999 +// Weak Reference Processing support
 14.1000 +
 14.1001 +// An always "is_alive" closure that is used to preserve referents.
 14.1002 +// If the object is non-null then it's alive.  Used in the preservation
 14.1003 +// of referent objects that are pointed to by reference objects
 14.1004 +// discovered by the CM ref processor.
 14.1005 +class G1AlwaysAliveClosure: public BoolObjectClosure {
 14.1006 +  G1CollectedHeap* _g1;
 14.1007 +public:
 14.1008 +  G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 14.1009 +  void do_object(oop p) { assert(false, "Do not call."); }
 14.1010 +  bool do_object_b(oop p) {
 14.1011 +    if (p != NULL) {
 14.1012 +      return true;
 14.1013 +    }
 14.1014 +    return false;
 14.1015 +  }
 14.1016 +};
 14.1017 +
 14.1018 +bool G1STWIsAliveClosure::do_object_b(oop p) {
 14.1019 +  // An object is reachable if it is outside the collection set,
 14.1020 +  // or is inside and copied.
 14.1021 +  return !_g1->obj_in_cs(p) || p->is_forwarded();
 14.1022 +}
 14.1023 +
 14.1024 +// Non Copying Keep Alive closure
 14.1025 +class G1KeepAliveClosure: public OopClosure {
 14.1026 +  G1CollectedHeap* _g1;
 14.1027 +public:
 14.1028 +  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 14.1029 +  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
 14.1030 +  void do_oop(      oop* p) {
 14.1031 +    oop obj = *p;
 14.1032 +
 14.1033 +    if (_g1->obj_in_cs(obj)) {
 14.1034 +      assert( obj->is_forwarded(), "invariant" );
 14.1035 +      *p = obj->forwardee();
 14.1036 +    }
 14.1037 +  }
 14.1038 +};
 14.1039 +
 14.1040 +// Copying Keep Alive closure - can be called from both
 14.1041 +// serial and parallel code as long as different worker
 14.1042 +// threads utilize different G1ParScanThreadState instances
 14.1043 +// and different queues.
 14.1044 +
 14.1045 +class G1CopyingKeepAliveClosure: public OopClosure {
 14.1046 +  G1CollectedHeap*         _g1h;
 14.1047 +  OopClosure*              _copy_non_heap_obj_cl;
 14.1048 +  OopsInHeapRegionClosure* _copy_perm_obj_cl;
 14.1049 +  G1ParScanThreadState*    _par_scan_state;
 14.1050 +
 14.1051 +public:
 14.1052 +  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
 14.1053 +                            OopClosure* non_heap_obj_cl,
 14.1054 +                            OopsInHeapRegionClosure* perm_obj_cl,
 14.1055 +                            G1ParScanThreadState* pss):
 14.1056 +    _g1h(g1h),
 14.1057 +    _copy_non_heap_obj_cl(non_heap_obj_cl),
 14.1058 +    _copy_perm_obj_cl(perm_obj_cl),
 14.1059 +    _par_scan_state(pss)
 14.1060 +  {}
 14.1061 +
 14.1062 +  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 14.1063 +  virtual void do_oop(      oop* p) { do_oop_work(p); }
 14.1064 +
 14.1065 +  template <class T> void do_oop_work(T* p) {
 14.1066 +    oop obj = oopDesc::load_decode_heap_oop(p);
 14.1067 +
 14.1068 +    if (_g1h->obj_in_cs(obj)) {
 14.1069 +      // If the referent object has been forwarded (either copied
 14.1070 +      // to a new location or to itself in the event of an
 14.1071 +      // evacuation failure) then we need to update the reference
 14.1072 +      // field and, if both reference and referent are in the G1
 14.1073 +      // heap, update the RSet for the referent.
 14.1074 +      //
 14.1075 +      // If the referent has not been forwarded then we have to keep
  14.1076 +      // it alive by policy. Therefore we have to copy the referent.
  14.1077 +      //
  14.1078 +      // If the reference field is in the G1 heap then we can push
  14.1079 +      // on the PSS queue. When the queue is drained (after each
  14.1080 +      // phase of reference processing) the object and its followers
  14.1081 +      // will be copied, the reference field set to point to the
  14.1082 +      // new location, and the RSet updated. Otherwise we need to
  14.1083 +      // use the non-heap or perm closures directly to copy
  14.1084 +      // the referent object and update the pointer, while avoiding
 14.1085 +      // updating the RSet.
 14.1086 +
 14.1087 +      if (_g1h->is_in_g1_reserved(p)) {
 14.1088 +        _par_scan_state->push_on_queue(p);
 14.1089 +      } else {
 14.1090 +        // The reference field is not in the G1 heap.
 14.1091 +        if (_g1h->perm_gen()->is_in(p)) {
 14.1092 +          _copy_perm_obj_cl->do_oop(p);
 14.1093 +        } else {
 14.1094 +          _copy_non_heap_obj_cl->do_oop(p);
 14.1095 +        }
 14.1096 +      }
 14.1097 +    }
 14.1098 +  }
 14.1099 +};
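The comment block in G1CopyingKeepAliveClosure above describes a three-way routing decision for a referent that sits in the collection set. A compact sketch of just that dispatch, with hypothetical queue and closure types standing in for the real oop field and perm-gen checks:

#include <functional>
#include <queue>

// Hypothetical stand-ins for the work queue and the two copy closures.
enum class FieldLocation { InG1Heap, InPermGen, Elsewhere };

void keep_alive_referent(FieldLocation where,
                         void* field,
                         std::queue<void*>& pss_queue,
                         const std::function<void(void*)>& copy_perm,
                         const std::function<void(void*)>& copy_non_heap) {
  switch (where) {
    case FieldLocation::InG1Heap:
      // Defer: the field is pushed on the per-thread queue and the referent
      // is copied (and the RSet updated) when the queue is drained.
      pss_queue.push(field);
      break;
    case FieldLocation::InPermGen:
      // Copy immediately via the perm-gen closure; no RSet update is done.
      copy_perm(field);
      break;
    case FieldLocation::Elsewhere:
      // Field lives outside the G1 heap; use the non-heap copy closure.
      copy_non_heap(field);
      break;
  }
}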
 14.1100 +
 14.1101 +// Serial drain queue closure. Called as the 'complete_gc'
 14.1102 +// closure for each discovered list in some of the
 14.1103 +// reference processing phases.
 14.1104 +
 14.1105 +class G1STWDrainQueueClosure: public VoidClosure {
 14.1106 +protected:
 14.1107 +  G1CollectedHeap* _g1h;
 14.1108 +  G1ParScanThreadState* _par_scan_state;
 14.1109 +
 14.1110 +  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
 14.1111 +
 14.1112 +public:
 14.1113 +  G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
 14.1114 +    _g1h(g1h),
 14.1115 +    _par_scan_state(pss)
 14.1116 +  { }
 14.1117 +
 14.1118 +  void do_void() {
 14.1119 +    G1ParScanThreadState* const pss = par_scan_state();
 14.1120 +    pss->trim_queue();
 14.1121 +  }
 14.1122 +};
 14.1123 +
 14.1124 +// Parallel Reference Processing closures
 14.1125 +
 14.1126 +// Implementation of AbstractRefProcTaskExecutor for parallel reference
 14.1127 +// processing during G1 evacuation pauses.
 14.1128 +
 14.1129 +class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 14.1130 +private:
 14.1131 +  G1CollectedHeap*   _g1h;
 14.1132 +  RefToScanQueueSet* _queues;
 14.1133 +  WorkGang*          _workers;
 14.1134 +  int                _active_workers;
 14.1135 +
 14.1136 +public:
 14.1137 +  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
 14.1138 +                        WorkGang* workers,
 14.1139 +                        RefToScanQueueSet *task_queues,
 14.1140 +                        int n_workers) :
 14.1141 +    _g1h(g1h),
 14.1142 +    _queues(task_queues),
 14.1143 +    _workers(workers),
 14.1144 +    _active_workers(n_workers)
 14.1145 +  {
 14.1146 +    assert(n_workers > 0, "shouldn't call this otherwise");
 14.1147 +  }
 14.1148 +
  14.1149 +  // Executes the given task using the heap's GC worker threads.
 14.1150 +  virtual void execute(ProcessTask& task);
 14.1151 +  virtual void execute(EnqueueTask& task);
 14.1152 +};
 14.1153 +
 14.1154 +// Gang task for possibly parallel reference processing
 14.1155 +
 14.1156 +class G1STWRefProcTaskProxy: public AbstractGangTask {
 14.1157 +  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 14.1158 +  ProcessTask&     _proc_task;
 14.1159 +  G1CollectedHeap* _g1h;
 14.1160 +  RefToScanQueueSet *_task_queues;
 14.1161 +  ParallelTaskTerminator* _terminator;
 14.1162 +
 14.1163 +public:
 14.1164 +  G1STWRefProcTaskProxy(ProcessTask& proc_task,
 14.1165 +                     G1CollectedHeap* g1h,
 14.1166 +                     RefToScanQueueSet *task_queues,
 14.1167 +                     ParallelTaskTerminator* terminator) :
 14.1168 +    AbstractGangTask("Process reference objects in parallel"),
 14.1169 +    _proc_task(proc_task),
 14.1170 +    _g1h(g1h),
 14.1171 +    _task_queues(task_queues),
 14.1172 +    _terminator(terminator)
 14.1173 +  {}
 14.1174 +
 14.1175 +  virtual void work(int i) {
 14.1176 +    // The reference processing task executed by a single worker.
 14.1177 +    ResourceMark rm;
 14.1178 +    HandleMark   hm;
 14.1179 +
 14.1180 +    G1STWIsAliveClosure is_alive(_g1h);
 14.1181 +
 14.1182 +    G1ParScanThreadState pss(_g1h, i);
 14.1183 +
 14.1184 +    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
 14.1185 +    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
 14.1186 +    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
 14.1187 +
 14.1188 +    pss.set_evac_closure(&scan_evac_cl);
 14.1189 +    pss.set_evac_failure_closure(&evac_failure_cl);
 14.1190 +    pss.set_partial_scan_closure(&partial_scan_cl);
 14.1191 +
 14.1192 +    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
 14.1193 +    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
 14.1194 +
 14.1195 +    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
 14.1196 +    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
 14.1197 +
 14.1198 +    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 14.1199 +    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
 14.1200 +
 14.1201 +    if (_g1h->g1_policy()->during_initial_mark_pause()) {
 14.1202 +      // We also need to mark copied objects.
 14.1203 +      copy_non_heap_cl = &copy_mark_non_heap_cl;
 14.1204 +      copy_perm_cl = &copy_mark_perm_cl;
 14.1205 +    }
 14.1206 +
 14.1207 +    // Keep alive closure.
 14.1208 +    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
 14.1209 +
 14.1210 +    // Complete GC closure
 14.1211 +    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
 14.1212 +
 14.1213 +    // Call the reference processing task's work routine.
 14.1214 +    _proc_task.work(i, is_alive, keep_alive, drain_queue);
 14.1215 +
 14.1216 +    // Note we cannot assert that the refs array is empty here as not all
 14.1217 +    // of the processing tasks (specifically phase2 - pp2_work) execute
 14.1218 +    // the complete_gc closure (which ordinarily would drain the queue) so
 14.1219 +    // the queue may not be empty.
 14.1220 +  }
 14.1221 +};
 14.1222 +
 14.1223 +// Driver routine for parallel reference processing.
 14.1224 +// Creates an instance of the ref processing gang
 14.1225 +// task and has the worker threads execute it.
 14.1226 +void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
 14.1227 +  assert(_workers != NULL, "Need parallel worker threads.");
 14.1228 +
 14.1229 +  ParallelTaskTerminator terminator(_active_workers, _queues);
 14.1230 +  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
 14.1231 +
 14.1232 +  _g1h->set_par_threads(_active_workers);
 14.1233 +  _workers->run_task(&proc_task_proxy);
 14.1234 +  _g1h->set_par_threads(0);
 14.1235 +}
 14.1236 +
 14.1237 +// Gang task for parallel reference enqueueing.
 14.1238 +
 14.1239 +class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
 14.1240 +  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
 14.1241 +  EnqueueTask& _enq_task;
 14.1242 +
 14.1243 +public:
 14.1244 +  G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
 14.1245 +    AbstractGangTask("Enqueue reference objects in parallel"),
 14.1246 +    _enq_task(enq_task)
 14.1247 +  { }
 14.1248 +
 14.1249 +  virtual void work(int i) {
 14.1250 +    _enq_task.work(i);
 14.1251 +  }
 14.1252 +};
 14.1253 +
  14.1254 +// Driver routine for parallel reference enqueueing.
 14.1255 +// Creates an instance of the ref enqueueing gang
 14.1256 +// task and has the worker threads execute it.
 14.1257 +
 14.1258 +void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
 14.1259 +  assert(_workers != NULL, "Need parallel worker threads.");
 14.1260 +
 14.1261 +  G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
 14.1262 +
 14.1263 +  _g1h->set_par_threads(_active_workers);
 14.1264 +  _workers->run_task(&enq_task_proxy);
 14.1265 +  _g1h->set_par_threads(0);
 14.1266 +}
 14.1267 +
 14.1268 +// End of weak reference support closures
 14.1269 +
 14.1270 +// Abstract task used to preserve (i.e. copy) any referent objects
 14.1271 +// that are in the collection set and are pointed to by reference
 14.1272 +// objects discovered by the CM ref processor.
 14.1273 +
 14.1274 +class G1ParPreserveCMReferentsTask: public AbstractGangTask {
 14.1275 +protected:
 14.1276 +  G1CollectedHeap* _g1h;
 14.1277 +  RefToScanQueueSet      *_queues;
 14.1278 +  ParallelTaskTerminator _terminator;
 14.1279 +  int _n_workers;
 14.1280 +
 14.1281 +public:
 14.1282 +  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
 14.1283 +    AbstractGangTask("ParPreserveCMReferents"),
 14.1284 +    _g1h(g1h),
 14.1285 +    _queues(task_queues),
 14.1286 +    _terminator(workers, _queues),
 14.1287 +    _n_workers(workers)
 14.1288 +  { }
 14.1289 +
 14.1290 +  void work(int i) {
 14.1291 +    ResourceMark rm;
 14.1292 +    HandleMark   hm;
 14.1293 +
 14.1294 +    G1ParScanThreadState            pss(_g1h, i);
 14.1295 +    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
 14.1296 +    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
 14.1297 +    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
 14.1298 +
 14.1299 +    pss.set_evac_closure(&scan_evac_cl);
 14.1300 +    pss.set_evac_failure_closure(&evac_failure_cl);
 14.1301 +    pss.set_partial_scan_closure(&partial_scan_cl);
 14.1302 +
 14.1303 +    assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
 14.1304 +
 14.1305 +
 14.1306 +    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
 14.1307 +    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
 14.1308 +
 14.1309 +    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
 14.1310 +    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
 14.1311 +
 14.1312 +    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 14.1313 +    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
 14.1314 +
 14.1315 +    if (_g1h->g1_policy()->during_initial_mark_pause()) {
 14.1316 +      // We also need to mark copied objects.
 14.1317 +      copy_non_heap_cl = &copy_mark_non_heap_cl;
 14.1318 +      copy_perm_cl = &copy_mark_perm_cl;
 14.1319 +    }
 14.1320 +
 14.1321 +    // Is alive closure
 14.1322 +    G1AlwaysAliveClosure always_alive(_g1h);
 14.1323 +
 14.1324 +    // Copying keep alive closure. Applied to referent objects that need
 14.1325 +    // to be copied.
 14.1326 +    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
 14.1327 +
 14.1328 +    ReferenceProcessor* rp = _g1h->ref_processor_cm();
 14.1329 +
 14.1330 +    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
 14.1331 +    int stride = MIN2(MAX2(_n_workers, 1), limit);
 14.1332 +
 14.1333 +    // limit is set using max_num_q() - which was set using ParallelGCThreads.
 14.1334 +    // So this must be true - but assert just in case someone decides to
 14.1335 +    // change the worker ids.
 14.1336 +    assert(0 <= i && i < limit, "sanity");
 14.1337 +    assert(!rp->discovery_is_atomic(), "check this code");
 14.1338 +
 14.1339 +    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
 14.1340 +    for (int idx = i; idx < limit; idx += stride) {
 14.1341 +      DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
 14.1342 +
 14.1343 +      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
 14.1344 +      while (iter.has_next()) {
 14.1345 +        // Since discovery is not atomic for the CM ref processor, we
 14.1346 +        // can see some null referent objects.
 14.1347 +        iter.load_ptrs(DEBUG_ONLY(true));
 14.1348 +        oop ref = iter.obj();
 14.1349 +
 14.1350 +        // This will filter nulls.
 14.1351 +        if (iter.is_referent_alive()) {
 14.1352 +          iter.make_referent_alive();
 14.1353 +        }
 14.1354 +        iter.move_to_next();
 14.1355 +      }
 14.1356 +    }
 14.1357 +
 14.1358 +    // Drain the queue - which may cause stealing
 14.1359 +    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
 14.1360 +    drain_queue.do_void();
 14.1361 +    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
 14.1362 +    assert(pss.refs()->is_empty(), "should be");
 14.1363 +  }
 14.1364 +};
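The selection loop in G1ParPreserveCMReferentsTask::work() spreads the CM ref processor's discovered lists over the workers by striding: worker i claims lists i, i+stride, i+2*stride, ... below limit, with stride = MIN2(MAX2(n_workers, 1), limit). A small self-contained sketch (hypothetical worker and list counts) that prints which list indices each worker would visit:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Hypothetical values: 4 workers and, say, 4 reference subclasses * 8 queues = 32 lists.
      const int n_workers = 4;
      const int limit     = 4 * 8;   // number_of_subclasses_of_ref() * max_num_q()
      const int stride    = std::min(std::max(n_workers, 1), limit);

      for (int i = 0; i < n_workers; i++) {
        std::printf("worker %d claims lists:", i);
        // Same selection as the task: [i, i+stride, i+2*stride, ...) below limit.
        for (int idx = i; idx < limit; idx += stride) {
          std::printf(" %d", idx);
        }
        std::printf("\n");
      }
      return 0;
    }

With these numbers every worker visits eight of the 32 lists and no list is visited twice.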
 14.1365 +
 14.1366 +// Weak Reference processing during an evacuation pause (part 1).
 14.1367 +void G1CollectedHeap::process_discovered_references() {
 14.1368 +  double ref_proc_start = os::elapsedTime();
 14.1369 +
 14.1370 +  ReferenceProcessor* rp = _ref_processor_stw;
 14.1371 +  assert(rp->discovery_enabled(), "should have been enabled");
 14.1372 +
 14.1373 +  // Any reference objects, in the collection set, that were 'discovered'
 14.1374 +  // by the CM ref processor should have already been copied (either by
 14.1375 +  // applying the external root copy closure to the discovered lists, or
 14.1376 +  // by following an RSet entry).
 14.1377 +  //
 14.1378 +  // But some of the referents that these reference objects point to, and
 14.1379 +  // that are in the collection set, may not have been copied: the STW ref
 14.1380 +  // processor would have seen that the reference object had already
 14.1381 +  // been 'discovered' and would have skipped discovering the reference,
 14.1382 +  // but would not have treated the reference object as a regular oop.
 14.1383 +  // As a result the copy closure would not have been applied to the
 14.1384 +  // referent object.
 14.1385 +  //
 14.1386 +  // We need to explicitly copy these referent objects - the references
 14.1387 +  // will be processed at the end of remarking.
 14.1388 +  //
 14.1389 +  // We also need to do this copying before we process the reference
 14.1390 +  // objects discovered by the STW ref processor in case one of these
 14.1391 +  // referents points to another object which is also referenced by an
 14.1392 +  // object discovered by the STW ref processor.
 14.1393 +
 14.1394 +  int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
 14.1395 +                        workers()->total_workers() : 1);
 14.1396 +
 14.1397 +  set_par_threads(n_workers);
 14.1398 +  G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues);
 14.1399 +
 14.1400 +  if (G1CollectedHeap::use_parallel_gc_threads()) {
 14.1401 +    workers()->run_task(&keep_cm_referents);
 14.1402 +  } else {
 14.1403 +    keep_cm_referents.work(0);
 14.1404 +  }
 14.1405 +
 14.1406 +  set_par_threads(0);
 14.1407 +
 14.1408 +  // Closure to test whether a referent is alive.
 14.1409 +  G1STWIsAliveClosure is_alive(this);
 14.1410 +
 14.1411 +  // Even when parallel reference processing is enabled, the processing
 14.1412 +  // of JNI refs is serial and performed by the current thread
 14.1413 +  // rather than by a worker. The following PSS will be used for processing
 14.1414 +  // JNI refs.
 14.1415 +
 14.1416 +  // Use only a single queue for this PSS.
 14.1417 +  G1ParScanThreadState pss(this, 0);
 14.1418 +
 14.1419 +  // We do not embed a reference processor in the copying/scanning
 14.1420 +  // closures while we're actually processing the discovered
 14.1421 +  // reference objects.
 14.1422 +  G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
 14.1423 +  G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
 14.1424 +  G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
 14.1425 +
 14.1426 +  pss.set_evac_closure(&scan_evac_cl);
 14.1427 +  pss.set_evac_failure_closure(&evac_failure_cl);
 14.1428 +  pss.set_partial_scan_closure(&partial_scan_cl);
 14.1429 +
 14.1430 +  assert(pss.refs()->is_empty(), "pre-condition");
 14.1431 +
 14.1432 +  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
 14.1433 +  G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
 14.1434 +
 14.1435 +  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
 14.1436 +  G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
 14.1437 +
 14.1438 +  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
 14.1439 +  OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
 14.1440 +
 14.1441 +  if (_g1h->g1_policy()->during_initial_mark_pause()) {
 14.1442 +    // We also need to mark copied objects.
 14.1443 +    copy_non_heap_cl = &copy_mark_non_heap_cl;
 14.1444 +    copy_perm_cl = &copy_mark_perm_cl;
 14.1445 +  }
 14.1446 +
 14.1447 +  // Keep alive closure.
 14.1448 +  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
 14.1449 +
 14.1450 +  // Serial Complete GC closure
 14.1451 +  G1STWDrainQueueClosure drain_queue(this, &pss);
 14.1452 +
 14.1453 +  // Setup the soft refs policy...
 14.1454 +  rp->setup_policy(false);
 14.1455 +
 14.1456 +  if (!rp->processing_is_mt()) {
 14.1457 +    // Serial reference processing...
 14.1458 +    rp->process_discovered_references(&is_alive,
 14.1459 +                                      &keep_alive,
 14.1460 +                                      &drain_queue,
 14.1461 +                                      NULL);
 14.1462 +  } else {
 14.1463 +    // Parallel reference processing
 14.1464 +    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
 14.1465 +    assert(rp->num_q() == active_workers, "sanity");
 14.1466 +    assert(active_workers <= rp->max_num_q(), "sanity");
 14.1467 +
 14.1468 +    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
 14.1469 +    rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
 14.1470 +  }
 14.1471 +
 14.1472 +  // We have completed copying any necessary live referent objects
 14.1473 +  // (that were not copied during the actual pause) so we can
 14.1474 +  // retire any active alloc buffers
 14.1475 +  pss.retire_alloc_buffers();
 14.1476 +  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
 14.1477 +
 14.1478 +  double ref_proc_time = os::elapsedTime() - ref_proc_start;
 14.1479 +  g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
 14.1480 +}
 14.1481 +
 14.1482 +// Weak Reference processing during an evacuation pause (part 2).
 14.1483 +void G1CollectedHeap::enqueue_discovered_references() {
 14.1484 +  double ref_enq_start = os::elapsedTime();
 14.1485 +
 14.1486 +  ReferenceProcessor* rp = _ref_processor_stw;
 14.1487 +  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
 14.1488 +
 14.1489 +  // Now enqueue any references remaining on the discovered lists onto
 14.1490 +  // the pending list.
 14.1491 +  if (!rp->processing_is_mt()) {
 14.1492 +    // Serial reference processing...
 14.1493 +    rp->enqueue_discovered_references();
 14.1494 +  } else {
 14.1495 +    // Parallel reference enqueuing
 14.1496 +
 14.1497 +    int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
 14.1498 +    assert(rp->num_q() == active_workers, "sanity");
 14.1499 +    assert(active_workers <= rp->max_num_q(), "sanity");
 14.1500 +
 14.1501 +    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
 14.1502 +    rp->enqueue_discovered_references(&par_task_executor);
 14.1503 +  }
 14.1504 +
 14.1505 +  rp->verify_no_references_recorded();
 14.1506 +  assert(!rp->discovery_enabled(), "should have been disabled");
 14.1507 +
 14.1508 +  // FIXME
 14.1509 +  // CM's reference processing also cleans up the string and symbol tables.
 14.1510 +  // Should we do that here also? We could, but it is a serial operation
 14.1511 +  // and could significantly increase the pause time.
 14.1512 +
 14.1513 +  double ref_enq_time = os::elapsedTime() - ref_enq_start;
 14.1514 +  g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
 14.1515 +}
 14.1516 +
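Both process_discovered_references() and enqueue_discovered_references() above time themselves with os::elapsedTime(), which reports seconds, and hand the policy a value in milliseconds. A self-contained sketch of the same bookkeeping, using std::chrono as a stand-in for os::elapsedTime() and a hypothetical record_ref_proc_time() sink:

    #include <chrono>
    #include <cstdio>
    #include <thread>

    // Hypothetical stand-in for g1_policy()->record_ref_proc_time(ms).
    static void record_ref_proc_time(double ms) {
      std::printf("      [Ref Proc: %.1f ms]\n", ms);
    }

    int main() {
      using clock = std::chrono::steady_clock;
      clock::time_point start = clock::now();          // like os::elapsedTime() at phase start

      // ... reference processing would happen here; simulate a short phase ...
      std::this_thread::sleep_for(std::chrono::milliseconds(3));

      double elapsed_seconds = std::chrono::duration<double>(clock::now() - start).count();
      record_ref_proc_time(elapsed_seconds * 1000.0);  // the policy stores milliseconds
      return 0;
    }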
 14.1517  void G1CollectedHeap::evacuate_collection_set() {
 14.1518    set_evacuation_failed(false);
 14.1519  
 14.1520 @@ -4658,6 +5266,7 @@
 14.1521  
 14.1522    assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
 14.1523    double start_par = os::elapsedTime();
 14.1524 +
 14.1525    if (G1CollectedHeap::use_parallel_gc_threads()) {
 14.1526      // The individual threads will set their evac-failure closures.
 14.1527      StrongRootsScope srs(this);
 14.1528 @@ -4672,15 +5281,23 @@
 14.1529    g1_policy()->record_par_time(par_time);
 14.1530    set_par_threads(0);
 14.1531  
 14.1532 +  // Process any discovered reference objects - we have
 14.1533 +  // to do this _before_ we retire the GC alloc regions
 14.1534 +  // as we may have to copy some 'reachable' referent
 14.1535 +  // objects (and their reachable sub-graphs) that were
 14.1536 +  // not copied during the pause.
 14.1537 +  process_discovered_references();
 14.1538 +
 14.1539    // Weak root processing.
 14.1540    // Note: when JSR 292 is enabled and code blobs can contain
 14.1541    // non-perm oops then we will need to process the code blobs
 14.1542    // here too.
 14.1543    {
 14.1544 -    G1IsAliveClosure is_alive(this);
 14.1545 +    G1STWIsAliveClosure is_alive(this);
 14.1546      G1KeepAliveClosure keep_alive(this);
 14.1547      JNIHandles::weak_oops_do(&is_alive, &keep_alive);
 14.1548    }
 14.1549 +
 14.1550    release_gc_alloc_regions();
 14.1551    g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 14.1552  
 14.1553 @@ -4702,6 +5319,15 @@
 14.1554      }
 14.1555    }
 14.1556  
 14.1557 +  // Enqueue any references remaining on the STW
 14.1558 +  // reference processor's discovered lists. We need to do
 14.1559 +  // this after the card table is cleaned (and verified) as
 14.1560 +  // the act of enqueuing entries on to the pending list
 14.1561 +  // will log these updates (and dirty their associated
 14.1562 +  // cards). We need these updates logged to update any
 14.1563 +  // RSets.
 14.1564 +  enqueue_discovered_references();
 14.1565 +
 14.1566    if (G1DeferredRSUpdate) {
 14.1567      RedirtyLoggedCardTableEntryFastClosure redirty;
 14.1568      dirty_card_queue_set().set_closure(&redirty);
 14.1569 @@ -4902,7 +5528,7 @@
 14.1570    }
 14.1571  
 14.1572    double elapsed = os::elapsedTime() - start;
 14.1573 -  g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 14.1574 +  g1_policy()->record_clear_ct_time(elapsed * 1000.0);
 14.1575  #ifndef PRODUCT
 14.1576    if (G1VerifyCTCleanup || VerifyAfterGC) {
 14.1577      G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
 14.1578 @@ -5193,7 +5819,6 @@
 14.1579        g1_policy()->update_region_num(true /* next_is_young */);
 14.1580        set_region_short_lived_locked(new_alloc_region);
 14.1581        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
 14.1582 -      g1mm()->update_eden_counters();
 14.1583        return new_alloc_region;
 14.1584      }
 14.1585    }
 14.1586 @@ -5208,6 +5833,10 @@
 14.1587    g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
 14.1588    _summary_bytes_used += allocated_bytes;
 14.1589    _hr_printer.retire(alloc_region);
 14.1590 +  // We update the eden sizes here, when the region is retired,
 14.1591 +  // instead of when it's allocated, since this is the point that its
 14.1592 +  // used space has been recorded in _summary_bytes_used.
 14.1593 +  g1mm()->update_eden_size();
 14.1594  }
 14.1595  
 14.1596  HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
    15.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 30 22:54:43 2011 -0700
    15.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Oct 06 13:28:09 2011 -0400
    15.3 @@ -155,6 +155,19 @@
    15.4      : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
    15.5  };
    15.6  
    15.7 +// The G1 STW is alive closure.
    15.8 +// An instance is embedded into the G1CH and used as the
    15.9 +// (optional) _is_alive_non_header closure in the STW
   15.10 +// reference processor. It is also extensively used during
   15.11 +// reference processing during STW evacuation pauses.
   15.12 +class G1STWIsAliveClosure: public BoolObjectClosure {
   15.13 +  G1CollectedHeap* _g1;
   15.14 +public:
   15.15 +  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   15.16 +  void do_object(oop p) { assert(false, "Do not call."); }
   15.17 +  bool do_object_b(oop p);
   15.18 +};
   15.19 +
   15.20  class SurvivorGCAllocRegion : public G1AllocRegion {
   15.21  protected:
   15.22    virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
   15.23 @@ -174,6 +187,7 @@
   15.24  };
   15.25  
   15.26  class RefineCardTableEntryClosure;
   15.27 +
   15.28  class G1CollectedHeap : public SharedHeap {
   15.29    friend class VM_G1CollectForAllocation;
   15.30    friend class VM_GenCollectForPermanentAllocation;
   15.31 @@ -573,9 +587,20 @@
   15.32    // allocated block, or else "NULL".
   15.33    HeapWord* expand_and_allocate(size_t word_size);
   15.34  
   15.35 +  // Process any reference objects discovered during
   15.36 +  // an incremental evacuation pause.
   15.37 +  void process_discovered_references();
   15.38 +
   15.39 +  // Enqueue any remaining discovered references
   15.40 +  // after processing.
   15.41 +  void enqueue_discovered_references();
   15.42 +
   15.43  public:
   15.44  
   15.45 -  G1MonitoringSupport* g1mm() { return _g1mm; }
   15.46 +  G1MonitoringSupport* g1mm() {
   15.47 +    assert(_g1mm != NULL, "should have been initialized");
   15.48 +    return _g1mm;
   15.49 +  }
   15.50  
   15.51    // Expand the garbage-first heap by at least the given size (in bytes!).
   15.52    // Returns true if the heap was expanded by the requested amount;
   15.53 @@ -822,17 +847,87 @@
   15.54    void finalize_for_evac_failure();
   15.55  
   15.56    // An attempt to evacuate "obj" has failed; take necessary steps.
   15.57 -  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   15.58 +  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
   15.59 +                                    bool should_mark_root);
   15.60    void handle_evacuation_failure_common(oop obj, markOop m);
   15.61  
   15.62 +  // ("Weak") Reference processing support.
   15.63 +  //
   15.64 +  // G1 has 2 instances of the reference processor class. One
   15.65 +  // (_ref_processor_cm) handles reference object discovery
   15.66 +  // and subsequent processing during concurrent marking cycles.
   15.67 +  //
   15.68 +  // The other (_ref_processor_stw) handles reference object
   15.69 +  // discovery and processing during full GCs and incremental
   15.70 +  // evacuation pauses.
   15.71 +  //
   15.72 +  // During an incremental pause, reference discovery will be
   15.73 +  // temporarily disabled for _ref_processor_cm and will be
   15.74 +  // enabled for _ref_processor_stw. At the end of the evacuation
   15.75 +  // pause, references discovered by _ref_processor_stw will be
   15.76 +  // processed and discovery will be disabled. The previous
   15.77 +  // setting for reference object discovery for _ref_processor_cm
   15.78 +  // will be reinstated.
   15.79 +  //
   15.80 +  // At the start of marking:
   15.81 +  //  * Discovery by the CM ref processor is verified to be inactive
   15.82 +  //    and its discovered lists are empty.
   15.83 +  //  * Discovery by the CM ref processor is then enabled.
   15.84 +  //
   15.85 +  // At the end of marking:
   15.86 +  //  * Any references on the CM ref processor's discovered
   15.87 +  //    lists are processed (possibly MT).
   15.88 +  //
   15.89 +  // At the start of full GC we:
   15.90 +  //  * Disable discovery by the CM ref processor and
   15.91 +  //    empty CM ref processor's discovered lists
   15.92 +  //    (without processing any entries).
   15.93 +  //  * Verify that the STW ref processor is inactive and its
   15.94 +  //    discovered lists are empty.
   15.95 +  //  * Temporarily set STW ref processor discovery as single threaded.
   15.96 +  //  * Temporarily clear the STW ref processor's _is_alive_non_header
   15.97 +  //    field.
   15.98 +  //  * Finally enable discovery by the STW ref processor.
   15.99 +  //
  15.100 +  // The STW ref processor is used to record any discovered
  15.101 +  // references during the full GC.
  15.102 +  //
  15.103 +  // At the end of a full GC we:
  15.104 +  //  * Enqueue any reference objects discovered by the STW ref processor
  15.105 +  //    that have non-live referents. This has the side-effect of
  15.106 +  //    making the STW ref processor inactive by disabling discovery.
  15.107 +  //  * Verify that the CM ref processor is still inactive
  15.108 +  //    and no references have been placed on its discovered
  15.109 +  //    lists (also checked as a precondition during initial marking).
  15.110 +
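As an illustration only of the discovery hand-off described above (a hypothetical RefDiscovery type, not the HotSpot ReferenceProcessor API), a sketch of the toggling around an evacuation pause:

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for a reference processor's discovery switch.
    class RefDiscovery {
      const char* _name;
      bool        _enabled;
    public:
      explicit RefDiscovery(const char* name) : _name(name), _enabled(false) {}
      bool enabled() const { return _enabled; }
      void set_enabled(bool v) {
        _enabled = v;
        std::printf("%s discovery %s\n", _name, v ? "enabled" : "disabled");
      }
    };

    // Sketch of the evacuation-pause hand-off between the two processors.
    void evacuation_pause(RefDiscovery& cm, RefDiscovery& stw) {
      bool cm_was_enabled = cm.enabled();   // remember the CM setting
      cm.set_enabled(false);                // CM discovery is paused for the pause
      stw.set_enabled(true);                // the STW processor discovers during evacuation

      // ... evacuate, then process what the STW processor discovered ...

      stw.set_enabled(false);               // processing/enqueueing disables STW discovery
      cm.set_enabled(cm_was_enabled);       // reinstate the previous CM setting
    }

    int main() {
      RefDiscovery cm("CM"), stw("STW");
      cm.set_enabled(true);                 // e.g. a marking cycle is in progress
      evacuation_pause(cm, stw);
      assert(cm.enabled() && !stw.enabled());
      return 0;
    }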
  15.111 +  // The (stw) reference processor...
  15.112 +  ReferenceProcessor* _ref_processor_stw;
  15.113 +
  15.114 +  // During reference object discovery, the _is_alive_non_header
  15.115 +  // closure (if non-null) is applied to the referent object to
  15.116 +  // determine whether the referent is live. If so then the
  15.117 +  // reference object does not need to be 'discovered' and can
  15.118 +  // be treated as a regular oop. This has the benefit of reducing
  15.119 +  // the number of 'discovered' reference objects that need to
  15.120 +  // be processed.
  15.121 +  //
  15.122 +  // Instance of the is_alive closure for embedding into the
  15.123 +  // STW reference processor as the _is_alive_non_header field.
  15.124 +  // Supplying a value for the _is_alive_non_header field is
  15.125 +  // optional but doing so prevents unnecessary additions to
  15.126 +  // the discovered lists during reference discovery.
  15.127 +  G1STWIsAliveClosure _is_alive_closure_stw;
  15.128 +
  15.129 +  // The (concurrent marking) reference processor...
  15.130 +  ReferenceProcessor* _ref_processor_cm;
  15.131 +
  15.132    // Instance of the concurrent mark is_alive closure for embedding
  15.133 -  // into the reference processor as the is_alive_non_header. This
  15.134 -  // prevents unnecessary additions to the discovered lists during
  15.135 -  // concurrent discovery.
  15.136 -  G1CMIsAliveClosure _is_alive_closure;
  15.137 -
  15.138 -  // ("Weak") Reference processing support
  15.139 -  ReferenceProcessor* _ref_processor;
  15.140 +  // into the Concurrent Marking reference processor as the
  15.141 +  // _is_alive_non_header field. Supplying a value for the
  15.142 +  // _is_alive_non_header field is optional but doing so prevents
  15.143 +  // unnecessary additions to the discovered lists during reference
  15.144 +  // discovery.
  15.145 +  G1CMIsAliveClosure _is_alive_closure_cm;
  15.146  
  15.147    enum G1H_process_strong_roots_tasks {
  15.148      G1H_PS_mark_stack_oops_do,
  15.149 @@ -873,6 +968,7 @@
  15.150    // specified by the policy object.
  15.151    jint initialize();
  15.152  
  15.153 +  // Initialize weak reference processing.
  15.154    virtual void ref_processing_init();
  15.155  
  15.156    void set_par_threads(int t) {
  15.157 @@ -924,8 +1020,13 @@
  15.158    // The shared block offset table array.
  15.159    G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
  15.160  
  15.161 -  // Reference Processing accessor
  15.162 -  ReferenceProcessor* ref_processor() { return _ref_processor; }
  15.163 +  // Reference Processing accessors
  15.164 +
  15.165 +  // The STW reference processor....
  15.166 +  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
  15.167 +
  15.168 +  // The Concurrent Marking reference processor...
  15.169 +  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
  15.170  
  15.171    virtual size_t capacity() const;
  15.172    virtual size_t used() const;
    16.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Sep 30 22:54:43 2011 -0700
    16.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Oct 06 13:28:09 2011 -0400
    16.3 @@ -152,8 +152,12 @@
    16.4  
    16.5    _summary(new Summary()),
    16.6  
    16.7 +  _cur_clear_ct_time_ms(0.0),
    16.8 +
    16.9 +  _cur_ref_proc_time_ms(0.0),
   16.10 +  _cur_ref_enq_time_ms(0.0),
   16.11 +
   16.12  #ifndef PRODUCT
   16.13 -  _cur_clear_ct_time_ms(0.0),
   16.14    _min_clear_cc_time_ms(-1.0),
   16.15    _max_clear_cc_time_ms(-1.0),
   16.16    _cur_clear_cc_time_ms(0.0),
   16.17 @@ -294,10 +298,10 @@
   16.18    }
   16.19  
   16.20    // Verify PLAB sizes
   16.21 -  const uint region_size = HeapRegion::GrainWords;
   16.22 +  const size_t region_size = HeapRegion::GrainWords;
   16.23    if (YoungPLABSize > region_size || OldPLABSize > region_size) {
   16.24      char buffer[128];
   16.25 -    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
   16.26 +    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
   16.27                   OldPLABSize > region_size ? "Old" : "Young", region_size);
   16.28      vm_exit_during_initialization(buffer);
   16.29    }
   16.30 @@ -459,15 +463,16 @@
   16.31  // ParallelScavengeHeap::initialize()). We might change this in the
   16.32  // future, but it's a good start.
   16.33  class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
   16.34 +private:
   16.35 +  size_t size_to_region_num(size_t byte_size) {
   16.36 +    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
   16.37 +  }
   16.38  
   16.39  public:
   16.40    G1YoungGenSizer() {
   16.41      initialize_flags();
   16.42      initialize_size_info();
   16.43    }
   16.44 -  size_t size_to_region_num(size_t byte_size) {
   16.45 -    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
   16.46 -  }
   16.47    size_t min_young_region_num() {
   16.48      return size_to_region_num(_min_gen0_size);
   16.49    }
   16.50 @@ -501,11 +506,10 @@
   16.51  
   16.52    if (FLAG_IS_CMDLINE(NewRatio)) {
   16.53      if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
   16.54 -      gclog_or_tty->print_cr("-XX:NewSize and -XX:MaxNewSize overrides -XX:NewRatio");
   16.55 +      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
   16.56      } else {
   16.57        // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
   16.58 -      size_t heap_regions = sizer.size_to_region_num(_g1->n_regions());
   16.59 -      update_young_list_size_using_newratio(heap_regions);
   16.60 +      update_young_list_size_using_newratio(_g1->n_regions());
   16.61        _using_new_ratio_calculations = true;
   16.62      }
   16.63    }
   16.64 @@ -1479,6 +1483,8 @@
   16.65  #endif
   16.66      print_stats(1, "Other", other_time_ms);
   16.67      print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
   16.68 +    print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
   16.69 +    print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
   16.70  
   16.71      for (int i = 0; i < _aux_num; ++i) {
   16.72        if (_cur_aux_times_set[i]) {
   16.73 @@ -1519,11 +1525,17 @@
   16.74    }
   16.75  
   16.76    if (_last_full_young_gc) {
   16.77 -    ergo_verbose2(ErgoPartiallyYoungGCs,
   16.78 -                  "start partially-young GCs",
   16.79 -                  ergo_format_byte_perc("known garbage"),
   16.80 -                  _known_garbage_bytes, _known_garbage_ratio * 100.0);
   16.81 -    set_full_young_gcs(false);
   16.82 +    if (!last_pause_included_initial_mark) {
   16.83 +      ergo_verbose2(ErgoPartiallyYoungGCs,
   16.84 +                    "start partially-young GCs",
   16.85 +                    ergo_format_byte_perc("known garbage"),
   16.86 +                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
   16.87 +      set_full_young_gcs(false);
   16.88 +    } else {
   16.89 +      ergo_verbose0(ErgoPartiallyYoungGCs,
   16.90 +                    "do not start partially-young GCs",
   16.91 +                    ergo_format_reason("concurrent cycle is about to start"));
   16.92 +    }
   16.93      _last_full_young_gc = false;
   16.94    }
   16.95  
   16.96 @@ -2485,6 +2497,13 @@
   16.97        // initiate a new cycle.
   16.98  
   16.99        set_during_initial_mark_pause();
  16.100 +      // We do not allow non-full young GCs during marking.
  16.101 +      if (!full_young_gcs()) {
  16.102 +        set_full_young_gcs(true);
  16.103 +        ergo_verbose0(ErgoPartiallyYoungGCs,
  16.104 +                      "end partially-young GCs",
  16.105 +                      ergo_format_reason("concurrent cycle is about to start"));
  16.106 +      }
  16.107  
  16.108        // And we can now clear initiate_conc_mark_if_possible() as
  16.109        // we've already acted on it.
    17.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Sep 30 22:54:43 2011 -0700
    17.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Oct 06 13:28:09 2011 -0400
    17.3 @@ -119,6 +119,8 @@
    17.4    double _cur_satb_drain_time_ms;
    17.5    double _cur_clear_ct_time_ms;
    17.6    bool   _satb_drain_time_set;
    17.7 +  double _cur_ref_proc_time_ms;
    17.8 +  double _cur_ref_enq_time_ms;
    17.9  
   17.10  #ifndef PRODUCT
   17.11    // Card Table Count Cache stats
   17.12 @@ -986,6 +988,14 @@
   17.13      _cur_aux_times_ms[i] += ms;
   17.14    }
   17.15  
   17.16 +  void record_ref_proc_time(double ms) {
   17.17 +    _cur_ref_proc_time_ms = ms;
   17.18 +  }
   17.19 +
   17.20 +  void record_ref_enq_time(double ms) {
   17.21 +    _cur_ref_enq_time_ms = ms;
   17.22 +  }
   17.23 +
   17.24  #ifndef PRODUCT
   17.25    void record_cc_clear_time(double ms) {
   17.26      if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
   17.27 @@ -1139,6 +1149,10 @@
   17.28      return young_list_length < young_list_max_length;
   17.29    }
   17.30  
   17.31 +  size_t young_list_max_length() {
   17.32 +    return _young_list_max_length;
   17.33 +  }
   17.34 +
   17.35    void update_region_num(bool young);
   17.36  
   17.37    bool full_young_gcs() {
    18.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Sep 30 22:54:43 2011 -0700
    18.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Oct 06 13:28:09 2011 -0400
    18.3 @@ -62,6 +62,8 @@
    18.4    // hook up weak ref data so it can be used during Mark-Sweep
    18.5    assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
    18.6    assert(rp != NULL, "should be non-NULL");
    18.7 +  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
    18.8 +
    18.9    GenMarkSweep::_ref_processor = rp;
   18.10    rp->setup_policy(clear_all_softrefs);
   18.11  
   18.12 @@ -139,6 +141,8 @@
   18.13  
   18.14    // Process reference objects found during marking
   18.15    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
   18.16 +  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
   18.17 +
   18.18    rp->setup_policy(clear_all_softrefs);
   18.19    rp->process_discovered_references(&GenMarkSweep::is_alive,
   18.20                                      &GenMarkSweep::keep_alive,
   18.21 @@ -166,7 +170,6 @@
   18.22    GenMarkSweep::follow_mdo_weak_refs();
   18.23    assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");
   18.24  
   18.25 -
   18.26    // Visit interned string tables and delete unmarked oops
   18.27    StringTable::unlink(&GenMarkSweep::is_alive);
   18.28    // Clean up unreferenced symbols in symbol table.
   18.29 @@ -346,7 +349,8 @@
   18.30                             NULL,  // do not touch code cache here
   18.31                             &GenMarkSweep::adjust_pointer_closure);
   18.32  
   18.33 -  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
   18.34 +  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
   18.35 +  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
   18.36  
   18.37    // Now adjust pointers in remaining weak roots.  (All of which should
   18.38    // have been cleared if they pointed to non-surviving objects.)
    19.1 --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Fri Sep 30 22:54:43 2011 -0700
    19.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Oct 06 13:28:09 2011 -0400
    19.3 @@ -27,19 +27,69 @@
    19.4  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    19.5  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    19.6  
    19.7 -G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
    19.8 -                                         VirtualSpace* g1_storage_addr) :
    19.9 +G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
   19.10 +                                           const char* name,
   19.11 +                                           int ordinal, int spaces,
   19.12 +                                           size_t min_capacity,
   19.13 +                                           size_t max_capacity,
   19.14 +                                           size_t curr_capacity)
   19.15 +  : GenerationCounters(name, ordinal, spaces, min_capacity,
   19.16 +                       max_capacity, curr_capacity), _g1mm(g1mm) { }
   19.17 +
   19.18 +// We pad the capacity three times given that the young generation
   19.19 +// contains three spaces (eden and two survivors).
   19.20 +G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm,
   19.21 +                                                     const char* name)
   19.22 +  : G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */,
   19.23 +               G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
   19.24 +               G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
   19.25 +               G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
   19.26 +  update_all();
   19.27 +}
   19.28 +
   19.29 +G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
   19.30 +                                                 const char* name)
   19.31 +  : G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */,
   19.32 +               G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
   19.33 +               G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
   19.34 +               G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
   19.35 +  update_all();
   19.36 +}
   19.37 +
   19.38 +void G1YoungGenerationCounters::update_all() {
   19.39 +  size_t committed =
   19.40 +            G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
   19.41 +  _current_size->set_value(committed);
   19.42 +}
   19.43 +
   19.44 +void G1OldGenerationCounters::update_all() {
   19.45 +  size_t committed =
   19.46 +            G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
   19.47 +  _current_size->set_value(committed);
   19.48 +}
   19.49 +
   19.50 +G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
   19.51    _g1h(g1h),
   19.52    _incremental_collection_counters(NULL),
   19.53    _full_collection_counters(NULL),
   19.54 -  _non_young_collection_counters(NULL),
   19.55 +  _old_collection_counters(NULL),
   19.56    _old_space_counters(NULL),
   19.57    _young_collection_counters(NULL),
   19.58    _eden_counters(NULL),
   19.59    _from_counters(NULL),
   19.60    _to_counters(NULL),
   19.61 -  _g1_storage_addr(g1_storage_addr)
   19.62 -{
   19.63 +
   19.64 +  _overall_reserved(0),
   19.65 +  _overall_committed(0),    _overall_used(0),
   19.66 +  _young_region_num(0),
   19.67 +  _young_gen_committed(0),
   19.68 +  _eden_committed(0),       _eden_used(0),
   19.69 +  _survivor_committed(0),   _survivor_used(0),
   19.70 +  _old_committed(0),        _old_used(0) {
   19.71 +
   19.72 +  _overall_reserved = g1h->max_capacity();
   19.73 +  recalculate_sizes();
   19.74 +
   19.75    // Counters for GC collections
   19.76    //
   19.77    //  name "collector.0".  In a generational collector this would be the
   19.78 @@ -69,110 +119,147 @@
   19.79    // generational GC terms.  The "1, 1" parameters are for
   19.80    // the n-th generation (=1) with 1 space.
   19.81    // Counters are created from minCapacity, maxCapacity, and capacity
   19.82 -  _non_young_collection_counters =
   19.83 -    new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
   19.84 +  _old_collection_counters = new G1OldGenerationCounters(this, "old");
   19.85  
   19.86    //  name  "generation.1.space.0"
   19.87    // Counters are created from maxCapacity, capacity, initCapacity,
   19.88    // and used.
   19.89 -  _old_space_counters = new HSpaceCounters("space", 0,
   19.90 -    _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
   19.91 +  _old_space_counters = new HSpaceCounters("space", 0 /* ordinal */,
   19.92 +    pad_capacity(overall_reserved()) /* max_capacity */,
   19.93 +    pad_capacity(old_space_committed()) /* init_capacity */,
   19.94 +   _old_collection_counters);
   19.95  
   19.96    //   Young collection set
   19.97    //  name "generation.0".  This is logically the young generation.
   19.98    //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
   19.99 -  // See  _non_young_collection_counters for additional counters
  19.100 -  _young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
  19.101 +  // See _old_collection_counters for additional counters
  19.102 +  _young_collection_counters = new G1YoungGenerationCounters(this, "young");
  19.103  
  19.104 -  // Replace "max_heap_byte_size() with maximum young gen size for
  19.105 -  // g1Collectedheap
  19.106    //  name "generation.0.space.0"
  19.107    // See _old_space_counters for additional counters
  19.108 -  _eden_counters = new HSpaceCounters("eden", 0,
  19.109 -    _g1h->max_capacity(), eden_space_committed(),
  19.110 +  _eden_counters = new HSpaceCounters("eden", 0 /* ordinal */,
  19.111 +    pad_capacity(overall_reserved()) /* max_capacity */,
  19.112 +    pad_capacity(eden_space_committed()) /* init_capacity */,
  19.113      _young_collection_counters);
  19.114  
  19.115    //  name "generation.0.space.1"
  19.116    // See _old_space_counters for additional counters
  19.117    // Set the arguments to indicate that this survivor space is not used.
  19.118 -  _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
  19.119 +  _from_counters = new HSpaceCounters("s0", 1 /* ordinal */,
  19.120 +    pad_capacity(0) /* max_capacity */,
  19.121 +    pad_capacity(0) /* init_capacity */,
  19.122      _young_collection_counters);
  19.123 +  // Given that this survivor space is not used, we update it here
  19.124 +  // once to reflect that its used space is 0 so that we don't have to
  19.125 +  // worry about updating it again later.
  19.126 +  _from_counters->update_used(0);
  19.127  
  19.128    //  name "generation.0.space.2"
  19.129    // See _old_space_counters for additional counters
  19.130 -  _to_counters = new HSpaceCounters("s1", 2,
  19.131 -    _g1h->max_capacity(),
  19.132 -    survivor_space_committed(),
  19.133 +  _to_counters = new HSpaceCounters("s1", 2 /* ordinal */,
  19.134 +    pad_capacity(overall_reserved()) /* max_capacity */,
  19.135 +    pad_capacity(survivor_space_committed()) /* init_capacity */,
  19.136      _young_collection_counters);
  19.137  }
  19.138  
  19.139 -size_t G1MonitoringSupport::overall_committed() {
  19.140 -  return g1h()->capacity();
  19.141 +void G1MonitoringSupport::recalculate_sizes() {
  19.142 +  G1CollectedHeap* g1 = g1h();
  19.143 +
  19.144 +  // Recalculate all the sizes from scratch. We assume that this is
  19.145 +  // called at a point where no concurrent updates to the various
  19.146 +  // values we read here are possible (i.e., at an STW phase at the end
  19.147 +  // of a GC).
  19.148 +
  19.149 +  size_t young_list_length = g1->young_list()->length();
  19.150 +  size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
  19.151 +  assert(young_list_length >= survivor_list_length, "invariant");
  19.152 +  size_t eden_list_length = young_list_length - survivor_list_length;
  19.153 +  // Max length includes any potential extensions to the young gen
  19.154 +  // we'll do when the GC locker is active.
  19.155 +  size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
  19.156 +  assert(young_list_max_length >= survivor_list_length, "invariant");
  19.157 +  size_t eden_list_max_length = young_list_max_length - survivor_list_length;
  19.158 +
  19.159 +  _overall_used = g1->used_unlocked();
  19.160 +  _eden_used = eden_list_length * HeapRegion::GrainBytes;
  19.161 +  _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
  19.162 +  _young_region_num = young_list_length;
  19.163 +  _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
  19.164 +
  19.165 +  // First calculate the committed sizes that can be calculated independently.
  19.166 +  _survivor_committed = _survivor_used;
  19.167 +  _old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
  19.168 +
  19.169 +  // Next, start with the overall committed size.
  19.170 +  _overall_committed = g1->capacity();
  19.171 +  size_t committed = _overall_committed;
  19.172 +
  19.173 +  // Remove the committed size we have calculated so far (for the
  19.174 +  // survivor and old space).
  19.175 +  assert(committed >= (_survivor_committed + _old_committed), "sanity");
  19.176 +  committed -= _survivor_committed + _old_committed;
  19.177 +
  19.178 +  // Next, calculate and remove the committed size for the eden.
  19.179 +  _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
  19.180 +  // Somewhat defensive: be robust in case there are inaccuracies in
  19.181 +  // the calculations
  19.182 +  _eden_committed = MIN2(_eden_committed, committed);
  19.183 +  committed -= _eden_committed;
  19.184 +
  19.185 +  // Finally, give the rest to the old space...
  19.186 +  _old_committed += committed;
  19.187 +  // ..and calculate the young gen committed.
  19.188 +  _young_gen_committed = _eden_committed + _survivor_committed;
  19.189 +
  19.190 +  assert(_overall_committed ==
  19.191 +         (_eden_committed + _survivor_committed + _old_committed),
  19.192 +         "the committed sizes should add up");
  19.193 +  // Somewhat defensive: cap the eden used size to make sure it
  19.194 +  // never exceeds the committed size.
  19.195 +  _eden_used = MIN2(_eden_used, _eden_committed);
  19.196 +  // _survivor_committed and _old_committed are calculated in terms of
  19.197 +  // the corresponding _*_used value, so the next two conditions
  19.198 +  // should hold.
  19.199 +  assert(_survivor_used <= _survivor_committed, "post-condition");
  19.200 +  assert(_old_used <= _old_committed, "post-condition");
  19.201  }
  19.202  
  19.203 -size_t G1MonitoringSupport::overall_used() {
  19.204 -  return g1h()->used_unlocked();
  19.205 -}
  19.206 +void G1MonitoringSupport::recalculate_eden_size() {
  19.207 +  G1CollectedHeap* g1 = g1h();
  19.208  
  19.209 -size_t G1MonitoringSupport::eden_space_committed() {
  19.210 -  return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
  19.211 -}
  19.212 +  // When a new eden region is allocated, only the eden_used size is
  19.213 +  // affected (since we have recalculated everything else at the last GC).
  19.214  
  19.215 -size_t G1MonitoringSupport::eden_space_used() {
  19.216 -  size_t young_list_length = g1h()->young_list()->length();
  19.217 -  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
  19.218 -  size_t survivor_used = survivor_space_used();
  19.219 -  eden_used = subtract_up_to_zero(eden_used, survivor_used);
  19.220 -  return eden_used;
  19.221 -}
  19.222 -
  19.223 -size_t G1MonitoringSupport::survivor_space_committed() {
  19.224 -  return MAX2(survivor_space_used(),
  19.225 -              (size_t) HeapRegion::GrainBytes);
  19.226 -}
  19.227 -
  19.228 -size_t G1MonitoringSupport::survivor_space_used() {
  19.229 -  size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
  19.230 -  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
  19.231 -  return survivor_used;
  19.232 -}
  19.233 -
  19.234 -size_t G1MonitoringSupport::old_space_committed() {
  19.235 -  size_t committed = overall_committed();
  19.236 -  size_t eden_committed = eden_space_committed();
  19.237 -  size_t survivor_committed = survivor_space_committed();
  19.238 -  committed = subtract_up_to_zero(committed, eden_committed);
  19.239 -  committed = subtract_up_to_zero(committed, survivor_committed);
  19.240 -  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
  19.241 -  return committed;
  19.242 -}
  19.243 -
  19.244 -// See the comment near the top of g1MonitoringSupport.hpp for
  19.245 -// an explanation of these calculations for "used" and "capacity".
  19.246 -size_t G1MonitoringSupport::old_space_used() {
  19.247 -  size_t used = overall_used();
  19.248 -  size_t eden_used = eden_space_used();
  19.249 -  size_t survivor_used = survivor_space_used();
  19.250 -  used = subtract_up_to_zero(used, eden_used);
  19.251 -  used = subtract_up_to_zero(used, survivor_used);
  19.252 -  return used;
  19.253 -}
  19.254 -
  19.255 -void G1MonitoringSupport::update_counters() {
  19.256 -  if (UsePerfData) {
  19.257 -    eden_counters()->update_capacity(eden_space_committed());
  19.258 -    eden_counters()->update_used(eden_space_used());
  19.259 -    to_counters()->update_capacity(survivor_space_committed());
  19.260 -    to_counters()->update_used(survivor_space_used());
  19.261 -    old_space_counters()->update_capacity(old_space_committed());
  19.262 -    old_space_counters()->update_used(old_space_used());
  19.263 -    non_young_collection_counters()->update_all();
  19.264 +  size_t young_region_num = g1h()->young_list()->length();
  19.265 +  if (young_region_num > _young_region_num) {
  19.266 +    size_t diff = young_region_num - _young_region_num;
  19.267 +    _eden_used += diff * HeapRegion::GrainBytes;
  19.268 +    // Somewhat defensive: cap the eden used size to make sure it
  19.269 +    // never exceeds the committed size.
  19.270 +    _eden_used = MIN2(_eden_used, _eden_committed);
  19.271 +    _young_region_num = young_region_num;
  19.272    }
  19.273  }
  19.274  
  19.275 -void G1MonitoringSupport::update_eden_counters() {
  19.276 +void G1MonitoringSupport::update_sizes() {
  19.277 +  recalculate_sizes();
  19.278    if (UsePerfData) {
  19.279 -    eden_counters()->update_capacity(eden_space_committed());
  19.280 +    eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
  19.281 +    eden_counters()->update_used(eden_space_used());
  19.282 +    // only the to survivor space (s1) is active, so we don't need to
  19.283 +    // update the counters for the from survivor space (s0)
  19.284 +    to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
  19.285 +    to_counters()->update_used(survivor_space_used());
  19.286 +    old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
  19.287 +    old_space_counters()->update_used(old_space_used());
  19.288 +    old_collection_counters()->update_all();
  19.289 +    young_collection_counters()->update_all();
  19.290 +  }
  19.291 +}
  19.292 +
  19.293 +void G1MonitoringSupport::update_eden_size() {
  19.294 +  recalculate_eden_size();
  19.295 +  if (UsePerfData) {
  19.296      eden_counters()->update_used(eden_space_used());
  19.297    }
  19.298  }
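A worked example of the distribution that recalculate_sizes() above performs, with hypothetical numbers (1 MB regions, 20 young regions of which 5 are survivors, a 30-region max young length, 100 MB committed, 60 MB used). The steps mirror the code: survivor and old committed sizes are derived from their used sizes, eden committed gets its max-length share of what remains, and any leftover is attributed back to old:

    #include <algorithm>
    #include <cassert>
    #include <cstdio>

    int main() {
      // Hypothetical inputs (bytes, 1 MB regions).
      const size_t M = 1024 * 1024;
      const size_t region_size           = 1 * M;
      const size_t young_list_length     = 20;       // eden + survivor regions
      const size_t survivor_list_length  = 5;
      const size_t young_list_max_length = 30;       // includes GC-locker slack
      const size_t overall_committed     = 100 * M;  // g1->capacity()
      const size_t overall_used          = 60 * M;   // g1->used_unlocked()

      // Used sizes (upper bounds, as the .hpp comment explains).
      size_t eden_used     = (young_list_length - survivor_list_length) * region_size;         // 15 MB
      size_t survivor_used = survivor_list_length * region_size;                               //  5 MB
      size_t old_used      = overall_used - std::min(overall_used, eden_used + survivor_used); // 40 MB

      // Committed sizes: survivor/old from used, eden from the max young length,
      // and whatever is left over goes back to old.
      size_t survivor_committed = survivor_used;                                               //  5 MB
      size_t old_committed      = old_used;  // the real code also aligns this up to a region boundary
      size_t remaining          = overall_committed - (survivor_committed + old_committed);    // 55 MB
      size_t eden_committed     = std::min((young_list_max_length - survivor_list_length) * region_size,
                                           remaining);                                         // 25 MB
      old_committed            += remaining - eden_committed;                                  // 40 + 30 = 70 MB

      assert(overall_committed == eden_committed + survivor_committed + old_committed);
      std::printf("eden:     used %2zu MB, committed %2zu MB\n", eden_used / M, eden_committed / M);
      std::printf("survivor: used %2zu MB, committed %2zu MB\n", survivor_used / M, survivor_committed / M);
      std::printf("old:      used %2zu MB, committed %2zu MB\n", old_used / M, old_committed / M);
      return 0;
    }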
    20.1 --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Fri Sep 30 22:54:43 2011 -0700
    20.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Oct 06 13:28:09 2011 -0400
    20.3 @@ -28,101 +28,95 @@
    20.4  #include "gc_implementation/shared/hSpaceCounters.hpp"
    20.5  
    20.6  class G1CollectedHeap;
    20.7 -class G1SpaceMonitoringSupport;
    20.8  
    20.9 -// Class for monitoring logical spaces in G1.
   20.10 -// G1 defines a set of regions as a young
   20.11 -// collection (analogous to a young generation).
   20.12 -// The young collection is a logical generation
   20.13 -// with no fixed chunk (see space.hpp) reflecting
   20.14 -// the address space for the generation.  In addition
   20.15 -// to the young collection there is its complement
   20.16 -// the non-young collection that is simply the regions
   20.17 -// not in the young collection.  The non-young collection
   20.18 -// is treated here as a logical old generation only
   20.19 -// because the monitoring tools expect a generational
   20.20 -// heap.  The monitoring tools expect that a Space
   20.21 -// (see space.hpp) exists that describe the
   20.22 -// address space of young collection and non-young
   20.23 -// collection and such a view is provided here.
   20.24 +// Class for monitoring logical spaces in G1. It provides data for
   20.25 +// both G1's jstat counters as well as G1's memory pools.
   20.26  //
   20.27 -// This class provides interfaces to access
   20.28 -// the value of variables for the young collection
   20.29 -// that include the "capacity" and "used" of the
   20.30 -// young collection along with constant values
   20.31 -// for the minimum and maximum capacities for
   20.32 -// the logical spaces.  Similarly for the non-young
   20.33 -// collection.
   20.34 +// G1 splits the heap into heap regions and each heap region belongs
   20.35 +// to one of the following categories:
   20.36  //
   20.37 -// Also provided are counters for G1 concurrent collections
   20.38 -// and stop-the-world full heap collecitons.
   20.39 +// * eden      : regions that have been allocated since the last GC
   20.40 +// * survivors : regions with objects that survived the last few GCs
   20.41 +// * old       : long-lived non-humongous regions
   20.42 +// * humongous : humongous regions
   20.43 +// * free      : free regions
   20.44  //
   20.45 -// Below is a description of how "used" and "capactiy"
   20.46 -// (or committed) is calculated for the logical spaces.
   20.47 +// The combination of eden and survivor regions form the equivalent of
   20.48 +// the young generation in the other GCs. The combination of old and
   20.49 +// humongous regions form the equivalent of the old generation in the
   20.50 +// other GCs. Free regions do not have a good equivalent in the other
   20.51 +// GCs given that they can be allocated as any of the other region types.
   20.52  //
   20.53 -// 1) The used space calculation for a pool is not necessarily
   20.54 -// independent of the others. We can easily get from G1 the overall
   20.55 -// used space in the entire heap, the number of regions in the young
   20.56 -// generation (includes both eden and survivors), and the number of
   20.57 -// survivor regions. So, from that we calculate:
   20.58 +// The monitoring tools expect the heap to contain a number of
   20.59 +// generations (young, old, perm) and each generation to contain a
   20.60 +// number of spaces (young: eden, survivors, old). Given that G1 does
   20.61 +// not maintain those spaces physically (e.g., the set of
   20.62 +// non-contiguous eden regions can be considered as a "logical"
   20.63 +// space), we'll provide the illusion that those generations and
   20.64 +// spaces exist. In reality, each generation and space refers to a set
   20.65 +// of heap regions that are potentially non-contiguous.
   20.66  //
   20.67 -//  survivor_used = survivor_num * region_size
   20.68 -//  eden_used     = young_region_num * region_size - survivor_used
   20.69 -//  old_gen_used  = overall_used - eden_used - survivor_used
   20.70 +// This class provides interfaces to access the min, current, and max
   20.71 +// capacity and current occupancy for each of G1's logical spaces and
   20.72 +// generations we expose to the monitoring tools. Also provided are
   20.73 +// counters for G1 concurrent collections and stop-the-world full heap
   20.74 +// collections.
   20.75  //
   20.76 -// Note that survivor_used and eden_used are upper bounds. To get the
   20.77 -// actual value we would have to iterate over the regions and add up
   20.78 -// ->used(). But that'd be expensive. So, we'll accept some lack of
   20.79 -// accuracy for those two. But, we have to be careful when calculating
   20.80 -// old_gen_used, in case we subtract from overall_used more then the
   20.81 -// actual number and our result goes negative.
   20.82 +// Below is a description of how the various sizes are calculated.
   20.83  //
   20.84 -// 2) Calculating the used space is straightforward, as described
   20.85 -// above. However, how do we calculate the committed space, given that
   20.86 -// we allocate space for the eden, survivor, and old gen out of the
   20.87 -// same pool of regions? One way to do this is to use the used value
   20.88 -// as also the committed value for the eden and survivor spaces and
   20.89 -// then calculate the old gen committed space as follows:
   20.90 +// * Current Capacity
   20.91  //
   20.92 -//  old_gen_committed = overall_committed - eden_committed - survivor_committed
   20.93 +//    - heap_capacity = current heap capacity (e.g., current committed size)
   20.94 +//    - young_gen_capacity = current max young gen target capacity
   20.95 +//          (i.e., young gen target capacity + max allowed expansion capacity)
   20.96 +//    - survivor_capacity = current survivor region capacity
   20.97 +//    - eden_capacity = young_gen_capacity - survivor_capacity
   20.98 +//    - old_capacity = heap_capacity - young_gen_capacity
   20.99  //
  20.100 -// Maybe a better way to do that would be to calculate used for eden
  20.101 -// and survivor as a sum of ->used() over their regions and then
  20.102 -// calculate committed as region_num * region_size (i.e., what we use
  20.103 -// to calculate the used space now). This is something to consider
  20.104 -// in the future.
  20.105 +//    What we do in the above is to distribute the free regions among
  20.106 +//    eden_capacity and old_capacity.
  20.107  //
  20.108 -// 3) Another decision that is again not straightforward is what is
  20.109 -// the max size that each memory pool can grow to. One way to do this
  20.110 -// would be to use the committed size for the max for the eden and
  20.111 -// survivors and calculate the old gen max as follows (basically, it's
  20.112 -// a similar pattern to what we use for the committed space, as
  20.113 -// described above):
  20.114 +// * Occupancy
  20.115  //
  20.116 -//  old_gen_max = overall_max - eden_max - survivor_max
  20.117 +//    - young_gen_used = current young region capacity
  20.118 +//    - survivor_used = survivor_capacity
  20.119 +//    - eden_used = young_gen_used - survivor_used
  20.120 +//    - old_used = overall_used - young_gen_used
  20.121  //
  20.122 -// Unfortunately, the above makes the max of each pool fluctuate over
  20.123 -// time and, even though this is allowed according to the spec, it
  20.124 -// broke several assumptions in the M&M framework (there were cases
  20.125 -// where used would reach a value greater than max). So, for max we
  20.126 -// use -1, which means "undefined" according to the spec.
  20.127 +//    Unfortunately, we currently only keep track of the number of
  20.128 +//    currently allocated young and survivor regions + the overall used
  20.129 +//    bytes in the heap, so the above can be a little inaccurate.
  20.130  //
  20.131 -// 4) Now, there is a very subtle issue with all the above. The
  20.132 -// framework will call get_memory_usage() on the three pools
  20.133 -// asynchronously. As a result, each call might get a different value
  20.134 -// for, say, survivor_num which will yield inconsistent values for
  20.135 -// eden_used, survivor_used, and old_gen_used (as survivor_num is used
  20.136 -// in the calculation of all three). This would normally be
  20.137 -// ok. However, it's possible that this might cause the sum of
  20.138 -// eden_used, survivor_used, and old_gen_used to go over the max heap
  20.139 -// size and this seems to sometimes cause JConsole (and maybe other
  20.140 -// clients) to get confused. There's not a really an easy / clean
  20.141 -// solution to this problem, due to the asynchrounous nature of the
  20.142 -// framework.
  20.143 +// * Min Capacity
  20.144 +//
  20.145 +//    We set this to 0 for all spaces. We could consider setting the old
  20.146 +//    min capacity to the min capacity of the heap (see 7078465).
  20.147 +//
  20.148 +// * Max Capacity
  20.149 +//
  20.150 +//    For jstat, we set the max capacity of all spaces to heap_capacity,
  20.151 +//    given that we don't always have a reasonable upper bound on how big
  20.152 +//    each space can grow. For the memory pools, we actually make the max
  20.153 +//    capacity undefined. We could consider setting the old max capacity
  20.154 +//    to the max capacity of the heap (see 7078465).
  20.155 +//
  20.156 +// If we had more accurate occupancy / capacity information per
   20.157 +// region set, the above calculations would be greatly simplified
   20.158 +// and made more accurate.
  20.159 +//
  20.160 +// We update all the above synchronously and we store the results in
  20.161 +// fields so that we just read said fields when needed. A subtle point
  20.162 +// is that all the above sizes need to be recalculated when the old
  20.163 +// gen changes capacity (after a GC or after a humongous allocation)
  20.164 +// but only the eden occupancy changes when a new eden region is
   20.165 +// allocated. So, in the latter case we have minimal recalculation to
   20.166 +// do, which is important as we want to keep the eden region allocation
  20.167 +// path as low-overhead as possible.
  20.168  
  20.169  class G1MonitoringSupport : public CHeapObj {
  20.170 +  friend class VMStructs;
  20.171 +
  20.172    G1CollectedHeap* _g1h;
  20.173 -  VirtualSpace* _g1_storage_addr;
  20.174  
  20.175    // jstat performance counters
  20.176    //  incremental collections both fully and partially young
  20.177 @@ -133,9 +127,9 @@
  20.178    // _from_counters, and _to_counters are associated with
  20.179    // this "generational" counter.
  20.180    GenerationCounters*  _young_collection_counters;
  20.181 -  //  non-young collection set counters. The _old_space_counters
  20.182 +  //  old collection set counters. The _old_space_counters
  20.183    // below are associated with this "generational" counter.
  20.184 -  GenerationCounters*  _non_young_collection_counters;
  20.185 +  GenerationCounters*  _old_collection_counters;
  20.186    // Counters for the capacity and used for
  20.187    //   the whole heap
  20.188    HSpaceCounters*      _old_space_counters;
  20.189 @@ -145,6 +139,27 @@
  20.190    HSpaceCounters*      _from_counters;
  20.191    HSpaceCounters*      _to_counters;
  20.192  
  20.193 +  // When it's appropriate to recalculate the various sizes (at the
  20.194 +  // end of a GC, when a new eden region is allocated, etc.) we store
  20.195 +  // them here so that we can easily report them when needed and not
  20.196 +  // have to recalculate them every time.
  20.197 +
  20.198 +  size_t _overall_reserved;
  20.199 +  size_t _overall_committed;
  20.200 +  size_t _overall_used;
  20.201 +
  20.202 +  size_t _young_region_num;
  20.203 +  size_t _young_gen_committed;
  20.204 +  size_t _eden_committed;
  20.205 +  size_t _eden_used;
  20.206 +  size_t _survivor_committed;
  20.207 +  size_t _survivor_used;
  20.208 +
  20.209 +  size_t _old_committed;
  20.210 +  size_t _old_used;
  20.211 +
  20.212 +  G1CollectedHeap* g1h() { return _g1h; }
  20.213 +
  20.214    // It returns x - y if x > y, 0 otherwise.
  20.215    // As described in the comment above, some of the inputs to the
  20.216    // calculations we have to do are obtained concurrently and hence
  20.217 @@ -160,15 +175,35 @@
  20.218      }
  20.219    }
  20.220  
  20.221 +  // Recalculate all the sizes.
  20.222 +  void recalculate_sizes();
  20.223 +  // Recalculate only what's necessary when a new eden region is allocated.
  20.224 +  void recalculate_eden_size();
  20.225 +
  20.226   public:
  20.227 -  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
  20.228 +  G1MonitoringSupport(G1CollectedHeap* g1h);
  20.229  
  20.230 -  G1CollectedHeap* g1h() { return _g1h; }
  20.231 -  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
  20.232 +  // Unfortunately, the jstat tool assumes that no space has 0
  20.233 +  // capacity. In our case, given that each space is logical, it's
   20.234 +  // possible that no regions will be allocated to it, and hence have 0
  20.235 +  // capacity (e.g., if there are no survivor regions, the survivor
  20.236 +  // space has 0 capacity). The way we deal with this is to always pad
  20.237 +  // each capacity value we report to jstat by a very small amount to
  20.238 +  // make sure that it's never zero. Given that we sometimes have to
  20.239 +  // report a capacity of a generation that contains several spaces
  20.240 +  // (e.g., young gen includes one eden, two survivor spaces), the
   20.241 +  // mult parameter is provided in order to add the appropriate
  20.242 +  // padding multiple times so that the capacities add up correctly.
  20.243 +  static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
  20.244 +    return size_bytes + MinObjAlignmentInBytes * mult;
  20.245 +  }
  20.246  
  20.247 -  // Performance Counter accessors
  20.248 -  void update_counters();
  20.249 -  void update_eden_counters();
  20.250 +  // Recalculate all the sizes from scratch and update all the jstat
  20.251 +  // counters accordingly.
  20.252 +  void update_sizes();
  20.253 +  // Recalculate only what's necessary when a new eden region is
  20.254 +  // allocated and update any jstat counters that need to be updated.
  20.255 +  void update_eden_size();
  20.256  
  20.257    CollectorCounters* incremental_collection_counters() {
  20.258      return _incremental_collection_counters;
  20.259 @@ -176,8 +211,11 @@
  20.260    CollectorCounters* full_collection_counters() {
  20.261      return _full_collection_counters;
  20.262    }
  20.263 -  GenerationCounters* non_young_collection_counters() {
  20.264 -    return _non_young_collection_counters;
  20.265 +  GenerationCounters* young_collection_counters() {
  20.266 +    return _young_collection_counters;
  20.267 +  }
  20.268 +  GenerationCounters* old_collection_counters() {
  20.269 +    return _old_collection_counters;
  20.270    }
  20.271    HSpaceCounters*      old_space_counters() { return _old_space_counters; }
  20.272    HSpaceCounters*      eden_counters() { return _eden_counters; }
  20.273 @@ -187,17 +225,45 @@
  20.274    // Monitoring support used by
  20.275    //   MemoryService
  20.276    //   jstat counters
  20.277 -  size_t overall_committed();
  20.278 -  size_t overall_used();
  20.279  
  20.280 -  size_t eden_space_committed();
  20.281 -  size_t eden_space_used();
  20.282 +  size_t overall_reserved()           { return _overall_reserved;     }
  20.283 +  size_t overall_committed()          { return _overall_committed;    }
  20.284 +  size_t overall_used()               { return _overall_used;         }
  20.285  
  20.286 -  size_t survivor_space_committed();
  20.287 -  size_t survivor_space_used();
  20.288 +  size_t young_gen_committed()        { return _young_gen_committed;  }
  20.289 +  size_t young_gen_max()              { return overall_reserved();    }
  20.290 +  size_t eden_space_committed()       { return _eden_committed;       }
  20.291 +  size_t eden_space_used()            { return _eden_used;            }
  20.292 +  size_t survivor_space_committed()   { return _survivor_committed;   }
  20.293 +  size_t survivor_space_used()        { return _survivor_used;        }
  20.294  
  20.295 -  size_t old_space_committed();
  20.296 -  size_t old_space_used();
  20.297 +  size_t old_gen_committed()          { return old_space_committed(); }
  20.298 +  size_t old_gen_max()                { return overall_reserved();    }
  20.299 +  size_t old_space_committed()        { return _old_committed;        }
  20.300 +  size_t old_space_used()             { return _old_used;             }
  20.301 +};
  20.302 +
  20.303 +class G1GenerationCounters: public GenerationCounters {
  20.304 +protected:
  20.305 +  G1MonitoringSupport* _g1mm;
  20.306 +
  20.307 +public:
  20.308 +  G1GenerationCounters(G1MonitoringSupport* g1mm,
  20.309 +                       const char* name, int ordinal, int spaces,
  20.310 +                       size_t min_capacity, size_t max_capacity,
  20.311 +                       size_t curr_capacity);
  20.312 +};
  20.313 +
  20.314 +class G1YoungGenerationCounters: public G1GenerationCounters {
  20.315 +public:
  20.316 +  G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
  20.317 +  virtual void update_all();
  20.318 +};
  20.319 +
  20.320 +class G1OldGenerationCounters: public G1GenerationCounters {
  20.321 +public:
  20.322 +  G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
  20.323 +  virtual void update_all();
  20.324  };
  20.325  
  20.326  #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
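
The comment block added to g1MonitoringSupport.hpp above describes how G1 derives per-space committed and used sizes from region counts and then pads every capacity reported to jstat so that no space ever reports zero. A minimal, self-contained C++ sketch of that arithmetic follows; the region counts, the 1 MB region size, and the padding constant are assumptions chosen for illustration, not values taken from the VM.

    // Illustrative sketch of the G1 monitoring arithmetic described above.
    // All names and numbers are assumptions; this is not HotSpot code.
    #include <cstddef>
    #include <cstdio>

    // Stand-in for MinObjAlignmentInBytes; keeps reported capacities non-zero.
    static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
      const size_t padding = 8;
      return size_bytes + padding * mult;
    }

    int main() {
      const size_t region_size       = 1 * 1024 * 1024; // assumed 1 MB regions
      const size_t committed_regions = 64;               // heap_capacity / region_size
      const size_t young_regions     = 10;               // eden + survivor regions
      const size_t survivor_regions  = 2;
      const size_t overall_used      = 40 * region_size;

      // Capacities (simplified: young capacity is just the allocated young regions).
      size_t heap_capacity      = committed_regions * region_size;
      size_t survivor_capacity  = survivor_regions * region_size;
      size_t young_gen_capacity = young_regions * region_size;
      size_t eden_capacity      = young_gen_capacity - survivor_capacity;
      size_t old_capacity       = heap_capacity - young_gen_capacity;

      // Occupancy: young used is approximated by the allocated young regions.
      size_t young_gen_used = young_regions * region_size;
      size_t survivor_used  = survivor_capacity;
      size_t eden_used      = young_gen_used - survivor_used;
      size_t old_used       = overall_used - young_gen_used;

      std::printf("eden:     %zu committed, %zu used\n",
                  pad_capacity(eden_capacity), eden_used);
      std::printf("survivor: %zu committed, %zu used\n",
                  pad_capacity(survivor_capacity), survivor_used);
      std::printf("old:      %zu committed, %zu used\n",
                  pad_capacity(old_capacity), old_used);
      return 0;
    }
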
    21.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Fri Sep 30 22:54:43 2011 -0700
    21.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Oct 06 13:28:09 2011 -0400
    21.3 @@ -34,6 +34,7 @@
    21.4  class CMMarkStack;
    21.5  class G1ParScanThreadState;
    21.6  class CMTask;
    21.7 +class ReferenceProcessor;
    21.8  
    21.9  // A class that scans oops in a given heap region (much as OopsInGenClosure
   21.10  // scans oops in a generation.)
   21.11 @@ -59,8 +60,10 @@
   21.12  
   21.13  class G1ParPushHeapRSClosure : public G1ParClosureSuper {
   21.14  public:
   21.15 -  G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   21.16 +  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
   21.17 +                         G1ParScanThreadState* par_scan_state):
   21.18      G1ParClosureSuper(g1, par_scan_state) { }
   21.19 +
   21.20    template <class T> void do_oop_nv(T* p);
   21.21    virtual void do_oop(oop* p)          { do_oop_nv(p); }
   21.22    virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
   21.23 @@ -68,8 +71,13 @@
   21.24  
   21.25  class G1ParScanClosure : public G1ParClosureSuper {
   21.26  public:
   21.27 -  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   21.28 -    G1ParClosureSuper(g1, par_scan_state) { }
   21.29 +  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
   21.30 +    G1ParClosureSuper(g1, par_scan_state)
   21.31 +  {
   21.32 +    assert(_ref_processor == NULL, "sanity");
   21.33 +    _ref_processor = rp;
   21.34 +  }
   21.35 +
   21.36    template <class T> void do_oop_nv(T* p);
   21.37    virtual void do_oop(oop* p)          { do_oop_nv(p); }
   21.38    virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
   21.39 @@ -92,9 +100,18 @@
   21.40  
   21.41  class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
   21.42    G1ParScanClosure _scanner;
   21.43 +
   21.44  public:
   21.45 -  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   21.46 -    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
   21.47 +  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
   21.48 +    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
   21.49 +  {
   21.50 +    assert(_ref_processor == NULL, "sanity");
   21.51 +  }
   21.52 +
   21.53 +  G1ParScanClosure* scanner() {
   21.54 +    return &_scanner;
   21.55 +  }
   21.56 +
   21.57    template <class T> void do_oop_nv(T* p);
   21.58    virtual void do_oop(oop* p)       { do_oop_nv(p); }
   21.59    virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
   21.60 @@ -105,7 +122,8 @@
   21.61    G1ParScanClosure *_scanner;
   21.62  protected:
   21.63    template <class T> void mark_object(T* p);
   21.64 -  oop copy_to_survivor_space(oop obj, bool should_mark_copy);
   21.65 +  oop copy_to_survivor_space(oop obj, bool should_mark_root,
   21.66 +                                      bool should_mark_copy);
   21.67  public:
   21.68    G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
   21.69                    G1ParScanClosure *scanner) :
   21.70 @@ -116,10 +134,20 @@
   21.71           bool do_mark_object>
   21.72  class G1ParCopyClosure : public G1ParCopyHelper {
   21.73    G1ParScanClosure _scanner;
   21.74 +
   21.75    template <class T> void do_oop_work(T* p);
   21.76 +
   21.77  public:
   21.78 -  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   21.79 -    _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
   21.80 +  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
   21.81 +                   ReferenceProcessor* rp) :
   21.82 +    _scanner(g1, par_scan_state, rp),
   21.83 +    G1ParCopyHelper(g1, par_scan_state, &_scanner)
   21.84 +  {
   21.85 +    assert(_ref_processor == NULL, "sanity");
   21.86 +  }
   21.87 +
   21.88 +  G1ParScanClosure* scanner() { return &_scanner; }
   21.89 +
   21.90    template <class T> void do_oop_nv(T* p) {
   21.91      do_oop_work(p);
   21.92    }
   21.93 @@ -129,21 +157,25 @@
   21.94  
   21.95  typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
   21.96  typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
   21.97 -typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
   21.98 +
   21.99  typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
  21.100  typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;
  21.101 -typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
  21.102  
  21.103 -// This is the only case when we set skip_cset_test. Basically, this
  21.104 -// closure is (should?) only be called directly while we're draining
  21.105 -// the overflow and task queues. In that case we know that the
  21.106 -// reference in question points into the collection set, otherwise we
  21.107 -// would not have pushed it on the queue. The following is defined in
  21.108 -// g1_specialized_oop_closures.hpp.
  21.109 -// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
  21.110 -// We need a separate closure to handle references during evacuation
  21.111 -// failure processing, as we cannot asume that the reference already
  21.112 -// points into the collection set (like G1ParScanHeapEvacClosure does).
  21.113 +// The following closure types are no longer used but are retained
  21.114 +// for historical reasons:
  21.115 +// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
  21.116 +// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
  21.117 +
  21.118 +// The following closure type is defined in g1_specialized_oop_closures.hpp:
  21.119 +//
  21.120 +// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
  21.121 +
  21.122 +// We use a separate closure to handle references during evacuation
  21.123 +// failure processing.
  21.124 +// We could have used another instance of G1ParScanHeapEvacClosure
  21.125 +// (since that closure no longer assumes that the references it
  21.126 +// handles point into the collection set).
  21.127 +
  21.128  typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
  21.129  
  21.130  class FilterIntoCSClosure: public OopClosure {
  21.131 @@ -152,9 +184,10 @@
  21.132    DirtyCardToOopClosure* _dcto_cl;
  21.133  public:
  21.134    FilterIntoCSClosure(  DirtyCardToOopClosure* dcto_cl,
  21.135 -                        G1CollectedHeap* g1, OopClosure* oc) :
  21.136 -    _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
  21.137 -  {}
  21.138 +                        G1CollectedHeap* g1,
  21.139 +                        OopClosure* oc) :
  21.140 +    _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
  21.141 +
  21.142    template <class T> void do_oop_nv(T* p);
  21.143    virtual void do_oop(oop* p)        { do_oop_nv(p); }
  21.144    virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
    22.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Sep 30 22:54:43 2011 -0700
    22.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Oct 06 13:28:09 2011 -0400
    22.3 @@ -234,6 +234,7 @@
    22.4    HeapRegion *startRegion = calculateStartRegion(worker_i);
    22.5  
    22.6    ScanRSClosure scanRScl(oc, worker_i);
    22.7 +
    22.8    _g1->collection_set_iterate_from(startRegion, &scanRScl);
    22.9    scanRScl.set_try_claimed();
   22.10    _g1->collection_set_iterate_from(startRegion, &scanRScl);
   22.11 @@ -283,6 +284,7 @@
   22.12    double start = os::elapsedTime();
   22.13    // Apply the given closure to all remaining log entries.
   22.14    RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
   22.15 +
   22.16    _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
   22.17  
   22.18    // Now there should be no dirty cards.
    23.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 30 22:54:43 2011 -0700
    23.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 06 13:28:09 2011 -0400
    23.3 @@ -33,11 +33,11 @@
    23.4  #include "memory/iterator.hpp"
    23.5  #include "oops/oop.inline.hpp"
    23.6  
    23.7 -int HeapRegion::LogOfHRGrainBytes = 0;
    23.8 -int HeapRegion::LogOfHRGrainWords = 0;
    23.9 -int HeapRegion::GrainBytes        = 0;
   23.10 -int HeapRegion::GrainWords        = 0;
   23.11 -int HeapRegion::CardsPerRegion    = 0;
   23.12 +int    HeapRegion::LogOfHRGrainBytes = 0;
   23.13 +int    HeapRegion::LogOfHRGrainWords = 0;
   23.14 +size_t HeapRegion::GrainBytes        = 0;
   23.15 +size_t HeapRegion::GrainWords        = 0;
   23.16 +size_t HeapRegion::CardsPerRegion    = 0;
   23.17  
   23.18  HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
   23.19                                   HeapRegion* hr, OopClosure* cl,
   23.20 @@ -45,7 +45,7 @@
   23.21                                   FilterKind fk) :
   23.22    ContiguousSpaceDCTOC(hr, cl, precision, NULL),
   23.23    _hr(hr), _fk(fk), _g1(g1)
   23.24 -{}
   23.25 +{ }
   23.26  
   23.27  FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
   23.28                                                     OopClosure* oc) :
   23.29 @@ -210,15 +210,17 @@
   23.30                                                HeapWord* top,
   23.31                                                OopClosure* cl) {
   23.32    G1CollectedHeap* g1h = _g1;
   23.33 +  int oop_size;
   23.34 +  OopClosure* cl2 = NULL;
   23.35  
   23.36 -  int oop_size;
   23.37 -
   23.38 -  OopClosure* cl2 = cl;
   23.39    FilterIntoCSClosure intoCSFilt(this, g1h, cl);
   23.40    FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
   23.41 +
   23.42    switch (_fk) {
   23.43 +  case NoFilterKind:          cl2 = cl; break;
   23.44    case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
   23.45    case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
   23.46 +  default:                    ShouldNotReachHere();
   23.47    }
   23.48  
   23.49    // Start filtering what we add to the remembered set. If the object is
   23.50 @@ -239,16 +241,19 @@
   23.51      case NoFilterKind:
   23.52        bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
   23.53        break;
   23.54 +
   23.55      case IntoCSFilterKind: {
   23.56        FilterIntoCSClosure filt(this, g1h, cl);
   23.57        bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
   23.58        break;
   23.59      }
   23.60 +
   23.61      case OutOfRegionFilterKind: {
   23.62        FilterOutOfRegionClosure filt(_hr, cl);
   23.63        bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
   23.64        break;
   23.65      }
   23.66 +
   23.67      default:
   23.68        ShouldNotReachHere();
   23.69      }
   23.70 @@ -317,11 +322,11 @@
   23.71    guarantee(GrainBytes == 0, "we should only set it once");
   23.72    // The cast to int is safe, given that we've bounded region_size by
   23.73    // MIN_REGION_SIZE and MAX_REGION_SIZE.
   23.74 -  GrainBytes = (int) region_size;
   23.75 +  GrainBytes = (size_t)region_size;
   23.76  
   23.77    guarantee(GrainWords == 0, "we should only set it once");
   23.78    GrainWords = GrainBytes >> LogHeapWordSize;
   23.79 -  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
   23.80 +  guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");
   23.81  
   23.82    guarantee(CardsPerRegion == 0, "we should only set it once");
   23.83    CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
   23.84 @@ -374,8 +379,7 @@
   23.85  
   23.86  void HeapRegion::par_clear() {
   23.87    assert(used() == 0, "the region should have been already cleared");
   23.88 -  assert(capacity() == (size_t) HeapRegion::GrainBytes,
   23.89 -         "should be back to normal");
   23.90 +  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
   23.91    HeapRegionRemSet* hrrs = rem_set();
   23.92    hrrs->clear();
   23.93    CardTableModRefBS* ct_bs =
   23.94 @@ -431,7 +435,7 @@
   23.95      assert(end() == _orig_end, "sanity");
   23.96    }
   23.97  
   23.98 -  assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
   23.99 +  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  23.100    _humongous_type = NotHumongous;
  23.101    _humongous_start_region = NULL;
  23.102  }
  23.103 @@ -483,12 +487,13 @@
  23.104  HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
  23.105             MemRegion mr, bool is_zeroed)
  23.106    : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
  23.107 -    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
  23.108 +    _hrs_index(hrs_index),
  23.109      _humongous_type(NotHumongous), _humongous_start_region(NULL),
  23.110      _in_collection_set(false),
  23.111      _next_in_special_set(NULL), _orig_end(NULL),
  23.112      _claimed(InitialClaimValue), _evacuation_failed(false),
  23.113      _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
  23.114 +    _gc_efficiency(0.0),
  23.115      _young_type(NotYoung), _next_young_region(NULL),
  23.116      _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
  23.117  #ifdef ASSERT
    24.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 30 22:54:43 2011 -0700
    24.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Oct 06 13:28:09 2011 -0400
    24.3 @@ -118,7 +118,6 @@
    24.4                    FilterKind fk);
    24.5  };
    24.6  
    24.7 -
    24.8  // The complicating factor is that BlockOffsetTable diverged
    24.9  // significantly, and we need functionality that is only in the G1 version.
   24.10  // So I copied that code, which led to an alternate G1 version of
   24.11 @@ -223,10 +222,6 @@
   24.12      ContinuesHumongous
   24.13    };
   24.14  
   24.15 -  // The next filter kind that should be used for a "new_dcto_cl" call with
   24.16 -  // the "traditional" signature.
   24.17 -  HeapRegionDCTOC::FilterKind _next_fk;
   24.18 -
   24.19    // Requires that the region "mr" be dense with objects, and begin and end
   24.20    // with an object.
   24.21    void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
   24.22 @@ -351,16 +346,17 @@
   24.23               G1BlockOffsetSharedArray* sharedOffsetArray,
   24.24               MemRegion mr, bool is_zeroed);
   24.25  
   24.26 -  static int LogOfHRGrainBytes;
   24.27 -  static int LogOfHRGrainWords;
   24.28 -  // The normal type of these should be size_t. However, they used to
   24.29 -  // be members of an enum before and they are assumed by the
   24.30 -  // compilers to be ints. To avoid going and fixing all their uses,
   24.31 -  // I'm declaring them as ints. I'm not anticipating heap region
   24.32 -  // sizes to reach anywhere near 2g, so using an int here is safe.
   24.33 -  static int GrainBytes;
   24.34 -  static int GrainWords;
   24.35 -  static int CardsPerRegion;
   24.36 +  static int    LogOfHRGrainBytes;
   24.37 +  static int    LogOfHRGrainWords;
   24.38 +
   24.39 +  static size_t GrainBytes;
   24.40 +  static size_t GrainWords;
   24.41 +  static size_t CardsPerRegion;
   24.42 +
   24.43 +  static size_t align_up_to_region_byte_size(size_t sz) {
   24.44 +    return (sz + (size_t) GrainBytes - 1) &
   24.45 +                                      ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   24.46 +  }
   24.47  
   24.48    // It sets up the heap region size (GrainBytes / GrainWords), as
   24.49    // well as other related fields that are based on the heap region
   24.50 @@ -573,40 +569,14 @@
   24.51    // allocated in the current region before the last call to "save_mark".
   24.52    void oop_before_save_marks_iterate(OopClosure* cl);
   24.53  
   24.54 -  // This call determines the "filter kind" argument that will be used for
   24.55 -  // the next call to "new_dcto_cl" on this region with the "traditional"
   24.56 -  // signature (i.e., the call below.)  The default, in the absence of a
   24.57 -  // preceding call to this method, is "NoFilterKind", and a call to this
   24.58 -  // method is necessary for each such call, or else it reverts to the
   24.59 -  // default.
   24.60 -  // (This is really ugly, but all other methods I could think of changed a
   24.61 -  // lot of main-line code for G1.)
   24.62 -  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
   24.63 -    _next_fk = nfk;
   24.64 -  }
   24.65 -
   24.66    DirtyCardToOopClosure*
   24.67    new_dcto_closure(OopClosure* cl,
   24.68                     CardTableModRefBS::PrecisionStyle precision,
   24.69                     HeapRegionDCTOC::FilterKind fk);
   24.70  
   24.71 -#if WHASSUP
   24.72 -  DirtyCardToOopClosure*
   24.73 -  new_dcto_closure(OopClosure* cl,
   24.74 -                   CardTableModRefBS::PrecisionStyle precision,
   24.75 -                   HeapWord* boundary) {
   24.76 -    assert(boundary == NULL, "This arg doesn't make sense here.");
   24.77 -    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
   24.78 -    _next_fk = HeapRegionDCTOC::NoFilterKind;
   24.79 -    return res;
   24.80 -  }
   24.81 -#endif
   24.82 -
   24.83 -  //
   24.84    // Note the start or end of marking. This tells the heap region
   24.85    // that the collector is about to start or has finished (concurrently)
   24.86    // marking the heap.
   24.87 -  //
   24.88  
   24.89    // Note the start of a marking phase. Record the
   24.90    // start of the unmarked area of the region here.
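
The heapRegion.hpp hunk above widens GrainBytes, GrainWords, and CardsPerRegion to size_t and adds align_up_to_region_byte_size(). The round-up only works because the region size is a power of two; below is a standalone sketch, assuming a 1 MB region size purely for illustration.

    // Sketch of rounding a size up to a multiple of the (power-of-two)
    // region size; kGrainBytes is an assumed value, not the VM's.
    #include <cassert>
    #include <cstddef>

    static const size_t kGrainBytes = size_t(1) << 20; // assumed 1 MB region

    static size_t align_up_to_region_byte_size(size_t sz) {
      // Valid only because kGrainBytes is a power of two.
      return (sz + kGrainBytes - 1) & ~(kGrainBytes - 1);
    }

    int main() {
      assert(align_up_to_region_byte_size(1)               == kGrainBytes);
      assert(align_up_to_region_byte_size(kGrainBytes)     == kGrainBytes);
      assert(align_up_to_region_byte_size(kGrainBytes + 1) == 2 * kGrainBytes);
      return 0;
    }
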
    25.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Sep 30 22:54:43 2011 -0700
    25.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Oct 06 13:28:09 2011 -0400
    25.3 @@ -148,7 +148,7 @@
    25.4        CardIdx_t from_card = (CardIdx_t)
    25.5            hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
    25.6  
    25.7 -      assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
    25.8 +      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
    25.9               "Must be in range.");
   25.10        add_card_work(from_card, par);
   25.11      }
   25.12 @@ -639,7 +639,7 @@
   25.13          uintptr_t(from_hr->bottom())
   25.14            >> CardTableModRefBS::card_shift;
   25.15        CardIdx_t card_index = from_card - from_hr_bot_card_index;
   25.16 -      assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
   25.17 +      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
   25.18               "Must be in range.");
   25.19        if (G1HRRSUseSparseTable &&
   25.20            _sparse_table.add_card(from_hrs_ind, card_index)) {
   25.21 @@ -1066,7 +1066,7 @@
   25.22        uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
   25.23      assert(from_card >= hr_bot_card_index, "Inv");
   25.24      CardIdx_t card_index = from_card - hr_bot_card_index;
   25.25 -    assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
   25.26 +    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
   25.27             "Must be in range.");
   25.28      return _sparse_table.contains_card(hr_ind, card_index);
   25.29    }
   25.30 @@ -1191,7 +1191,7 @@
   25.31    _is = Sparse;
   25.32    // Set these values so that we increment to the first region.
   25.33    _coarse_cur_region_index = -1;
   25.34 -  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);;
   25.35 +  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
   25.36  
   25.37    _cur_region_cur_card = 0;
   25.38  
   25.39 @@ -1270,7 +1270,7 @@
   25.40  bool HeapRegionRemSetIterator::fine_has_next() {
   25.41    return
   25.42      _fine_cur_prt != NULL &&
   25.43 -    _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
   25.44 +    _cur_region_cur_card < HeapRegion::CardsPerRegion;
   25.45  }
   25.46  
   25.47  bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
    26.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Sep 30 22:54:43 2011 -0700
    26.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Oct 06 13:28:09 2011 -0400
    26.3 @@ -395,8 +395,8 @@
    26.4    // Coarse table iteration fields:
    26.5  
    26.6    // Current region index;
    26.7 -  int _coarse_cur_region_index;
    26.8 -  int _coarse_cur_region_cur_card;
    26.9 +  int    _coarse_cur_region_index;
   26.10 +  size_t _coarse_cur_region_cur_card;
   26.11  
   26.12    bool coarse_has_next(size_t& card_index);
   26.13  
    27.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Sep 30 22:54:43 2011 -0700
    27.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Oct 06 13:28:09 2011 -0400
    27.3 @@ -56,6 +56,7 @@
    27.4  // and maintain that: _length <= _allocated_length <= _max_length
    27.5  
    27.6  class HeapRegionSeq: public CHeapObj {
    27.7 +  friend class VMStructs;
    27.8  
    27.9    // The array that holds the HeapRegions.
   27.10    HeapRegion** _regions;
    28.1 --- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Fri Sep 30 22:54:43 2011 -0700
    28.2 +++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Oct 06 13:28:09 2011 -0400
    28.3 @@ -29,6 +29,7 @@
    28.4  #include "memory/sharedHeap.hpp"
    28.5  #include "runtime/mutexLocker.hpp"
    28.6  #include "runtime/thread.hpp"
    28.7 +#include "runtime/vmThread.hpp"
    28.8  
    28.9  // This method removes entries from an SATB buffer that will not be
   28.10  // useful to the concurrent marking threads. An entry is removed if it
   28.11 @@ -252,9 +253,18 @@
   28.12        t->satb_mark_queue().apply_closure(_par_closures[worker]);
   28.13      }
   28.14    }
   28.15 -  // We'll have worker 0 do this one.
   28.16 -  if (worker == 0) {
   28.17 -    shared_satb_queue()->apply_closure(_par_closures[0]);
   28.18 +
    28.19 +  // We also need to claim the VMThread so that its parity is updated;
    28.20 +  // otherwise the next call to Thread::possibly_parallel_oops_do inside
    28.21 +  // a StrongRootsScope might skip the VMThread because it has a stale
    28.22 +  // parity that matches the parity set by the StrongRootsScope.
   28.23 +  //
   28.24 +  // Whichever worker succeeds in claiming the VMThread gets to do
   28.25 +  // the shared queue.
   28.26 +
   28.27 +  VMThread* vmt = VMThread::vm_thread();
   28.28 +  if (vmt->claim_oops_do(true, parity)) {
   28.29 +    shared_satb_queue()->apply_closure(_par_closures[worker]);
   28.30    }
   28.31  }
   28.32  
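
The satbQueue.cpp hunk above replaces the hard-wired "worker 0 does the shared queue" rule with a claim on the VMThread, so the shared SATB queue is processed by whichever worker wins the claim and the VMThread's parity stays current. Below is a generic claim-once sketch of that idea using std::atomic and made-up names; the real code goes through Thread::claim_oops_do and a per-GC parity value rather than a boolean flag.

    // Generic "exactly one worker claims the shared work" sketch.
    // Names and structure are assumptions, not the HotSpot mechanism.
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static std::atomic<bool> shared_queue_claimed(false);

    static void worker(int id) {
      // ... each worker would drain its own SATB queue here ...
      bool expected = false;
      // Exactly one compare-exchange succeeds; that worker handles the
      // shared queue, mirroring "whichever worker succeeds in claiming
      // the VMThread gets to do the shared queue".
      if (shared_queue_claimed.compare_exchange_strong(expected, true)) {
        std::printf("worker %d processes the shared queue\n", id);
      }
    }

    int main() {
      std::vector<std::thread> workers;
      for (int i = 0; i < 4; i++) {
        workers.emplace_back(worker, i);
      }
      for (std::thread& t : workers) {
        t.join();
      }
      return 0;
    }
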
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Oct 06 13:28:09 2011 -0400
    29.3 @@ -0,0 +1,65 @@
    29.4 +/*
    29.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    29.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.7 + *
    29.8 + * This code is free software; you can redistribute it and/or modify it
    29.9 + * under the terms of the GNU General Public License version 2 only, as
   29.10 + * published by the Free Software Foundation.
   29.11 + *
   29.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   29.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   29.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   29.15 + * version 2 for more details (a copy is included in the LICENSE file that
   29.16 + * accompanied this code).
   29.17 + *
   29.18 + * You should have received a copy of the GNU General Public License version
   29.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   29.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   29.21 + *
   29.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   29.23 + * or visit www.oracle.com if you need additional information or have any
   29.24 + * questions.
   29.25 + *
   29.26 + */
   29.27 +
   29.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
   29.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
   29.30 +
   29.31 +#include "gc_implementation/g1/heapRegion.hpp"
   29.32 +#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
   29.33 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   29.34 +
   29.35 +#define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
   29.36 +                                                                              \
   29.37 +  static_field(HeapRegion, GrainBytes, size_t)                                \
   29.38 +                                                                              \
   29.39 +  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
   29.40 +  nonstatic_field(HeapRegionSeq,   _length,  size_t)                          \
   29.41 +                                                                              \
   29.42 +  nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   29.43 +  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   29.44 +  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   29.45 +  nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   29.46 +                                                                              \
   29.47 +  nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
   29.48 +  nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
   29.49 +  nonstatic_field(G1MonitoringSupport, _survivor_committed, size_t)           \
   29.50 +  nonstatic_field(G1MonitoringSupport, _survivor_used,      size_t)           \
   29.51 +  nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
   29.52 +  nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
   29.53 +
   29.54 +
   29.55 +#define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
   29.56 +                                                                              \
   29.57 +  declare_type(G1CollectedHeap, SharedHeap)                                   \
   29.58 +                                                                              \
   29.59 +  declare_type(HeapRegion, ContiguousSpace)                                   \
   29.60 +  declare_toplevel_type(HeapRegionSeq)                                        \
   29.61 +  declare_toplevel_type(G1MonitoringSupport)                                  \
   29.62 +                                                                              \
   29.63 +  declare_toplevel_type(G1CollectedHeap*)                                     \
   29.64 +  declare_toplevel_type(HeapRegion*)                                          \
   29.65 +  declare_toplevel_type(G1MonitoringSupport*)                                 \
   29.66 +
   29.67 +
   29.68 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
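
The new vmStructs_g1.hpp follows the existing vmStructs convention: the field and type lists are written once as invocations of macro parameters (nonstatic_field, static_field, declare_type, ...), and the serviceability agent glue expands them into whatever tables it needs. Below is a tiny generic sketch of that X-macro style with made-up types; it assumes nothing about the real VMStructs expansion.

    // Generic X-macro sketch; MyHeap and the table layout are invented
    // for illustration and are not part of HotSpot.
    #include <cstddef>
    #include <cstdio>

    struct MyHeap {
      size_t _used;
      size_t _committed;
    };

    // The field list is written once, parameterized by a macro ...
    #define MY_STRUCTS(nonstatic_field)           \
      nonstatic_field(MyHeap, _used,      size_t) \
      nonstatic_field(MyHeap, _committed, size_t)

    // ... and expanded into whatever shape a consumer needs.
    struct FieldEntry {
      const char* type_name;
      const char* field_name;
      size_t      offset;
    };

    #define GENERATE_ENTRY(type, field, ftype) \
      { #type, #field, offsetof(type, field) },

    static const FieldEntry entries[] = {
      MY_STRUCTS(GENERATE_ENTRY)
    };

    int main() {
      for (const FieldEntry& e : entries) {
        std::printf("%s::%s at offset %zu\n", e.type_name, e.field_name, e.offset);
      }
      return 0;
    }
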
    30.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Sep 30 22:54:43 2011 -0700
    30.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Oct 06 13:28:09 2011 -0400
    30.3 @@ -198,10 +198,9 @@
    30.4  
    30.5      allocate_stacks();
    30.6  
    30.7 -    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    30.8      COMPILER2_PRESENT(DerivedPointerTable::clear());
    30.9  
   30.10 -    ref_processor()->enable_discovery();
   30.11 +    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   30.12      ref_processor()->setup_policy(clear_all_softrefs);
   30.13  
   30.14      mark_sweep_phase1(clear_all_softrefs);
    31.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Sep 30 22:54:43 2011 -0700
    31.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Oct 06 13:28:09 2011 -0400
    31.3 @@ -2069,10 +2069,9 @@
    31.4      CodeCache::gc_prologue();
    31.5      Threads::gc_prologue();
    31.6  
    31.7 -    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    31.8      COMPILER2_PRESENT(DerivedPointerTable::clear());
    31.9  
   31.10 -    ref_processor()->enable_discovery();
   31.11 +    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   31.12      ref_processor()->setup_policy(maximum_heap_compaction);
   31.13  
   31.14      bool marked_for_unloading = false;
    32.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Fri Sep 30 22:54:43 2011 -0700
    32.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Thu Oct 06 13:28:09 2011 -0400
    32.3 @@ -102,17 +102,15 @@
    32.4    _state = flushed;
    32.5  }
    32.6  
    32.7 -bool PSPromotionLAB::unallocate_object(oop obj) {
    32.8 +bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
    32.9    assert(Universe::heap()->is_in(obj), "Object outside heap");
   32.10  
   32.11    if (contains(obj)) {
   32.12 -    HeapWord* object_end = (HeapWord*)obj + obj->size();
   32.13 -    assert(object_end <= top(), "Object crosses promotion LAB boundary");
   32.14 +    HeapWord* object_end = obj + obj_size;
   32.15 +    assert(object_end == top(), "Not matching last allocation");
   32.16  
   32.17 -    if (object_end == top()) {
   32.18 -      set_top((HeapWord*)obj);
   32.19 -      return true;
   32.20 -    }
   32.21 +    set_top(obj);
   32.22 +    return true;
   32.23    }
   32.24  
   32.25    return false;
    33.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Fri Sep 30 22:54:43 2011 -0700
    33.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Thu Oct 06 13:28:09 2011 -0400
    33.3 @@ -73,7 +73,7 @@
    33.4  
    33.5    bool is_flushed()                  { return _state == flushed; }
    33.6  
    33.7 -  bool unallocate_object(oop obj);
    33.8 +  bool unallocate_object(HeapWord* obj, size_t obj_size);
    33.9  
   33.10    // Returns a subregion containing all objects in this space.
   33.11    MemRegion used_region()            { return MemRegion(bottom(), top()); }
    34.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Sep 30 22:54:43 2011 -0700
    34.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Oct 06 13:28:09 2011 -0400
    34.3 @@ -380,10 +380,10 @@
    34.4        // deallocate it, so we have to test.  If the deallocation fails,
    34.5        // overwrite with a filler object.
    34.6        if (new_obj_is_tenured) {
    34.7 -        if (!_old_lab.unallocate_object(new_obj)) {
    34.8 +        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
    34.9            CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
   34.10          }
   34.11 -      } else if (!_young_lab.unallocate_object(new_obj)) {
   34.12 +      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
   34.13          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
   34.14        }
   34.15  
    35.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Sep 30 22:54:43 2011 -0700
    35.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Oct 06 13:28:09 2011 -0400
    35.3 @@ -350,10 +350,9 @@
    35.4      }
    35.5      save_to_space_top_before_gc();
    35.6  
    35.7 -    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    35.8      COMPILER2_PRESENT(DerivedPointerTable::clear());
    35.9  
   35.10 -    reference_processor()->enable_discovery();
   35.11 +    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   35.12      reference_processor()->setup_policy(false);
   35.13  
   35.14      // We track how much was promoted to the next generation for
    36.1 --- a/src/share/vm/gc_implementation/shared/generationCounters.cpp	Fri Sep 30 22:54:43 2011 -0700
    36.2 +++ b/src/share/vm/gc_implementation/shared/generationCounters.cpp	Thu Oct 06 13:28:09 2011 -0400
    36.3 @@ -26,14 +26,10 @@
    36.4  #include "gc_implementation/shared/generationCounters.hpp"
    36.5  #include "memory/resourceArea.hpp"
    36.6  
    36.7 -
    36.8 -GenerationCounters::GenerationCounters(const char* name,
    36.9 -                                       int ordinal, int spaces,
   36.10 -                                       VirtualSpace* v):
   36.11 -                    _virtual_space(v) {
   36.12 -
   36.13 +void GenerationCounters::initialize(const char* name, int ordinal, int spaces,
   36.14 +                                    size_t min_capacity, size_t max_capacity,
   36.15 +                                    size_t curr_capacity) {
   36.16    if (UsePerfData) {
   36.17 -
   36.18      EXCEPTION_MARK;
   36.19      ResourceMark rm;
   36.20  
   36.21 @@ -51,18 +47,37 @@
   36.22  
   36.23      cname = PerfDataManager::counter_name(_name_space, "minCapacity");
   36.24      PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
   36.25 -                                     _virtual_space == NULL ? 0 :
   36.26 -                                     _virtual_space->committed_size(), CHECK);
   36.27 +                                     min_capacity, CHECK);
   36.28  
   36.29      cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
   36.30      PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
   36.31 -                                     _virtual_space == NULL ? 0 :
   36.32 -                                     _virtual_space->reserved_size(), CHECK);
   36.33 +                                     max_capacity, CHECK);
   36.34  
   36.35      cname = PerfDataManager::counter_name(_name_space, "capacity");
   36.36 -    _current_size = PerfDataManager::create_variable(SUN_GC, cname,
   36.37 -                                     PerfData::U_Bytes,
   36.38 -                                     _virtual_space == NULL ? 0 :
   36.39 -                                     _virtual_space->committed_size(), CHECK);
   36.40 +    _current_size =
   36.41 +      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
   36.42 +                                       curr_capacity, CHECK);
   36.43    }
   36.44  }
   36.45 +
   36.46 +GenerationCounters::GenerationCounters(const char* name,
   36.47 +                                       int ordinal, int spaces,
   36.48 +                                       VirtualSpace* v)
   36.49 +  : _virtual_space(v) {
   36.50 +  assert(v != NULL, "don't call this constructor if v == NULL");
   36.51 +  initialize(name, ordinal, spaces,
   36.52 +             v->committed_size(), v->reserved_size(), v->committed_size());
   36.53 +}
   36.54 +
   36.55 +GenerationCounters::GenerationCounters(const char* name,
   36.56 +                                       int ordinal, int spaces,
   36.57 +                                       size_t min_capacity, size_t max_capacity,
   36.58 +                                       size_t curr_capacity)
   36.59 +  : _virtual_space(NULL) {
   36.60 +  initialize(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity);
   36.61 +}
   36.62 +
   36.63 +void GenerationCounters::update_all() {
   36.64 +  assert(_virtual_space != NULL, "otherwise, override this method");
   36.65 +  _current_size->set_value(_virtual_space->committed_size());
   36.66 +}
    37.1 --- a/src/share/vm/gc_implementation/shared/generationCounters.hpp	Fri Sep 30 22:54:43 2011 -0700
    37.2 +++ b/src/share/vm/gc_implementation/shared/generationCounters.hpp	Thu Oct 06 13:28:09 2011 -0400
    37.3 @@ -34,6 +34,11 @@
    37.4  class GenerationCounters: public CHeapObj {
    37.5    friend class VMStructs;
    37.6  
    37.7 +private:
    37.8 +  void initialize(const char* name, int ordinal, int spaces,
    37.9 +                  size_t min_capacity, size_t max_capacity,
   37.10 +                  size_t curr_capacity);
   37.11 +
   37.12   protected:
   37.13    PerfVariable*      _current_size;
   37.14    VirtualSpace*      _virtual_space;
   37.15 @@ -48,11 +53,18 @@
   37.16    char*              _name_space;
   37.17  
   37.18    // This constructor is only meant for use with the PSGenerationCounters
   37.19 -  // constructor.  The need for such an constructor should be eliminated
    37.20 +  // constructor. The need for such a constructor should be eliminated
   37.21    // when VirtualSpace and PSVirtualSpace are unified.
   37.22 -  GenerationCounters() : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
   37.23 +  GenerationCounters()
   37.24 +             : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
   37.25 +
   37.26 +  // This constructor is used for subclasses that do not have a space
   37.27 +  // associated with them (e.g, in G1).
   37.28 +  GenerationCounters(const char* name, int ordinal, int spaces,
   37.29 +                     size_t min_capacity, size_t max_capacity,
   37.30 +                     size_t curr_capacity);
   37.31 +
   37.32   public:
   37.33 -
   37.34    GenerationCounters(const char* name, int ordinal, int spaces,
   37.35                       VirtualSpace* v);
   37.36  
   37.37 @@ -60,10 +72,7 @@
   37.38      if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
   37.39    }
   37.40  
   37.41 -  virtual void update_all() {
   37.42 -    _current_size->set_value(_virtual_space == NULL ? 0 :
   37.43 -                             _virtual_space->committed_size());
   37.44 -  }
   37.45 +  virtual void update_all();
   37.46  
   37.47    const char* name_space() const        { return _name_space; }
   37.48  
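
generationCounters above gains an initialize() helper and a constructor that takes explicit min/max/current capacities, so a counters subclass with no VirtualSpace (like the G1 counters declared earlier in this changeset) has to override update_all() and push its own capacity into _current_size. The following self-contained sketch shows that shape with stub types; the mult == 3 padding (one eden plus two survivor spaces) and the stub values are assumptions, not the committed G1 implementation.

    // Stubbed sketch of a VirtualSpace-less counters subclass overriding
    // update_all(); types and values are illustrative only.
    #include <cstddef>
    #include <cstdio>

    struct PerfVariable {
      size_t value;
      void set_value(size_t v) { value = v; }
    };

    struct G1MonitoringSupportStub {
      size_t young_gen_committed() const { return size_t(10) << 20; } // assumed 10 MB
      static size_t pad_capacity(size_t bytes, size_t mult = 1) {
        return bytes + 8 * mult; // 8 stands in for MinObjAlignmentInBytes
      }
    };

    struct YoungGenerationCountersStub {
      G1MonitoringSupportStub* _g1mm;
      PerfVariable             _current_size;

      // No VirtualSpace to read from, so the override recomputes the
      // committed size from the monitoring support and pads it three
      // times (eden + two survivor spaces).
      void update_all() {
        _current_size.set_value(
            G1MonitoringSupportStub::pad_capacity(_g1mm->young_gen_committed(), 3));
      }
    };

    int main() {
      G1MonitoringSupportStub g1mm;
      YoungGenerationCountersStub young = { &g1mm, { 0 } };
      young.update_all();
      std::printf("young capacity reported: %zu bytes\n", young._current_size.value);
      return 0;
    }
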
    38.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Sep 30 22:54:43 2011 -0700
    38.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu Oct 06 13:28:09 2011 -0400
    38.3 @@ -599,8 +599,7 @@
    38.4            // atomic wrt other collectors in this configuration, we
    38.5            // are guaranteed to have empty discovered ref lists.
    38.6            if (rp->discovery_is_atomic()) {
    38.7 -            rp->verify_no_references_recorded();
    38.8 -            rp->enable_discovery();
    38.9 +            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   38.10              rp->setup_policy(do_clear_all_soft_refs);
   38.11            } else {
   38.12              // collect() below will enable discovery as appropriate
    39.1 --- a/src/share/vm/memory/referenceProcessor.cpp	Fri Sep 30 22:54:43 2011 -0700
    39.2 +++ b/src/share/vm/memory/referenceProcessor.cpp	Thu Oct 06 13:28:09 2011 -0400
    39.3 @@ -35,42 +35,8 @@
    39.4  
    39.5  ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
    39.6  ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
    39.7 -const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
    39.8  bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
    39.9  
   39.10 -// List of discovered references.
   39.11 -class DiscoveredList {
   39.12 -public:
   39.13 -  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
   39.14 -  oop head() const     {
   39.15 -     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
   39.16 -                                _oop_head;
   39.17 -  }
   39.18 -  HeapWord* adr_head() {
   39.19 -    return UseCompressedOops ? (HeapWord*)&_compressed_head :
   39.20 -                               (HeapWord*)&_oop_head;
   39.21 -  }
   39.22 -  void   set_head(oop o) {
   39.23 -    if (UseCompressedOops) {
   39.24 -      // Must compress the head ptr.
   39.25 -      _compressed_head = oopDesc::encode_heap_oop(o);
   39.26 -    } else {
   39.27 -      _oop_head = o;
   39.28 -    }
   39.29 -  }
   39.30 -  bool   empty() const          { return head() == NULL; }
   39.31 -  size_t length()               { return _len; }
   39.32 -  void   set_length(size_t len) { _len = len;  }
   39.33 -  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
   39.34 -  void   dec_length(size_t dec) { _len -= dec; }
   39.35 -private:
   39.36 -  // Set value depending on UseCompressedOops. This could be a template class
   39.37 -  // but then we have to fix all the instantiations and declarations that use this class.
   39.38 -  oop       _oop_head;
   39.39 -  narrowOop _compressed_head;
   39.40 -  size_t _len;
   39.41 -};
   39.42 -
   39.43  void referenceProcessor_init() {
   39.44    ReferenceProcessor::init_statics();
   39.45  }
   39.46 @@ -112,7 +78,8 @@
   39.47    _discovery_is_mt     = mt_discovery;
   39.48    _num_q               = MAX2(1, mt_processing_degree);
   39.49    _max_num_q           = MAX2(_num_q, mt_discovery_degree);
   39.50 -  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
   39.51 +  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList,
   39.52 +                                          _max_num_q * number_of_subclasses_of_ref());
   39.53    if (_discoveredSoftRefs == NULL) {
   39.54      vm_exit_during_initialization("Could not allocated RefProc Array");
   39.55    }
   39.56 @@ -120,7 +87,7 @@
   39.57    _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   39.58    _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
   39.59    // Initialized all entries to NULL
   39.60 -  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   39.61 +  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
   39.62      _discoveredSoftRefs[i].set_head(NULL);
   39.63      _discoveredSoftRefs[i].set_length(0);
   39.64    }
   39.65 @@ -134,19 +101,15 @@
   39.66  #ifndef PRODUCT
   39.67  void ReferenceProcessor::verify_no_references_recorded() {
   39.68    guarantee(!_discovering_refs, "Discovering refs?");
   39.69 -  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   39.70 -    guarantee(_discoveredSoftRefs[i].empty(),
   39.71 +  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
   39.72 +    guarantee(_discoveredSoftRefs[i].is_empty(),
   39.73                "Found non-empty discovered list");
   39.74    }
   39.75  }
   39.76  #endif
   39.77  
   39.78  void ReferenceProcessor::weak_oops_do(OopClosure* f) {
   39.79 -  // Should this instead be
   39.80 -  // for (int i = 0; i < subclasses_of_ref; i++_ {
   39.81 -  //   for (int j = 0; j < _num_q; j++) {
   39.82 -  //     int index = i * _max_num_q + j;
   39.83 -  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   39.84 +  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
   39.85      if (UseCompressedOops) {
   39.86        f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
   39.87      } else {
   39.88 @@ -404,7 +367,7 @@
   39.89      // allocated and are indexed into.
   39.90      assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
   39.91      for (int j = 0;
   39.92 -         j < subclasses_of_ref;
   39.93 +         j < ReferenceProcessor::number_of_subclasses_of_ref();
   39.94           j++, index += _n_queues) {
   39.95        _ref_processor.enqueue_discovered_reflist(
   39.96          _refs_lists[index], _pending_list_addr);
   39.97 @@ -424,7 +387,7 @@
   39.98      task_executor->execute(tsk);
   39.99    } else {
  39.100      // Serial code: call the parent class's implementation
  39.101 -    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
  39.102 +    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
  39.103        enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
  39.104        _discoveredSoftRefs[i].set_head(NULL);
  39.105        _discoveredSoftRefs[i].set_length(0);
  39.106 @@ -432,119 +395,7 @@
  39.107    }
  39.108  }
  39.109  
  39.110 -// Iterator for the list of discovered references.
  39.111 -class DiscoveredListIterator {
  39.112 -public:
  39.113 -  inline DiscoveredListIterator(DiscoveredList&    refs_list,
  39.114 -                                OopClosure*        keep_alive,
  39.115 -                                BoolObjectClosure* is_alive);
  39.116 -
  39.117 -  // End Of List.
  39.118 -  inline bool has_next() const { return _ref != NULL; }
  39.119 -
  39.120 -  // Get oop to the Reference object.
  39.121 -  inline oop obj() const { return _ref; }
  39.122 -
  39.123 -  // Get oop to the referent object.
  39.124 -  inline oop referent() const { return _referent; }
  39.125 -
  39.126 -  // Returns true if referent is alive.
  39.127 -  inline bool is_referent_alive() const;
  39.128 -
  39.129 -  // Loads data for the current reference.
  39.130 -  // The "allow_null_referent" argument tells us to allow for the possibility
  39.131 -  // of a NULL referent in the discovered Reference object. This typically
  39.132 -  // happens in the case of concurrent collectors that may have done the
  39.133 -  // discovery concurrently, or interleaved, with mutator execution.
  39.134 -  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
  39.135 -
  39.136 -  // Move to the next discovered reference.
  39.137 -  inline void next();
  39.138 -
  39.139 -  // Remove the current reference from the list
  39.140 -  inline void remove();
  39.141 -
  39.142 -  // Make the Reference object active again.
  39.143 -  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
  39.144 -
  39.145 -  // Make the referent alive.
  39.146 -  inline void make_referent_alive() {
  39.147 -    if (UseCompressedOops) {
  39.148 -      _keep_alive->do_oop((narrowOop*)_referent_addr);
  39.149 -    } else {
  39.150 -      _keep_alive->do_oop((oop*)_referent_addr);
  39.151 -    }
  39.152 -  }
  39.153 -
  39.154 -  // Update the discovered field.
  39.155 -  inline void update_discovered() {
  39.156 -    // First _prev_next ref actually points into DiscoveredList (gross).
  39.157 -    if (UseCompressedOops) {
  39.158 -      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
  39.159 -        _keep_alive->do_oop((narrowOop*)_prev_next);
  39.160 -      }
  39.161 -    } else {
  39.162 -      if (!oopDesc::is_null(*(oop*)_prev_next)) {
  39.163 -        _keep_alive->do_oop((oop*)_prev_next);
  39.164 -      }
  39.165 -    }
  39.166 -  }
  39.167 -
  39.168 -  // NULL out referent pointer.
  39.169 -  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
  39.170 -
  39.171 -  // Statistics
  39.172 -  NOT_PRODUCT(
  39.173 -  inline size_t processed() const { return _processed; }
  39.174 -  inline size_t removed() const   { return _removed; }
  39.175 -  )
  39.176 -
  39.177 -  inline void move_to_next();
  39.178 -
  39.179 -private:
  39.180 -  DiscoveredList&    _refs_list;
  39.181 -  HeapWord*          _prev_next;
  39.182 -  oop                _prev;
  39.183 -  oop                _ref;
  39.184 -  HeapWord*          _discovered_addr;
  39.185 -  oop                _next;
  39.186 -  HeapWord*          _referent_addr;
  39.187 -  oop                _referent;
  39.188 -  OopClosure*        _keep_alive;
  39.189 -  BoolObjectClosure* _is_alive;
  39.190 -  DEBUG_ONLY(
  39.191 -  oop                _first_seen; // cyclic linked list check
  39.192 -  )
  39.193 -  NOT_PRODUCT(
  39.194 -  size_t             _processed;
  39.195 -  size_t             _removed;
  39.196 -  )
  39.197 -};
  39.198 -
  39.199 -inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
  39.200 -                                                      OopClosure*        keep_alive,
  39.201 -                                                      BoolObjectClosure* is_alive)
  39.202 -  : _refs_list(refs_list),
  39.203 -    _prev_next(refs_list.adr_head()),
  39.204 -    _prev(NULL),
  39.205 -    _ref(refs_list.head()),
  39.206 -#ifdef ASSERT
  39.207 -    _first_seen(refs_list.head()),
  39.208 -#endif
  39.209 -#ifndef PRODUCT
  39.210 -    _processed(0),
  39.211 -    _removed(0),
  39.212 -#endif
  39.213 -    _next(NULL),
  39.214 -    _keep_alive(keep_alive),
  39.215 -    _is_alive(is_alive)
  39.216 -{ }
  39.217 -
  39.218 -inline bool DiscoveredListIterator::is_referent_alive() const {
  39.219 -  return _is_alive->do_object_b(_referent);
  39.220 -}
  39.221 -
  39.222 -inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  39.223 +void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  39.224    _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  39.225    oop discovered = java_lang_ref_Reference::discovered(_ref);
  39.226    assert(_discovered_addr && discovered->is_oop_or_null(),
  39.227 @@ -560,13 +411,7 @@
  39.228           "bad referent");
  39.229  }
  39.230  
  39.231 -inline void DiscoveredListIterator::next() {
  39.232 -  _prev_next = _discovered_addr;
  39.233 -  _prev = _ref;
  39.234 -  move_to_next();
  39.235 -}
  39.236 -
  39.237 -inline void DiscoveredListIterator::remove() {
  39.238 +void DiscoveredListIterator::remove() {
  39.239    assert(_ref->is_oop(), "Dropping a bad reference");
  39.240    oop_store_raw(_discovered_addr, NULL);
  39.241  
  39.242 @@ -592,15 +437,29 @@
  39.243    _refs_list.dec_length(1);
  39.244  }
  39.245  
  39.246 -inline void DiscoveredListIterator::move_to_next() {
  39.247 -  if (_ref == _next) {
  39.248 -    // End of the list.
  39.249 -    _ref = NULL;
  39.250 +// Make the Reference object active again.
  39.251 +void DiscoveredListIterator::make_active() {
  39.252 +  // For G1 we don't want to use set_next - it
  39.253 +  // will dirty the card for the next field of
  39.254 +  // the reference object and will fail
  39.255 +  // CT verification.
  39.256 +  if (UseG1GC) {
  39.257 +    BarrierSet* bs = oopDesc::bs();
  39.258 +    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
  39.259 +
  39.260 +    if (UseCompressedOops) {
  39.261 +      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
  39.262 +    } else {
  39.263 +      bs->write_ref_field_pre((oop*)next_addr, NULL);
  39.264 +    }
  39.265 +    java_lang_ref_Reference::set_next_raw(_ref, NULL);
  39.266    } else {
  39.267 -    _ref = _next;
  39.268 +    java_lang_ref_Reference::set_next(_ref, NULL);
  39.269    }
  39.270 -  assert(_ref != _first_seen, "cyclic ref_list found");
  39.271 -  NOT_PRODUCT(_processed++);
  39.272 +}
  39.273 +
  39.274 +void DiscoveredListIterator::clear_referent() {
  39.275 +  oop_store_raw(_referent_addr, NULL);
  39.276  }
  39.277  
  39.278  // NOTE: process_phase*() are largely similar, and at a high level
  39.279 @@ -786,10 +645,9 @@
  39.280  
  39.281  void ReferenceProcessor::abandon_partial_discovery() {
  39.282    // loop over the lists
  39.283 -  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
  39.284 +  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
  39.285      if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
  39.286 -      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
  39.287 -                             list_name(i));
  39.288 +      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
  39.289      }
  39.290      abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  39.291    }
  39.292 @@ -858,6 +716,14 @@
  39.293    bool _clear_referent;
  39.294  };
  39.295  
  39.296 +void ReferenceProcessor::set_discovered(oop ref, oop value) {
  39.297 +  if (_discovered_list_needs_barrier) {
  39.298 +    java_lang_ref_Reference::set_discovered(ref, value);
  39.299 +  } else {
  39.300 +    java_lang_ref_Reference::set_discovered_raw(ref, value);
  39.301 +  }
  39.302 +}
  39.303 +
  39.304  // Balances reference queues.
  39.305  // Move entries from all queues[0, 1, ..., _max_num_q-1] to
  39.306  // queues[0, 1, ..., _num_q-1] because only the first _num_q
  39.307 @@ -915,9 +781,9 @@
  39.308          // Add the chain to the to list.
  39.309          if (ref_lists[to_idx].head() == NULL) {
  39.310            // to list is empty. Make a loop at the end.
  39.311 -          java_lang_ref_Reference::set_discovered(move_tail, move_tail);
  39.312 +          set_discovered(move_tail, move_tail);
  39.313          } else {
  39.314 -          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
  39.315 +          set_discovered(move_tail, ref_lists[to_idx].head());
  39.316          }
  39.317          ref_lists[to_idx].set_head(move_head);
  39.318          ref_lists[to_idx].inc_length(refs_to_move);
  39.319 @@ -1038,11 +904,7 @@
  39.320  
  39.321  void ReferenceProcessor::clean_up_discovered_references() {
  39.322    // loop over the lists
  39.323 -  // Should this instead be
  39.324 -  // for (int i = 0; i < subclasses_of_ref; i++_ {
  39.325 -  //   for (int j = 0; j < _num_q; j++) {
  39.326 -  //     int index = i * _max_num_q + j;
  39.327 -  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
  39.328 +  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
  39.329      if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
  39.330        gclog_or_tty->print_cr(
  39.331          "\nScrubbing %s discovered list of Null referents",
  39.332 @@ -1260,6 +1122,8 @@
  39.333      }
  39.334    }
  39.335  
  39.336 +  ResourceMark rm;      // Needed for tracing.
  39.337 +
  39.338    HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  39.339    const oop  discovered = java_lang_ref_Reference::discovered(obj);
  39.340    assert(discovered->is_oop_or_null(), "bad discovered field");
  39.341 @@ -1472,7 +1336,9 @@
  39.342  }
  39.343  
  39.344  const char* ReferenceProcessor::list_name(int i) {
  39.345 -   assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
  39.346 +   assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
  39.347 +          "Out of bounds index");
  39.348 +
  39.349     int j = i / _max_num_q;
  39.350     switch (j) {
  39.351       case 0: return "SoftRef";
  39.352 @@ -1493,7 +1359,7 @@
  39.353  #ifndef PRODUCT
  39.354  void ReferenceProcessor::clear_discovered_references() {
  39.355    guarantee(!_discovering_refs, "Discovering refs?");
  39.356 -  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
  39.357 +  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
  39.358      clear_discovered_references(_discoveredSoftRefs[i]);
  39.359    }
  39.360  }
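
The referenceProcessor.cpp hunks above move the DiscoveredListIterator declaration into the header (next file), make make_active() issue an explicit G1 pre-write barrier before clearing the Reference next field so card-table verification is not tripped, and route discovered-field updates through the new ReferenceProcessor::set_discovered(), which only emits a barrier when _discovered_list_needs_barrier is set. Below is a minimal, standalone sketch of that raw-versus-barriered store decision; Slot, write_barrier and set_slot are illustrative stand-ins, not HotSpot APIs.

    #include <cstdio>

    // Hypothetical stand-ins for a reference field slot and a collector barrier.
    struct Slot { void* value; };

    static void write_barrier(Slot* s) {
      // A real pre-barrier would record the old value for the concurrent marker
      // (or dirty a card in a post-barrier); here we only log the event.
      std::printf("barrier on slot %p\n", (void*)s);
    }

    // Mirrors the shape of set_discovered()/make_active(): emit the barrier only
    // when the discovered list needs one (concurrent G1/CMS discovery), otherwise
    // perform a plain raw store.
    static void set_slot(Slot* s, void* v, bool needs_barrier) {
      if (needs_barrier) {
        write_barrier(s);
      }
      s->value = v;  // the store itself is the same on both paths
    }

    int main() {
      Slot next = { nullptr };
      set_slot(&next, nullptr, true);   // barriered path (G1-style)
      set_slot(&next, nullptr, false);  // raw path
      return 0;
    }
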
    40.1 --- a/src/share/vm/memory/referenceProcessor.hpp	Fri Sep 30 22:54:43 2011 -0700
    40.2 +++ b/src/share/vm/memory/referenceProcessor.hpp	Thu Oct 06 13:28:09 2011 -0400
    40.3 @@ -48,18 +48,175 @@
    40.4  // forward references
    40.5  class ReferencePolicy;
    40.6  class AbstractRefProcTaskExecutor;
    40.7 -class DiscoveredList;
    40.8 +
    40.9 +// List of discovered references.
   40.10 +class DiscoveredList {
   40.11 +public:
   40.12 +  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
   40.13 +  oop head() const     {
   40.14 +     return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
   40.15 +                                _oop_head;
   40.16 +  }
   40.17 +  HeapWord* adr_head() {
   40.18 +    return UseCompressedOops ? (HeapWord*)&_compressed_head :
   40.19 +                               (HeapWord*)&_oop_head;
   40.20 +  }
   40.21 +  void set_head(oop o) {
   40.22 +    if (UseCompressedOops) {
   40.23 +      // Must compress the head ptr.
   40.24 +      _compressed_head = oopDesc::encode_heap_oop(o);
   40.25 +    } else {
   40.26 +      _oop_head = o;
   40.27 +    }
   40.28 +  }
   40.29 +  bool   is_empty() const       { return head() == NULL; }
   40.30 +  size_t length()               { return _len; }
   40.31 +  void   set_length(size_t len) { _len = len;  }
   40.32 +  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
   40.33 +  void   dec_length(size_t dec) { _len -= dec; }
   40.34 +private:
   40.35 +  // Set value depending on UseCompressedOops. This could be a template class
   40.36 +  // but then we have to fix all the instantiations and declarations that use this class.
   40.37 +  oop       _oop_head;
   40.38 +  narrowOop _compressed_head;
   40.39 +  size_t _len;
   40.40 +};
   40.41 +
   40.42 +// Iterator for the list of discovered references.
   40.43 +class DiscoveredListIterator {
   40.44 +private:
   40.45 +  DiscoveredList&    _refs_list;
   40.46 +  HeapWord*          _prev_next;
   40.47 +  oop                _prev;
   40.48 +  oop                _ref;
   40.49 +  HeapWord*          _discovered_addr;
   40.50 +  oop                _next;
   40.51 +  HeapWord*          _referent_addr;
   40.52 +  oop                _referent;
   40.53 +  OopClosure*        _keep_alive;
   40.54 +  BoolObjectClosure* _is_alive;
   40.55 +
   40.56 +  DEBUG_ONLY(
   40.57 +  oop                _first_seen; // cyclic linked list check
   40.58 +  )
   40.59 +
   40.60 +  NOT_PRODUCT(
   40.61 +  size_t             _processed;
   40.62 +  size_t             _removed;
   40.63 +  )
   40.64 +
   40.65 +public:
   40.66 +  inline DiscoveredListIterator(DiscoveredList&    refs_list,
   40.67 +                                OopClosure*        keep_alive,
   40.68 +                                BoolObjectClosure* is_alive):
   40.69 +    _refs_list(refs_list),
   40.70 +    _prev_next(refs_list.adr_head()),
   40.71 +    _prev(NULL),
   40.72 +    _ref(refs_list.head()),
   40.73 +#ifdef ASSERT
   40.74 +    _first_seen(refs_list.head()),
   40.75 +#endif
   40.76 +#ifndef PRODUCT
   40.77 +    _processed(0),
   40.78 +    _removed(0),
   40.79 +#endif
   40.80 +    _next(NULL),
   40.81 +    _keep_alive(keep_alive),
   40.82 +    _is_alive(is_alive)
   40.83 +{ }
   40.84 +
   40.85 +  // End Of List.
   40.86 +  inline bool has_next() const { return _ref != NULL; }
   40.87 +
   40.88 +  // Get oop to the Reference object.
   40.89 +  inline oop obj() const { return _ref; }
   40.90 +
   40.91 +  // Get oop to the referent object.
   40.92 +  inline oop referent() const { return _referent; }
   40.93 +
   40.94 +  // Returns true if referent is alive.
   40.95 +  inline bool is_referent_alive() const {
   40.96 +    return _is_alive->do_object_b(_referent);
   40.97 +  }
   40.98 +
   40.99 +  // Loads data for the current reference.
  40.100 +  // The "allow_null_referent" argument tells us to allow for the possibility
  40.101 +  // of a NULL referent in the discovered Reference object. This typically
  40.102 +  // happens in the case of concurrent collectors that may have done the
  40.103 +  // discovery concurrently, or interleaved, with mutator execution.
  40.104 +  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
  40.105 +
  40.106 +  // Move to the next discovered reference.
  40.107 +  inline void next() {
  40.108 +    _prev_next = _discovered_addr;
  40.109 +    _prev = _ref;
  40.110 +    move_to_next();
  40.111 +  }
  40.112 +
  40.113 +  // Remove the current reference from the list
  40.114 +  void remove();
  40.115 +
  40.116 +  // Make the Reference object active again.
  40.117 +  void make_active();
  40.118 +
  40.119 +  // Make the referent alive.
  40.120 +  inline void make_referent_alive() {
  40.121 +    if (UseCompressedOops) {
  40.122 +      _keep_alive->do_oop((narrowOop*)_referent_addr);
  40.123 +    } else {
  40.124 +      _keep_alive->do_oop((oop*)_referent_addr);
  40.125 +    }
  40.126 +  }
  40.127 +
  40.128 +  // Update the discovered field.
  40.129 +  inline void update_discovered() {
  40.130 +    // First _prev_next ref actually points into DiscoveredList (gross).
  40.131 +    if (UseCompressedOops) {
  40.132 +      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
  40.133 +        _keep_alive->do_oop((narrowOop*)_prev_next);
  40.134 +      }
  40.135 +    } else {
  40.136 +      if (!oopDesc::is_null(*(oop*)_prev_next)) {
  40.137 +        _keep_alive->do_oop((oop*)_prev_next);
  40.138 +      }
  40.139 +    }
  40.140 +  }
  40.141 +
  40.142 +  // NULL out referent pointer.
  40.143 +  void clear_referent();
  40.144 +
  40.145 +  // Statistics
  40.146 +  NOT_PRODUCT(
  40.147 +  inline size_t processed() const { return _processed; }
  40.148 +  inline size_t removed() const   { return _removed; }
  40.149 +  )
  40.150 +
  40.151 +  inline void move_to_next() {
  40.152 +    if (_ref == _next) {
  40.153 +      // End of the list.
  40.154 +      _ref = NULL;
  40.155 +    } else {
  40.156 +      _ref = _next;
  40.157 +    }
  40.158 +    assert(_ref != _first_seen, "cyclic ref_list found");
  40.159 +    NOT_PRODUCT(_processed++);
  40.160 +  }
  40.161 +
  40.162 +};
  40.163  
  40.164  class ReferenceProcessor : public CHeapObj {
  40.165   protected:
  40.166    // Compatibility with pre-4965777 JDK's
  40.167    static bool _pending_list_uses_discovered_field;
  40.168 -  MemRegion   _span; // (right-open) interval of heap
  40.169 -                     // subject to wkref discovery
  40.170 -  bool        _discovering_refs;      // true when discovery enabled
  40.171 -  bool        _discovery_is_atomic;   // if discovery is atomic wrt
  40.172 -                                      // other collectors in configuration
  40.173 -  bool        _discovery_is_mt;       // true if reference discovery is MT.
  40.174 +
  40.175 +  MemRegion   _span;                    // (right-open) interval of heap
  40.176 +                                        // subject to wkref discovery
  40.177 +
  40.178 +  bool        _discovering_refs;        // true when discovery enabled
  40.179 +  bool        _discovery_is_atomic;     // if discovery is atomic wrt
  40.180 +                                        // other collectors in configuration
  40.181 +  bool        _discovery_is_mt;         // true if reference discovery is MT.
  40.182 +
  40.183    // If true, setting "next" field of a discovered refs list requires
  40.184    // write barrier(s).  (Must be true if used in a collector in which
  40.185    // elements of a discovered list may be moved during discovery: for
  40.186 @@ -67,18 +224,19 @@
  40.187    // long-term concurrent marking phase that does weak reference
  40.188    // discovery.)
  40.189    bool        _discovered_list_needs_barrier;
  40.190 -  BarrierSet* _bs;                    // Cached copy of BarrierSet.
  40.191 -  bool        _enqueuing_is_done;     // true if all weak references enqueued
  40.192 -  bool        _processing_is_mt;      // true during phases when
  40.193 -                                      // reference processing is MT.
  40.194 -  int         _next_id;               // round-robin mod _num_q counter in
  40.195 -                                      // support of work distribution
  40.196  
  40.197 -  // For collectors that do not keep GC marking information
  40.198 +  BarrierSet* _bs;                      // Cached copy of BarrierSet.
  40.199 +  bool        _enqueuing_is_done;       // true if all weak references enqueued
  40.200 +  bool        _processing_is_mt;        // true during phases when
  40.201 +                                        // reference processing is MT.
  40.202 +  int         _next_id;                 // round-robin mod _num_q counter in
  40.203 +                                        // support of work distribution
  40.204 +
  40.205 +  // For collectors that do not keep GC liveness information
  40.206    // in the object header, this field holds a closure that
  40.207    // helps the reference processor determine the reachability
  40.208 -  // of an oop (the field is currently initialized to NULL for
  40.209 -  // all collectors but the CMS collector).
  40.210 +  // of an oop. It is currently initialized to NULL for all
  40.211 +  // collectors except for CMS and G1.
  40.212    BoolObjectClosure* _is_alive_non_header;
  40.213  
  40.214    // Soft ref clearing policies
  40.215 @@ -102,10 +260,13 @@
  40.216    DiscoveredList* _discoveredPhantomRefs;
  40.217  
  40.218   public:
  40.219 -  int num_q()                            { return _num_q; }
  40.220 -  int max_num_q()                        { return _max_num_q; }
  40.221 -  void set_active_mt_degree(int v)       { _num_q = v; }
  40.222 -  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  40.223 +  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
  40.224 +
  40.225 +  int num_q()                              { return _num_q; }
  40.226 +  int max_num_q()                          { return _max_num_q; }
  40.227 +  void set_active_mt_degree(int v)         { _num_q = v; }
  40.228 +  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
  40.229 +
  40.230    ReferencePolicy* setup_policy(bool always_clear) {
  40.231      _current_soft_ref_policy = always_clear ?
  40.232        _always_clear_soft_ref_policy : _default_soft_ref_policy;
  40.233 @@ -205,6 +366,11 @@
  40.234    void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
  40.235  
  40.236   protected:
  40.237 +  // Set the 'discovered' field of the given reference to
  40.238 +  // the given value - emitting barriers depending upon
  40.239 +  // the value of _discovered_list_needs_barrier.
  40.240 +  void set_discovered(oop ref, oop value);
  40.241 +
  40.242    // "Preclean" the given discovered reference list
  40.243    // by removing references with strongly reachable referents.
  40.244    // Currently used in support of CMS only.
  40.245 @@ -290,7 +456,19 @@
  40.246    void      set_span(MemRegion span) { _span = span; }
  40.247  
  40.248    // start and stop weak ref discovery
  40.249 -  void enable_discovery()   { _discovering_refs = true;  }
  40.250 +  void enable_discovery(bool verify_disabled, bool check_no_refs) {
  40.251 +#ifdef ASSERT
  40.252 +    // Verify that we're not currently discovering refs
  40.253 +    assert(!verify_disabled || !_discovering_refs, "nested call?");
  40.254 +
  40.255 +    if (check_no_refs) {
  40.256 +      // Verify that the discovered lists are empty
  40.257 +      verify_no_references_recorded();
  40.258 +    }
  40.259 +#endif // ASSERT
  40.260 +    _discovering_refs = true;
  40.261 +  }
  40.262 +
  40.263    void disable_discovery()  { _discovering_refs = false; }
  40.264    bool discovery_enabled()  { return _discovering_refs;  }
  40.265  
  40.266 @@ -365,7 +543,7 @@
  40.267  
  40.268    ~NoRefDiscovery() {
  40.269      if (_was_discovering_refs) {
  40.270 -      _rp->enable_discovery();
  40.271 +      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
  40.272      }
  40.273    }
  40.274  };
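
In the header, DiscoveredList now stores its head as either a narrowOop or an oop depending on UseCompressedOops, the iterator is declared alongside it, number_of_subclasses_of_ref() is derived from the ReferenceType enum instead of a hard-coded subclasses_of_ref constant, and enable_discovery() grows verification arguments. The discovered lists remain one flat array laid out in per-subclass blocks, which is why list_name() divides the index by _max_num_q. A toy model of that index layout, with the subclass and queue counts assumed purely for illustration:

    #include <cstdio>

    // Simplified model of the flat discovered-list array: all queues for one
    // Reference subclass are contiguous, so index = subclass * max_num_q + queue
    // and subclass = index / max_num_q.
    static const char* kSubclassNames[] = { "SoftRef", "WeakRef", "FinalRef", "PhantomRef" };

    int main() {
      const int num_subclasses = 4;  // assumption: Soft/Weak/Final/Phantom
      const int max_num_q      = 2;  // assumption: two parallel worker queues
      for (int i = 0; i < num_subclasses * max_num_q; i++) {
        std::printf("list %2d -> %s queue %d\n",
                    i, kSubclassNames[i / max_num_q], i % max_num_q);
      }
      return 0;
    }
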
    41.1 --- a/src/share/vm/runtime/thread.cpp	Fri Sep 30 22:54:43 2011 -0700
    41.2 +++ b/src/share/vm/runtime/thread.cpp	Thu Oct 06 13:28:09 2011 -0400
    41.3 @@ -753,8 +753,9 @@
    41.4    jint thread_parity = _oops_do_parity;
    41.5    if (thread_parity != strong_roots_parity) {
    41.6      jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
    41.7 -    if (res == thread_parity) return true;
    41.8 -    else {
    41.9 +    if (res == thread_parity) {
   41.10 +      return true;
   41.11 +    } else {
   41.12        guarantee(res == strong_roots_parity, "Or else what?");
   41.13        assert(SharedHeap::heap()->n_par_threads() > 0,
   41.14               "Should only fail when parallel.");
   41.15 @@ -3909,8 +3910,9 @@
   41.16      }
   41.17    }
   41.18    VMThread* vmt = VMThread::vm_thread();
   41.19 -  if (vmt->claim_oops_do(is_par, cp))
   41.20 +  if (vmt->claim_oops_do(is_par, cp)) {
   41.21      vmt->oops_do(f, cf);
   41.22 +  }
   41.23  }
   41.24  
   41.25  #ifndef SERIALGC
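
The thread.cpp hunks are brace-style cleanups, but the surrounding code is the parity-based claiming used so that each thread's oops are scanned exactly once per strong-roots pass: a worker claims a thread by CAS-ing the current parity into its claim word, and only the CAS winner does the scan. A minimal standalone sketch of that claim-by-CAS idiom; the names are illustrative, not the HotSpot interface.

    #include <atomic>
    #include <cstdio>

    // One claim word per task; a worker claims it by installing the current
    // phase's parity value. Only the winner of the race performs the work.
    struct Claimable {
      std::atomic<int> parity{0};
      bool try_claim(int phase_parity) {
        int cur = parity.load();
        if (cur == phase_parity) {
          return false;                       // already claimed in this phase
        }
        return parity.compare_exchange_strong(cur, phase_parity);
      }
    };

    int main() {
      Claimable vm_thread_roots;
      const int phase = 1;
      if (vm_thread_roots.try_claim(phase)) std::printf("claimed: scan roots\n");
      if (vm_thread_roots.try_claim(phase)) std::printf("never reached\n");
      return 0;
    }
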
    42.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Sep 30 22:54:43 2011 -0700
    42.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Thu Oct 06 13:28:09 2011 -0400
    42.3 @@ -182,6 +182,7 @@
    42.4  #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
    42.5  #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
    42.6  #include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp"
    42.7 +#include "gc_implementation/g1/vmStructs_g1.hpp"
    42.8  #endif
    42.9  #ifdef COMPILER2
   42.10  #include "opto/addnode.hpp"
   42.11 @@ -2878,6 +2879,9 @@
   42.12    VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
   42.13                   GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
   42.14                   GENERATE_STATIC_VM_STRUCT_ENTRY)
   42.15 +
   42.16 +  VM_STRUCTS_G1(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
   42.17 +                GENERATE_STATIC_VM_STRUCT_ENTRY)
   42.18  #endif // SERIALGC
   42.19  
   42.20    VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
   42.21 @@ -2921,6 +2925,9 @@
   42.22                 GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
   42.23  
   42.24    VM_TYPES_PARNEW(GENERATE_VM_TYPE_ENTRY)
   42.25 +
   42.26 +  VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY,
   42.27 +              GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
   42.28  #endif // SERIALGC
   42.29  
   42.30    VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
   42.31 @@ -3020,6 +3027,9 @@
   42.32    VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
   42.33               CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
   42.34               CHECK_STATIC_VM_STRUCT_ENTRY);
   42.35 +
   42.36 +  VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
   42.37 +                CHECK_STATIC_VM_STRUCT_ENTRY);
   42.38  #endif // SERIALGC
   42.39  
   42.40    VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
   42.41 @@ -3060,6 +3070,9 @@
   42.42                 CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
   42.43  
   42.44    VM_TYPES_PARNEW(CHECK_VM_TYPE_ENTRY)
   42.45 +
   42.46 +  VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
   42.47 +              CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
   42.48  #endif // SERIALGC
   42.49  
   42.50    VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
   42.51 @@ -3125,6 +3138,8 @@
   42.52    debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \
   42.53                              ENSURE_FIELD_TYPE_PRESENT, \
   42.54                              ENSURE_FIELD_TYPE_PRESENT));
   42.55 +  debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT, \
   42.56 +                           ENSURE_FIELD_TYPE_PRESENT));
   42.57  #endif // SERIALGC
   42.58    debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
   42.59                              ENSURE_FIELD_TYPE_PRESENT, \
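
vmStructs.cpp now includes vmStructs_g1.hpp and expands VM_STRUCTS_G1/VM_TYPES_G1 next to the existing CMS and ParNew entries, so the Serviceability Agent (including the new sun.jvm.hotspot.gc_implementation.g1 classes added by this changeset) can look up G1 fields by name and offset. These are X-macro tables; the standalone illustration below shows the idiom with a made-up structure and field list.

    #include <cstddef>
    #include <cstdio>

    // A made-up "VM" structure to expose to an out-of-process inspector.
    struct DemoHeap { size_t committed; size_t used; };

    // X-macro table: one entry per exposed field; expanding it with different
    // generator macros produces different artifacts from the same list.
    #define DEMO_STRUCTS(nonstatic_field)            \
      nonstatic_field(DemoHeap, committed, size_t)   \
      nonstatic_field(DemoHeap, used,      size_t)

    struct FieldEntry { const char* type; const char* field; size_t offset; };

    #define GENERATE_ENTRY(type, field, ftype) { #type, #field, offsetof(type, field) },
    static FieldEntry field_table[] = { DEMO_STRUCTS(GENERATE_ENTRY) };
    #undef GENERATE_ENTRY

    int main() {
      for (size_t i = 0; i < sizeof(field_table) / sizeof(field_table[0]); i++) {
        std::printf("%s::%s at offset %zu\n",
                    field_table[i].type, field_table[i].field, field_table[i].offset);
      }
      return 0;
    }
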
    43.1 --- a/src/share/vm/services/g1MemoryPool.cpp	Fri Sep 30 22:54:43 2011 -0700
    43.2 +++ b/src/share/vm/services/g1MemoryPool.cpp	Thu Oct 06 13:28:09 2011 -0400
    43.3 @@ -32,71 +32,44 @@
    43.4  G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
    43.5                                       const char* name,
    43.6                                       size_t init_size,
    43.7 +                                     size_t max_size,
    43.8                                       bool support_usage_threshold) :
    43.9 -  _g1h(g1h), CollectedMemoryPool(name,
   43.10 -                                   MemoryPool::Heap,
   43.11 -                                   init_size,
   43.12 -                                   undefined_max(),
   43.13 -                                   support_usage_threshold) {
   43.14 +  _g1mm(g1h->g1mm()), CollectedMemoryPool(name,
   43.15 +                                          MemoryPool::Heap,
   43.16 +                                          init_size,
   43.17 +                                          max_size,
   43.18 +                                          support_usage_threshold) {
   43.19    assert(UseG1GC, "sanity");
   43.20  }
   43.21  
   43.22 -// See the comment at the top of g1MemoryPool.hpp
   43.23 -size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
   43.24 -  return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
   43.25 -}
   43.26 -
   43.27 -// See the comment at the top of g1MemoryPool.hpp
   43.28 -size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
   43.29 -  return g1h->g1mm()->eden_space_used();
   43.30 -}
   43.31 -
   43.32 -// See the comment at the top of g1MemoryPool.hpp
   43.33 -size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
   43.34 -  return g1h->g1mm()->survivor_space_committed();
   43.35 -}
   43.36 -
   43.37 -// See the comment at the top of g1MemoryPool.hpp
   43.38 -size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
   43.39 -  return g1h->g1mm()->survivor_space_used();
   43.40 -}
   43.41 -
   43.42 -// See the comment at the top of g1MemoryPool.hpp
   43.43 -size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
   43.44 -  return g1h->g1mm()->old_space_committed();
   43.45 -}
   43.46 -
   43.47 -// See the comment at the top of g1MemoryPool.hpp
   43.48 -size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
   43.49 -  return g1h->g1mm()->old_space_used();
   43.50 -}
   43.51 -
   43.52  G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
   43.53    G1MemoryPoolSuper(g1h,
   43.54 -                    "G1 Eden",
   43.55 -                    eden_space_committed(g1h), /* init_size */
   43.56 +                    "G1 Eden Space",
   43.57 +                    g1h->g1mm()->eden_space_committed(), /* init_size */
   43.58 +                    _undefined_max,
   43.59                      false /* support_usage_threshold */) { }
   43.60  
   43.61  MemoryUsage G1EdenPool::get_memory_usage() {
   43.62    size_t initial_sz = initial_size();
   43.63    size_t max_sz     = max_size();
   43.64    size_t used       = used_in_bytes();
   43.65 -  size_t committed  = eden_space_committed(_g1h);
   43.66 +  size_t committed  = _g1mm->eden_space_committed();
   43.67  
   43.68    return MemoryUsage(initial_sz, used, committed, max_sz);
   43.69  }
   43.70  
   43.71  G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
   43.72    G1MemoryPoolSuper(g1h,
   43.73 -                    "G1 Survivor",
   43.74 -                    survivor_space_committed(g1h), /* init_size */
   43.75 +                    "G1 Survivor Space",
   43.76 +                    g1h->g1mm()->survivor_space_committed(), /* init_size */
   43.77 +                    _undefined_max,
   43.78                      false /* support_usage_threshold */) { }
   43.79  
   43.80  MemoryUsage G1SurvivorPool::get_memory_usage() {
   43.81    size_t initial_sz = initial_size();
   43.82    size_t max_sz     = max_size();
   43.83    size_t used       = used_in_bytes();
   43.84 -  size_t committed  = survivor_space_committed(_g1h);
   43.85 +  size_t committed  = _g1mm->survivor_space_committed();
   43.86  
   43.87    return MemoryUsage(initial_sz, used, committed, max_sz);
   43.88  }
   43.89 @@ -104,14 +77,15 @@
   43.90  G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
   43.91    G1MemoryPoolSuper(g1h,
   43.92                      "G1 Old Gen",
   43.93 -                    old_space_committed(g1h), /* init_size */
   43.94 +                    g1h->g1mm()->old_space_committed(), /* init_size */
   43.95 +                    _undefined_max,
   43.96                      true /* support_usage_threshold */) { }
   43.97  
   43.98  MemoryUsage G1OldGenPool::get_memory_usage() {
   43.99    size_t initial_sz = initial_size();
  43.100    size_t max_sz     = max_size();
  43.101    size_t used       = used_in_bytes();
  43.102 -  size_t committed  = old_space_committed(_g1h);
  43.103 +  size_t committed  = _g1mm->old_space_committed();
  43.104  
  43.105    return MemoryUsage(initial_sz, used, committed, max_sz);
  43.106  }
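
g1MemoryPool.cpp now caches a G1MonitoringSupport pointer (_g1mm) and delegates every used/committed query to it, replacing the per-pool static helpers, and the eden and survivor pools gain the "Space" suffix in their names. A simplified standalone sketch of that delegation shape; MonitoringSupport and EdenPool are stand-ins for the HotSpot classes, and the sizes are fabricated.

    #include <cstdio>

    // Stand-in for G1MonitoringSupport: one object that owns the numbers.
    struct MonitoringSupport {
      size_t eden_used()      const { return 4u << 20; }  // fabricated values
      size_t eden_committed() const { return 8u << 20; }
    };

    // Stand-in pool that simply reports what the monitoring object says.
    class EdenPool {
      const MonitoringSupport* _mm;  // cached once at construction, like _g1mm
    public:
      explicit EdenPool(const MonitoringSupport* mm) : _mm(mm) {}
      size_t used_in_bytes()      const { return _mm->eden_used(); }
      size_t committed_in_bytes() const { return _mm->eden_committed(); }
    };

    int main() {
      MonitoringSupport mm;
      EdenPool eden(&mm);
      std::printf("eden: %zu used / %zu committed\n",
                  eden.used_in_bytes(), eden.committed_in_bytes());
      return 0;
    }
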
    44.1 --- a/src/share/vm/services/g1MemoryPool.hpp	Fri Sep 30 22:54:43 2011 -0700
    44.2 +++ b/src/share/vm/services/g1MemoryPool.hpp	Thu Oct 06 13:28:09 2011 -0400
    44.3 @@ -26,12 +26,11 @@
    44.4  #define SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
    44.5  
    44.6  #ifndef SERIALGC
    44.7 +#include "gc_implementation/g1/g1MonitoringSupport.hpp"
    44.8  #include "services/memoryPool.hpp"
    44.9  #include "services/memoryUsage.hpp"
   44.10  #endif
   44.11  
   44.12 -class G1CollectedHeap;
   44.13 -
   44.14  // This file contains the three classes that represent the memory
   44.15  // pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
   44.16  // G1OldGenPool. In G1, unlike our other GCs, we do not have a
   44.17 @@ -50,37 +49,19 @@
   44.18  // on this model.
   44.19  //
   44.20  
   44.21 -
   44.22  // This class is shared by the three G1 memory pool classes
   44.23 -// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
   44.24 -// calculate used / committed bytes for these three pools is related
   44.25 -// (see comment above), we put the calculations in this class so that
   44.26 -// we can easily share them among the subclasses.
   44.27 +// (G1EdenPool, G1SurvivorPool, G1OldGenPool).
   44.28  class G1MemoryPoolSuper : public CollectedMemoryPool {
   44.29  protected:
   44.30 -  G1CollectedHeap* _g1h;
   44.31 +  const static size_t _undefined_max = (size_t) -1;
   44.32 +  G1MonitoringSupport* _g1mm;
   44.33  
   44.34    // Would only be called from subclasses.
   44.35    G1MemoryPoolSuper(G1CollectedHeap* g1h,
   44.36                      const char* name,
   44.37                      size_t init_size,
   44.38 +                    size_t max_size,
   44.39                      bool support_usage_threshold);
   44.40 -
   44.41 -  // The reason why all the code is in static methods is so that it
   44.42 -  // can be safely called from the constructors of the subclasses.
   44.43 -
   44.44 -  static size_t undefined_max() {
   44.45 -    return (size_t) -1;
   44.46 -  }
   44.47 -
   44.48 -  static size_t eden_space_committed(G1CollectedHeap* g1h);
   44.49 -  static size_t eden_space_used(G1CollectedHeap* g1h);
   44.50 -
   44.51 -  static size_t survivor_space_committed(G1CollectedHeap* g1h);
   44.52 -  static size_t survivor_space_used(G1CollectedHeap* g1h);
   44.53 -
   44.54 -  static size_t old_space_committed(G1CollectedHeap* g1h);
   44.55 -  static size_t old_space_used(G1CollectedHeap* g1h);
   44.56  };
   44.57  
   44.58  // Memory pool that represents the G1 eden.
   44.59 @@ -89,10 +70,10 @@
   44.60    G1EdenPool(G1CollectedHeap* g1h);
   44.61  
   44.62    size_t used_in_bytes() {
   44.63 -    return eden_space_used(_g1h);
   44.64 +    return _g1mm->eden_space_used();
   44.65    }
   44.66    size_t max_size() const {
   44.67 -    return undefined_max();
   44.68 +    return _undefined_max;
   44.69    }
   44.70    MemoryUsage get_memory_usage();
   44.71  };
   44.72 @@ -103,10 +84,10 @@
   44.73    G1SurvivorPool(G1CollectedHeap* g1h);
   44.74  
   44.75    size_t used_in_bytes() {
   44.76 -    return survivor_space_used(_g1h);
   44.77 +    return _g1mm->survivor_space_used();
   44.78    }
   44.79    size_t max_size() const {
   44.80 -    return undefined_max();
   44.81 +    return _undefined_max;
   44.82    }
   44.83    MemoryUsage get_memory_usage();
   44.84  };
   44.85 @@ -117,10 +98,10 @@
   44.86    G1OldGenPool(G1CollectedHeap* g1h);
   44.87  
   44.88    size_t used_in_bytes() {
   44.89 -    return old_space_used(_g1h);
   44.90 +    return _g1mm->old_space_used();
   44.91    }
   44.92    size_t max_size() const {
   44.93 -    return undefined_max();
   44.94 +    return _undefined_max;
   44.95    }
   44.96    MemoryUsage get_memory_usage();
   44.97  };
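
The header mirrors those changes: the undefined_max() helper becomes the _undefined_max class constant and the G1CollectedHeap* member is replaced by the cached G1MonitoringSupport*. The (size_t)-1 sentinel matches the java.lang.management convention in which a MemoryUsage maximum of -1 means "undefined". A tiny illustration of consuming such a sentinel, with assumed names:

    #include <cstddef>
    #include <cstdio>

    static const size_t kUndefinedMax = (size_t) -1;  // same sentinel as _undefined_max

    static void print_max(size_t max_size) {
      if (max_size == kUndefinedMax) {
        std::printf("max: undefined\n");              // surfaces as -1 through JMX
      } else {
        std::printf("max: %zu bytes\n", max_size);
      }
    }

    int main() {
      print_max(kUndefinedMax);
      print_max((size_t) 512u << 20);
      return 0;
    }
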
    45.1 --- a/src/share/vm/utilities/quickSort.cpp	Fri Sep 30 22:54:43 2011 -0700
    45.2 +++ b/src/share/vm/utilities/quickSort.cpp	Thu Oct 06 13:28:09 2011 -0400
    45.3 @@ -54,16 +54,18 @@
    45.4    return 1;
    45.5  }
    45.6  
    45.7 -static int test_stdlib_comparator(const void* a, const void* b) {
    45.8 -  int ai = *(int*)a;
    45.9 -  int bi = *(int*)b;
   45.10 -  if (ai == bi) {
   45.11 -    return 0;
   45.12 +extern "C" {
   45.13 +  static int test_stdlib_comparator(const void* a, const void* b) {
   45.14 +    int ai = *(int*)a;
   45.15 +    int bi = *(int*)b;
   45.16 +    if (ai == bi) {
   45.17 +      return 0;
   45.18 +    }
   45.19 +    if (ai < bi) {
   45.20 +      return -1;
   45.21 +    }
   45.22 +    return 1;
   45.23    }
   45.24 -  if (ai < bi) {
   45.25 -    return -1;
   45.26 -  }
   45.27 -  return 1;
   45.28  }
   45.29  
   45.30  void QuickSort::print_array(const char* prefix, int* array, int length) {
   45.31 @@ -92,7 +94,6 @@
   45.32  }
   45.33  
   45.34  bool QuickSort::test_quick_sort() {
   45.35 -#if 0
   45.36    tty->print_cr("test_quick_sort\n");
   45.37    {
   45.38      int* test_array = NULL;
   45.39 @@ -213,7 +214,6 @@
   45.40      delete[] test_array;
   45.41      delete[] expected_array;
   45.42    }
   45.43 -#endif
   45.44    return true;
   45.45  }
   45.46  
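
The quickSort.cpp change re-enables the self-test that was previously compiled out under #if 0 and wraps the comparator passed to the C library's qsort() in extern "C", giving it the C language linkage that the qsort function-pointer parameter formally expects (some compilers warn about or reject a C++-linkage pointer there). A minimal standalone example of the same pattern:

    #include <cstdio>
    #include <cstdlib>

    extern "C" {
      // Comparator handed to qsort(); extern "C" gives it C language linkage.
      static int compare_ints(const void* a, const void* b) {
        int ai = *(const int*) a;
        int bi = *(const int*) b;
        return (ai > bi) - (ai < bi);
      }
    }

    int main() {
      int data[] = { 3, 1, 2 };
      std::qsort(data, 3, sizeof(int), compare_ints);
      std::printf("%d %d %d\n", data[0], data[1], data[2]);  // prints: 1 2 3
      return 0;
    }
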
