Merge

author     ccheung
date       Fri, 18 Oct 2013 19:44:40 -0700
changeset  5967:ee99e1a7c5fb
parent     5966:d0453d2fd045
parent     5947:c51cd6af7e61
child      5968:996d1f2f056f

src/share/vm/interpreter/linkResolver.cpp
src/share/vm/memory/metablock.cpp
src/share/vm/memory/metablock.hpp
src/share/vm/runtime/vmStructs.cpp
test/compiler/8013496/Test8013496.sh
test/gc/7168848/HumongousAlloc.java
     1.1 --- a/.hgtags	Fri Oct 18 10:37:26 2013 +0000
     1.2 +++ b/.hgtags	Fri Oct 18 19:44:40 2013 -0700
     1.3 @@ -383,3 +383,5 @@
     1.4  58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
     1.5  6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
     1.6  562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
     1.7 +f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
     1.8 +4a845c7a463844cead9e1e1641d6bcfb8a77f1c7 hs25-b54
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Fri Oct 18 19:44:40 2013 -0700
     2.3 @@ -0,0 +1,119 @@
     2.4 +/*
     2.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
     2.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.7 + *
     2.8 + * This code is free software; you can redistribute it and/or modify it
     2.9 + * under the terms of the GNU General Public License version 2 only, as
    2.10 + * published by the Free Software Foundation.
    2.11 + *
    2.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    2.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    2.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    2.15 + * version 2 for more details (a copy is included in the LICENSE file that
    2.16 + * accompanied this code).
    2.17 + *
    2.18 + * You should have received a copy of the GNU General Public License version
    2.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    2.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    2.21 + *
    2.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    2.23 + * or visit www.oracle.com if you need additional information or have any
    2.24 + * questions.
    2.25 + *
    2.26 + */
    2.27 +
    2.28 +package sun.jvm.hotspot.gc_implementation.g1;
    2.29 +
    2.30 +import java.util.Iterator;
    2.31 +import java.util.Observable;
    2.32 +import java.util.Observer;
    2.33 +
    2.34 +import sun.jvm.hotspot.debugger.Address;
    2.35 +import sun.jvm.hotspot.runtime.VM;
    2.36 +import sun.jvm.hotspot.runtime.VMObject;
    2.37 +import sun.jvm.hotspot.runtime.VMObjectFactory;
    2.38 +import sun.jvm.hotspot.types.AddressField;
    2.39 +import sun.jvm.hotspot.types.CIntegerField;
    2.40 +import sun.jvm.hotspot.types.Type;
    2.41 +import sun.jvm.hotspot.types.TypeDataBase;
    2.42 +
    2.43 +// Mirror class for G1HeapRegionTable. It's essentially an index -> HeapRegion map.
    2.44 +
    2.45 +public class G1HeapRegionTable extends VMObject {
    2.46 +    // HeapRegion** _base;
    2.47 +    static private AddressField baseField;
    2.48 +    // uint _length;
    2.49 +    static private CIntegerField lengthField;
    2.50 +    // HeapRegion** _biased_base
    2.51 +    static private AddressField biasedBaseField;
    2.52 +    // size_t _bias
    2.53 +    static private CIntegerField biasField;
    2.54 +    // uint _shift_by
    2.55 +    static private CIntegerField shiftByField;
    2.56 +
    2.57 +    static {
    2.58 +        VM.registerVMInitializedObserver(new Observer() {
    2.59 +                public void update(Observable o, Object data) {
    2.60 +                    initialize(VM.getVM().getTypeDataBase());
    2.61 +                }
    2.62 +            });
    2.63 +    }
    2.64 +
    2.65 +    static private synchronized void initialize(TypeDataBase db) {
    2.66 +        Type type = db.lookupType("G1HeapRegionTable");
    2.67 +
    2.68 +        baseField = type.getAddressField("_base");
    2.69 +        lengthField = type.getCIntegerField("_length");
    2.70 +        biasedBaseField = type.getAddressField("_biased_base");
    2.71 +        biasField = type.getCIntegerField("_bias");
    2.72 +        shiftByField = type.getCIntegerField("_shift_by");
    2.73 +    }
    2.74 +
    2.75 +    private HeapRegion at(long index) {
    2.76 +        Address arrayAddr = baseField.getValue(addr);
    2.77 +        // Offset of &_base[index]
    2.78 +        long offset = index * VM.getVM().getAddressSize();
    2.79 +        Address regionAddr = arrayAddr.getAddressAt(offset);
    2.80 +        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
    2.81 +                                                      regionAddr);
    2.82 +    }
    2.83 +
    2.84 +    public long length() {
    2.85 +        return lengthField.getValue(addr);
    2.86 +    }
    2.87 +
    2.88 +    public long bias() {
    2.89 +        return biasField.getValue(addr);
    2.90 +    }
    2.91 +
    2.92 +    public long shiftBy() {
    2.93 +        return shiftByField.getValue(addr);
    2.94 +    }
    2.95 +
    2.96 +    private class HeapRegionIterator implements Iterator<HeapRegion> {
    2.97 +        private long index;
    2.98 +        private long length;
    2.99 +
   2.100 +        @Override
   2.101 +        public boolean hasNext() { return index < length; }
   2.102 +
   2.103 +        @Override
   2.104 +        public HeapRegion next() { return at(index++);    }
   2.105 +
   2.106 +        @Override
   2.107 +        public void remove()     { /* not supported */    }
   2.108 +
   2.109 +        HeapRegionIterator(Address addr) {
   2.110 +            index = 0;
   2.111 +            length = length();
   2.112 +        }
   2.113 +    }
   2.114 +
   2.115 +    public Iterator<HeapRegion> heapRegionIterator() {
   2.116 +        return new HeapRegionIterator(addr);
   2.117 +    }
   2.118 +
   2.119 +    public G1HeapRegionTable(Address addr) {
   2.120 +        super(addr);
   2.121 +    }
   2.122 +}
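
For context, a minimal sketch of how the new mirror class above might be driven from a Serviceability Agent tool. Only heapRegionIterator() and getAddress() come from the code in this changeset; obtaining the HeapRegionSeq address from the SA attach plumbing is elided and assumed here.

    import java.util.Iterator;

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.gc_implementation.g1.HeapRegion;
    import sun.jvm.hotspot.gc_implementation.g1.HeapRegionSeq;

    public class PrintG1Regions {
        // 'seqAddr' is the address of the HeapRegionSeq inside
        // G1CollectedHeap, obtained via the usual SA attach machinery.
        static void printRegions(Address seqAddr) {
            HeapRegionSeq seq = new HeapRegionSeq(seqAddr);
            // HeapRegionSeq now delegates to the embedded G1HeapRegionTable
            Iterator<HeapRegion> it = seq.heapRegionIterator();
            long i = 0;
            while (it.hasNext()) {
                HeapRegion r = it.next();
                System.out.println("region " + (i++) + " at " + r.getAddress());
            }
        }
    }
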
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Fri Oct 18 10:37:26 2013 +0000
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Fri Oct 18 19:44:40 2013 -0700
     3.3 @@ -1,5 +1,5 @@
     3.4  /*
     3.5 - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
     3.6 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
     3.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.8   *
     3.9   * This code is free software; you can redistribute it and/or modify it
    3.10 @@ -37,13 +37,11 @@
    3.11  import sun.jvm.hotspot.types.Type;
    3.12  import sun.jvm.hotspot.types.TypeDataBase;
    3.13  
    3.14 -// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
    3.15 +// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
    3.16  
    3.17  public class HeapRegionSeq extends VMObject {
    3.18 -    // HeapRegion** _regions;
    3.19 -    static private AddressField regionsField;
    3.20 -    // uint _length;
    3.21 -    static private CIntegerField lengthField;
    3.22 +    // G1HeapRegionTable _regions
    3.23 +    static private long regionsFieldOffset;
    3.24  
    3.25      static {
    3.26          VM.registerVMInitializedObserver(new Observer() {
    3.27 @@ -56,44 +54,21 @@
    3.28      static private synchronized void initialize(TypeDataBase db) {
    3.29          Type type = db.lookupType("HeapRegionSeq");
    3.30  
    3.31 -        regionsField = type.getAddressField("_regions");
    3.32 -        lengthField = type.getCIntegerField("_length");
    3.33 +        regionsFieldOffset = type.getField("_regions").getOffset();
    3.34      }
    3.35  
    3.36 -    private HeapRegion at(long index) {
    3.37 -        Address arrayAddr = regionsField.getValue(addr);
    3.38 -        // Offset of &_region[index]
    3.39 -        long offset = index * VM.getVM().getAddressSize();
    3.40 -        Address regionAddr = arrayAddr.getAddressAt(offset);
    3.41 -        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
    3.42 -                                                      regionAddr);
    3.43 +    private G1HeapRegionTable regions() {
    3.44 +        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
    3.45 +        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
    3.46 +                                                             regionsAddr);
    3.47      }
    3.48  
    3.49      public long length() {
    3.50 -        return lengthField.getValue(addr);
    3.51 -    }
    3.52 -
    3.53 -    private class HeapRegionIterator implements Iterator<HeapRegion> {
    3.54 -        private long index;
    3.55 -        private long length;
    3.56 -
    3.57 -        @Override
    3.58 -        public boolean hasNext() { return index < length; }
    3.59 -
    3.60 -        @Override
    3.61 -        public HeapRegion next() { return at(index++);    }
    3.62 -
    3.63 -        @Override
    3.64 -        public void remove()     { /* not supported */    }
    3.65 -
    3.66 -        HeapRegionIterator(Address addr) {
    3.67 -            index = 0;
    3.68 -            length = length();
    3.69 -        }
    3.70 +        return regions().length();
    3.71      }
    3.72  
    3.73      public Iterator<HeapRegion> heapRegionIterator() {
    3.74 -        return new HeapRegionIterator(addr);
    3.75 +        return regions().heapRegionIterator();
    3.76      }
    3.77  
    3.78      public HeapRegionSeq(Address addr) {
     4.1 --- a/make/hotspot_version	Fri Oct 18 10:37:26 2013 +0000
     4.2 +++ b/make/hotspot_version	Fri Oct 18 19:44:40 2013 -0700
     4.3 @@ -35,7 +35,7 @@
     4.4  
     4.5  HS_MAJOR_VER=25
     4.6  HS_MINOR_VER=0
     4.7 -HS_BUILD_NUMBER=54
     4.8 +HS_BUILD_NUMBER=55
     4.9  
    4.10  JDK_MAJOR_VER=1
    4.11  JDK_MINOR_VER=8
     5.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Oct 18 10:37:26 2013 +0000
     5.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Oct 18 19:44:40 2013 -0700
     5.3 @@ -3100,6 +3100,10 @@
     5.4    }
     5.5  }
     5.6  
     5.7 +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
     5.8 +  fatal("Type profiling not implemented on this platform");
     5.9 +}
    5.10 +
    5.11  void LIR_Assembler::align_backward_branch_target() {
    5.12    __ align(OptoLoopAlignment);
    5.13  }
     6.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Oct 18 10:37:26 2013 +0000
     6.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Oct 18 19:44:40 2013 -0700
     6.3 @@ -1076,6 +1076,25 @@
     6.4  
     6.5    __ verify_not_null_oop(Oexception);
     6.6  
     6.7 +#ifdef ASSERT
     6.8 +  // check that fields in JavaThread for exception oop and issuing pc are
     6.9 +  // empty before writing to them
    6.10 +  Label oop_empty;
    6.11 +  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
    6.12 +  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
    6.13 +  __ br_null(scratch, false, Assembler::pt, oop_empty);
    6.14 +  __ delayed()->nop();
    6.15 +  __ stop("exception oop already set");
    6.16 +  __ bind(oop_empty);
    6.17 +
    6.18 +  Label pc_empty;
    6.19 +  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
    6.20 +  __ br_null(scratch, false, Assembler::pt, pc_empty);
    6.21 +  __ delayed()->nop();
    6.22 +  __ stop("exception pc already set");
    6.23 +  __ bind(pc_empty);
    6.24 +#endif
    6.25 +
    6.26    // save the exception and issuing pc in the thread
    6.27    __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
    6.28    __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
     7.1 --- a/src/cpu/sparc/vm/globals_sparc.hpp	Fri Oct 18 10:37:26 2013 +0000
     7.2 +++ b/src/cpu/sparc/vm/globals_sparc.hpp	Fri Oct 18 19:44:40 2013 -0700
     7.3 @@ -76,6 +76,8 @@
     7.4  // GC Ergo Flags
     7.5  define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
     7.6  
     7.7 +define_pd_global(uintx, TypeProfileLevel, 0);
     7.8 +
     7.9  #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
    7.10                                                                              \
    7.11    product(intx, UseVIS, 99,                                                 \
     8.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Oct 18 10:37:26 2013 +0000
     8.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Oct 18 19:44:40 2013 -0700
     8.3 @@ -3581,6 +3581,7 @@
     8.4    // the pending exception will be picked up the interpreter.
     8.5    __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
     8.6    __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
     8.7 +  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
     8.8    __ bind(noException);
     8.9  
    8.10    // deallocate the deoptimization frame taking care to preserve the return values
     9.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Oct 18 10:37:26 2013 +0000
     9.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Oct 18 19:44:40 2013 -0700
     9.3 @@ -3632,6 +3632,161 @@
     9.4    }
     9.5  }
     9.6  
     9.7 +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
     9.8 +  Register obj = op->obj()->as_register();
     9.9 +  Register tmp = op->tmp()->as_pointer_register();
    9.10 +  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
    9.11 +  ciKlass* exact_klass = op->exact_klass();
    9.12 +  intptr_t current_klass = op->current_klass();
    9.13 +  bool not_null = op->not_null();
    9.14 +  bool no_conflict = op->no_conflict();
    9.15 +
    9.16 +  Label update, next, none;
    9.17 +
    9.18 +  bool do_null = !not_null;
    9.19 +  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
    9.20 +  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
    9.21 +
    9.22 +  assert(do_null || do_update, "why are we here?");
    9.23 +  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
    9.24 +
    9.25 +  __ verify_oop(obj);
    9.26 +
    9.27 +  if (tmp != obj) {
    9.28 +    __ mov(tmp, obj);
    9.29 +  }
    9.30 +  if (do_null) {
    9.31 +    __ testptr(tmp, tmp);
    9.32 +    __ jccb(Assembler::notZero, update);
    9.33 +    if (!TypeEntries::was_null_seen(current_klass)) {
    9.34 +      __ orptr(mdo_addr, TypeEntries::null_seen);
    9.35 +    }
    9.36 +    if (do_update) {
    9.37 +#ifndef ASSERT
    9.38 +      __ jmpb(next);
    9.39 +    }
    9.40 +#else
    9.41 +      __ jmp(next);
    9.42 +    }
    9.43 +  } else {
    9.44 +    __ testptr(tmp, tmp);
    9.45 +    __ jccb(Assembler::notZero, update);
     9.46 +    __ stop("unexpected null obj");
    9.47 +#endif
    9.48 +  }
    9.49 +
    9.50 +  __ bind(update);
    9.51 +
    9.52 +  if (do_update) {
    9.53 +#ifdef ASSERT
    9.54 +    if (exact_klass != NULL) {
    9.55 +      Label ok;
    9.56 +      __ load_klass(tmp, tmp);
    9.57 +      __ push(tmp);
    9.58 +      __ mov_metadata(tmp, exact_klass->constant_encoding());
    9.59 +      __ cmpptr(tmp, Address(rsp, 0));
    9.60 +      __ jccb(Assembler::equal, ok);
    9.61 +      __ stop("exact klass and actual klass differ");
    9.62 +      __ bind(ok);
    9.63 +      __ pop(tmp);
    9.64 +    }
    9.65 +#endif
    9.66 +    if (!no_conflict) {
    9.67 +      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
    9.68 +        if (exact_klass != NULL) {
    9.69 +          __ mov_metadata(tmp, exact_klass->constant_encoding());
    9.70 +        } else {
    9.71 +          __ load_klass(tmp, tmp);
    9.72 +        }
    9.73 +
    9.74 +        __ xorptr(tmp, mdo_addr);
    9.75 +        __ testptr(tmp, TypeEntries::type_klass_mask);
    9.76 +        // klass seen before, nothing to do. The unknown bit may have been
    9.77 +        // set already but no need to check.
    9.78 +        __ jccb(Assembler::zero, next);
    9.79 +
    9.80 +        __ testptr(tmp, TypeEntries::type_unknown);
    9.81 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
    9.82 +
    9.83 +        if (TypeEntries::is_type_none(current_klass)) {
    9.84 +          __ cmpptr(mdo_addr, 0);
    9.85 +          __ jccb(Assembler::equal, none);
    9.86 +          __ cmpptr(mdo_addr, TypeEntries::null_seen);
    9.87 +          __ jccb(Assembler::equal, none);
    9.88 +          // There is a chance that the checks above (re-reading profiling
    9.89 +          // data from memory) fail if another thread has just set the
    9.90 +          // profiling to this obj's klass
    9.91 +          __ xorptr(tmp, mdo_addr);
    9.92 +          __ testptr(tmp, TypeEntries::type_klass_mask);
    9.93 +          __ jccb(Assembler::zero, next);
    9.94 +        }
    9.95 +      } else {
    9.96 +        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
    9.97 +               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
    9.98 +
    9.99 +        __ movptr(tmp, mdo_addr);
   9.100 +        __ testptr(tmp, TypeEntries::type_unknown);
   9.101 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   9.102 +      }
   9.103 +
   9.104 +      // different than before. Cannot keep accurate profile.
   9.105 +      __ orptr(mdo_addr, TypeEntries::type_unknown);
   9.106 +
   9.107 +      if (TypeEntries::is_type_none(current_klass)) {
   9.108 +        __ jmpb(next);
   9.109 +
   9.110 +        __ bind(none);
   9.111 +        // first time here. Set profile type.
   9.112 +        __ movptr(mdo_addr, tmp);
   9.113 +      }
   9.114 +    } else {
   9.115 +      // There's a single possible klass at this profile point
   9.116 +      assert(exact_klass != NULL, "should be");
   9.117 +      if (TypeEntries::is_type_none(current_klass)) {
   9.118 +        __ mov_metadata(tmp, exact_klass->constant_encoding());
   9.119 +        __ xorptr(tmp, mdo_addr);
   9.120 +        __ testptr(tmp, TypeEntries::type_klass_mask);
   9.121 +#ifdef ASSERT
   9.122 +        __ jcc(Assembler::zero, next);
   9.123 +
   9.124 +        {
   9.125 +          Label ok;
   9.126 +          __ push(tmp);
   9.127 +          __ cmpptr(mdo_addr, 0);
   9.128 +          __ jcc(Assembler::equal, ok);
   9.129 +          __ cmpptr(mdo_addr, TypeEntries::null_seen);
   9.130 +          __ jcc(Assembler::equal, ok);
   9.131 +          // may have been set by another thread
   9.132 +          __ mov_metadata(tmp, exact_klass->constant_encoding());
   9.133 +          __ xorptr(tmp, mdo_addr);
   9.134 +          __ testptr(tmp, TypeEntries::type_mask);
   9.135 +          __ jcc(Assembler::zero, ok);
   9.136 +
   9.137 +          __ stop("unexpected profiling mismatch");
   9.138 +          __ bind(ok);
   9.139 +          __ pop(tmp);
   9.140 +        }
   9.141 +#else
   9.142 +        __ jccb(Assembler::zero, next);
   9.143 +#endif
   9.144 +        // first time here. Set profile type.
   9.145 +        __ movptr(mdo_addr, tmp);
   9.146 +      } else {
   9.147 +        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
   9.148 +               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
   9.149 +
   9.150 +        __ movptr(tmp, mdo_addr);
   9.151 +        __ testptr(tmp, TypeEntries::type_unknown);
   9.152 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   9.153 +
   9.154 +        __ orptr(mdo_addr, TypeEntries::type_unknown);
   9.155 +      }
   9.156 +    }
   9.157 +
   9.158 +    __ bind(next);
   9.159 +  }
   9.160 +}
   9.161 +
   9.162  void LIR_Assembler::emit_delay(LIR_OpDelay*) {
   9.163    Unimplemented();
   9.164  }
    10.1 --- a/src/cpu/x86/vm/globals_x86.hpp	Fri Oct 18 10:37:26 2013 +0000
    10.2 +++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Oct 18 19:44:40 2013 -0700
    10.3 @@ -79,6 +79,8 @@
    10.4  // GC Ergo Flags
    10.5  define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
    10.6  
    10.7 +define_pd_global(uintx, TypeProfileLevel, 11);
    10.8 +
    10.9  #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
   10.10                                                                              \
   10.11    develop(bool, IEEEPrecision, true,                                        \
    11.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Oct 18 10:37:26 2013 +0000
    11.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Oct 18 19:44:40 2013 -0700
    11.3 @@ -1046,6 +1046,158 @@
    11.4    }
    11.5  }
    11.6  
    11.7 +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
    11.8 +  Label update, next, none;
    11.9 +
   11.10 +  verify_oop(obj);
   11.11 +
   11.12 +  testptr(obj, obj);
   11.13 +  jccb(Assembler::notZero, update);
   11.14 +  orptr(mdo_addr, TypeEntries::null_seen);
   11.15 +  jmpb(next);
   11.16 +
   11.17 +  bind(update);
   11.18 +  load_klass(obj, obj);
   11.19 +
   11.20 +  xorptr(obj, mdo_addr);
   11.21 +  testptr(obj, TypeEntries::type_klass_mask);
   11.22 +  jccb(Assembler::zero, next); // klass seen before, nothing to
   11.23 +                               // do. The unknown bit may have been
   11.24 +                               // set already but no need to check.
   11.25 +
   11.26 +  testptr(obj, TypeEntries::type_unknown);
   11.27 +  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   11.28 +
   11.29 +  cmpptr(mdo_addr, 0);
   11.30 +  jccb(Assembler::equal, none);
   11.31 +  cmpptr(mdo_addr, TypeEntries::null_seen);
   11.32 +  jccb(Assembler::equal, none);
   11.33 +  // There is a chance that the checks above (re-reading profiling
   11.34 +  // data from memory) fail if another thread has just set the
   11.35 +  // profiling to this obj's klass
   11.36 +  xorptr(obj, mdo_addr);
   11.37 +  testptr(obj, TypeEntries::type_klass_mask);
   11.38 +  jccb(Assembler::zero, next);
   11.39 +
   11.40 +  // different than before. Cannot keep accurate profile.
   11.41 +  orptr(mdo_addr, TypeEntries::type_unknown);
   11.42 +  jmpb(next);
   11.43 +
   11.44 +  bind(none);
   11.45 +  // first time here. Set profile type.
   11.46 +  movptr(mdo_addr, obj);
   11.47 +
   11.48 +  bind(next);
   11.49 +}
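
The word at mdo_addr packs a klass pointer and two low flag bits (Klass* values are aligned, so the low bits are free; after the xorptr the flag bits of the result are exactly the cell's flag bits). A rough single-threaded Java model of the update performed above; the bit values are illustrative stand-ins for the real TypeEntries constants, and the racy re-read of the cell is omitted:

    // Illustrative model of the type-profile cell updated by profile_obj_type.
    final class TypeProfileCell {
        static final long NULL_SEEN    = 0x1; // a null was observed here
        static final long TYPE_UNKNOWN = 0x2; // conflicting klasses observed
        static final long FLAG_MASK    = NULL_SEEN | TYPE_UNKNOWN;
        static final long KLASS_MASK   = ~FLAG_MASK;

        long cell; // flag bits low, klass pointer in the remaining bits

        void record(long klassPtrOrZero) {
            if (klassPtrOrZero == 0) {        // null object: just set the flag
                cell |= NULL_SEEN;
            } else if (((klassPtrOrZero ^ cell) & KLASS_MASK) == 0) {
                // same klass as before: nothing to do
            } else if ((cell & TYPE_UNKNOWN) != 0) {
                // already polluted: nothing to do
            } else if ((cell & KLASS_MASK) == 0) {
                // first klass seen here: store it, keeping any null_seen flag
                cell = klassPtrOrZero | (cell & FLAG_MASK);
            } else {
                // a different klass: give up on an exact type
                cell |= TYPE_UNKNOWN;
            }
        }
    }
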
   11.50 +
   11.51 +void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
   11.52 +  if (!ProfileInterpreter) {
   11.53 +    return;
   11.54 +  }
   11.55 +
   11.56 +  if (MethodData::profile_arguments() || MethodData::profile_return()) {
   11.57 +    Label profile_continue;
   11.58 +
   11.59 +    test_method_data_pointer(mdp, profile_continue);
   11.60 +
   11.61 +    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
   11.62 +
   11.63 +    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
   11.64 +    jcc(Assembler::notEqual, profile_continue);
   11.65 +
   11.66 +    if (MethodData::profile_arguments()) {
   11.67 +      Label done;
   11.68 +      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
   11.69 +      addptr(mdp, off_to_args);
   11.70 +
   11.71 +      for (int i = 0; i < TypeProfileArgsLimit; i++) {
   11.72 +        if (i > 0 || MethodData::profile_return()) {
   11.73 +          // If return value type is profiled we may have no argument to profile
   11.74 +          movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
   11.75 +          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
   11.76 +          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
   11.77 +          jcc(Assembler::less, done);
   11.78 +        }
   11.79 +        movptr(tmp, Address(callee, Method::const_offset()));
   11.80 +        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
   11.81 +        // stack offset o (zero based) from the start of the argument
   11.82 +        // list, for n arguments translates into offset n - o - 1 from
   11.83 +        // the end of the argument list
   11.84 +        subl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
   11.85 +        subl(tmp, 1);
   11.86 +        Address arg_addr = argument_address(tmp);
   11.87 +        movptr(tmp, arg_addr);
   11.88 +
   11.89 +        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
   11.90 +        profile_obj_type(tmp, mdo_arg_addr);
   11.91 +
   11.92 +        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
   11.93 +        addptr(mdp, to_add);
   11.94 +        off_to_args += to_add;
   11.95 +      }
   11.96 +
   11.97 +      if (MethodData::profile_return()) {
   11.98 +        movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
   11.99 +        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
  11.100 +      }
  11.101 +
  11.102 +      bind(done);
  11.103 +
  11.104 +      if (MethodData::profile_return()) {
  11.105 +        // We're right after the type profile for the last
   11.106 +        // argument. tmp is the number of cells left in the
   11.107 +        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
  11.108 +        // if there's a return to profile.
  11.109 +        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
  11.110 +        shll(tmp, exact_log2(DataLayout::cell_size));
  11.111 +        addptr(mdp, tmp);
  11.112 +      }
  11.113 +      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
  11.114 +    } else {
  11.115 +      assert(MethodData::profile_return(), "either profile call args or call ret");
  11.116 +      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
  11.117 +    }
  11.118 +
  11.119 +    // mdp points right after the end of the
  11.120 +    // CallTypeData/VirtualCallTypeData, right after the cells for the
  11.121 +    // return value type if there's one
  11.122 +
  11.123 +    bind(profile_continue);
  11.124 +  }
  11.125 +}
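
The stack-slot arithmetic above ("n - o - 1") is easy to misread, so a tiny worked example, assuming a callee with n = 3 single-slot parameters: a profiled slot o, counted from the start of the argument list, lies n - o - 1 slots from the end, which is what argument_address() expects.

    // Worked example of the "n - o - 1" translation used above.
    public class SlotMath {
        static int offsetFromEnd(int numParams, int slotFromStart) {
            return numParams - slotFromStart - 1;
        }
        public static void main(String[] args) {
            int n = 3;                               // e.g. (Object, int, String)
            System.out.println(offsetFromEnd(n, 0)); // first arg -> 2 from end
            System.out.println(offsetFromEnd(n, 2)); // last arg  -> 0 from end
        }
    }
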
  11.126 +
  11.127 +void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  11.128 +  assert_different_registers(mdp, ret, tmp, rsi);
  11.129 +  if (ProfileInterpreter && MethodData::profile_return()) {
  11.130 +    Label profile_continue, done;
  11.131 +
  11.132 +    test_method_data_pointer(mdp, profile_continue);
  11.133 +
  11.134 +    if (MethodData::profile_return_jsr292_only()) {
  11.135 +      // If we don't profile all invoke bytecodes we must make sure
  11.136 +      // it's a bytecode we indeed profile. We can't go back to the
   11.137 +      // beginning of the ProfileData we intend to update to check its
   11.138 +      // type because we're right after it and we don't know its
  11.139 +      // length
  11.140 +      Label do_profile;
  11.141 +      cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
  11.142 +      jcc(Assembler::equal, do_profile);
  11.143 +      cmpb(Address(rsi, 0), Bytecodes::_invokehandle);
  11.144 +      jcc(Assembler::equal, do_profile);
  11.145 +      get_method(tmp);
  11.146 +      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
  11.147 +      jcc(Assembler::notEqual, profile_continue);
  11.148 +
  11.149 +      bind(do_profile);
  11.150 +    }
  11.151 +
  11.152 +    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
  11.153 +    mov(tmp, ret);
  11.154 +    profile_obj_type(tmp, mdo_ret_addr);
  11.155 +
  11.156 +    bind(profile_continue);
  11.157 +  }
  11.158 +}
  11.159  
  11.160  void InterpreterMacroAssembler::profile_call(Register mdp) {
  11.161    if (ProfileInterpreter) {
    12.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Fri Oct 18 10:37:26 2013 +0000
    12.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Fri Oct 18 19:44:40 2013 -0700
    12.3 @@ -215,6 +215,9 @@
    12.4  
    12.5    void profile_taken_branch(Register mdp, Register bumped_count);
    12.6    void profile_not_taken_branch(Register mdp);
    12.7 +  void profile_obj_type(Register obj, const Address& mdo_addr);
    12.8 +  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
    12.9 +  void profile_return_type(Register mdp, Register ret, Register tmp);
   12.10    void profile_call(Register mdp);
   12.11    void profile_final_call(Register mdp);
   12.12    void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
    13.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Oct 18 10:37:26 2013 +0000
    13.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Oct 18 19:44:40 2013 -0700
    13.3 @@ -1067,6 +1067,159 @@
    13.4    }
    13.5  }
    13.6  
    13.7 +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
    13.8 +  Label update, next, none;
    13.9 +
   13.10 +  verify_oop(obj);
   13.11 +
   13.12 +  testptr(obj, obj);
   13.13 +  jccb(Assembler::notZero, update);
   13.14 +  orptr(mdo_addr, TypeEntries::null_seen);
   13.15 +  jmpb(next);
   13.16 +
   13.17 +  bind(update);
   13.18 +  load_klass(obj, obj);
   13.19 +
   13.20 +  xorptr(obj, mdo_addr);
   13.21 +  testptr(obj, TypeEntries::type_klass_mask);
   13.22 +  jccb(Assembler::zero, next); // klass seen before, nothing to
   13.23 +                               // do. The unknown bit may have been
   13.24 +                               // set already but no need to check.
   13.25 +
   13.26 +  testptr(obj, TypeEntries::type_unknown);
   13.27 +  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   13.28 +
   13.29 +  // There is a chance that by the time we do these checks (re-reading
    13.30 +  // profiling data from memory) another thread has set the profiling
    13.31 +  // to this obj's klass and we set the profiling as unknown
   13.32 +  // erroneously
   13.33 +  cmpptr(mdo_addr, 0);
   13.34 +  jccb(Assembler::equal, none);
   13.35 +  cmpptr(mdo_addr, TypeEntries::null_seen);
   13.36 +  jccb(Assembler::equal, none);
   13.37 +  // There is a chance that the checks above (re-reading profiling
   13.38 +  // data from memory) fail if another thread has just set the
   13.39 +  // profiling to this obj's klass
   13.40 +  xorptr(obj, mdo_addr);
   13.41 +  testptr(obj, TypeEntries::type_klass_mask);
   13.42 +  jccb(Assembler::zero, next);
   13.43 +
   13.44 +  // different than before. Cannot keep accurate profile.
   13.45 +  orptr(mdo_addr, TypeEntries::type_unknown);
   13.46 +  jmpb(next);
   13.47 +
   13.48 +  bind(none);
   13.49 +  // first time here. Set profile type.
   13.50 +  movptr(mdo_addr, obj);
   13.51 +
   13.52 +  bind(next);
   13.53 +}
   13.54 +
   13.55 +void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
   13.56 +  if (!ProfileInterpreter) {
   13.57 +    return;
   13.58 +  }
   13.59 +
   13.60 +  if (MethodData::profile_arguments() || MethodData::profile_return()) {
   13.61 +    Label profile_continue;
   13.62 +
   13.63 +    test_method_data_pointer(mdp, profile_continue);
   13.64 +
   13.65 +    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
   13.66 +
   13.67 +    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
   13.68 +    jcc(Assembler::notEqual, profile_continue);
   13.69 +
   13.70 +    if (MethodData::profile_arguments()) {
   13.71 +      Label done;
   13.72 +      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
   13.73 +      addptr(mdp, off_to_args);
   13.74 +
   13.75 +      for (int i = 0; i < TypeProfileArgsLimit; i++) {
   13.76 +        if (i > 0 || MethodData::profile_return()) {
   13.77 +          // If return value type is profiled we may have no argument to profile
   13.78 +          movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
   13.79 +          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
   13.80 +          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
   13.81 +          jcc(Assembler::less, done);
   13.82 +        }
   13.83 +        movptr(tmp, Address(callee, Method::const_offset()));
   13.84 +        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
   13.85 +        subq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
   13.86 +        subl(tmp, 1);
   13.87 +        Address arg_addr = argument_address(tmp);
   13.88 +        movptr(tmp, arg_addr);
   13.89 +
   13.90 +        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
   13.91 +        profile_obj_type(tmp, mdo_arg_addr);
   13.92 +
   13.93 +        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
   13.94 +        addptr(mdp, to_add);
   13.95 +        off_to_args += to_add;
   13.96 +      }
   13.97 +
   13.98 +      if (MethodData::profile_return()) {
   13.99 +        movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
  13.100 +        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
  13.101 +      }
  13.102 +
  13.103 +      bind(done);
  13.104 +
  13.105 +      if (MethodData::profile_return()) {
  13.106 +        // We're right after the type profile for the last
   13.107 +        // argument. tmp is the number of cells left in the
   13.108 +        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
  13.109 +        // if there's a return to profile.
  13.110 +        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
  13.111 +        shll(tmp, exact_log2(DataLayout::cell_size));
  13.112 +        addptr(mdp, tmp);
  13.113 +      }
  13.114 +      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
  13.115 +    } else {
  13.116 +      assert(MethodData::profile_return(), "either profile call args or call ret");
  13.117 +      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
  13.118 +    }
  13.119 +
  13.120 +    // mdp points right after the end of the
  13.121 +    // CallTypeData/VirtualCallTypeData, right after the cells for the
  13.122 +    // return value type if there's one
  13.123 +
  13.124 +    bind(profile_continue);
  13.125 +  }
  13.126 +}
  13.127 +
  13.128 +void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  13.129 +  assert_different_registers(mdp, ret, tmp, r13);
  13.130 +  if (ProfileInterpreter && MethodData::profile_return()) {
  13.131 +    Label profile_continue, done;
  13.132 +
  13.133 +    test_method_data_pointer(mdp, profile_continue);
  13.134 +
  13.135 +    if (MethodData::profile_return_jsr292_only()) {
  13.136 +      // If we don't profile all invoke bytecodes we must make sure
  13.137 +      // it's a bytecode we indeed profile. We can't go back to the
   13.138 +      // beginning of the ProfileData we intend to update to check its
   13.139 +      // type because we're right after it and we don't know its
  13.140 +      // length
  13.141 +      Label do_profile;
  13.142 +      cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
  13.143 +      jcc(Assembler::equal, do_profile);
  13.144 +      cmpb(Address(r13, 0), Bytecodes::_invokehandle);
  13.145 +      jcc(Assembler::equal, do_profile);
  13.146 +      get_method(tmp);
  13.147 +      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
  13.148 +      jcc(Assembler::notEqual, profile_continue);
  13.149 +
  13.150 +      bind(do_profile);
  13.151 +    }
  13.152 +
  13.153 +    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
  13.154 +    mov(tmp, ret);
  13.155 +    profile_obj_type(tmp, mdo_ret_addr);
  13.156 +
  13.157 +    bind(profile_continue);
  13.158 +  }
  13.159 +}
  13.160  
  13.161  void InterpreterMacroAssembler::profile_call(Register mdp) {
  13.162    if (ProfileInterpreter) {
    14.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Fri Oct 18 10:37:26 2013 +0000
    14.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Fri Oct 18 19:44:40 2013 -0700
    14.3 @@ -224,6 +224,9 @@
    14.4  
    14.5    void profile_taken_branch(Register mdp, Register bumped_count);
    14.6    void profile_not_taken_branch(Register mdp);
    14.7 +  void profile_obj_type(Register obj, const Address& mdo_addr);
    14.8 +  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
    14.9 +  void profile_return_type(Register mdp, Register ret, Register tmp);
   14.10    void profile_call(Register mdp);
   14.11    void profile_final_call(Register mdp);
   14.12    void profile_virtual_call(Register receiver, Register mdp,
    15.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Oct 18 10:37:26 2013 +0000
    15.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Oct 18 19:44:40 2013 -0700
    15.3 @@ -773,6 +773,7 @@
    15.4    void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    15.5    void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    15.6    void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    15.7 +  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
    15.8  
    15.9    void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
   15.10    void testptr(Register src1, Register src2);
    16.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Oct 18 10:37:26 2013 +0000
    16.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Oct 18 19:44:40 2013 -0700
    16.3 @@ -194,6 +194,12 @@
    16.4    __ restore_bcp();
    16.5    __ restore_locals();
    16.6  
    16.7 +  if (incoming_state == atos) {
    16.8 +    Register mdp = rbx;
    16.9 +    Register tmp = rcx;
   16.10 +    __ profile_return_type(mdp, rax, tmp);
   16.11 +  }
   16.12 +
   16.13    Label L_got_cache, L_giant_index;
   16.14    if (EnableInvokeDynamic) {
   16.15      __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    17.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Oct 18 10:37:26 2013 +0000
    17.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Oct 18 19:44:40 2013 -0700
    17.3 @@ -177,6 +177,12 @@
    17.4    __ restore_bcp();
    17.5    __ restore_locals();
    17.6  
    17.7 +  if (state == atos) {
    17.8 +    Register mdp = rbx;
    17.9 +    Register tmp = rcx;
   17.10 +    __ profile_return_type(mdp, rax, tmp);
   17.11 +  }
   17.12 +
   17.13    Label L_got_cache, L_giant_index;
   17.14    if (EnableInvokeDynamic) {
   17.15      __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
    18.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Oct 18 10:37:26 2013 +0000
    18.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Oct 18 19:44:40 2013 -0700
    18.3 @@ -2970,6 +2970,7 @@
    18.4  
    18.5    // profile this call
    18.6    __ profile_final_call(rax);
    18.7 +  __ profile_arguments_type(rax, method, rsi, true);
    18.8  
    18.9    __ jump_from_interpreted(method, rax);
   18.10  
   18.11 @@ -2984,6 +2985,7 @@
   18.12  
   18.13    // get target Method* & entry point
   18.14    __ lookup_virtual_method(rax, index, method);
   18.15 +  __ profile_arguments_type(rdx, method, rsi, true);
   18.16    __ jump_from_interpreted(method, rdx);
   18.17  }
   18.18  
   18.19 @@ -3013,6 +3015,7 @@
   18.20    __ null_check(rcx);
   18.21    // do the call
   18.22    __ profile_call(rax);
   18.23 +  __ profile_arguments_type(rax, rbx, rsi, false);
   18.24    __ jump_from_interpreted(rbx, rax);
   18.25  }
   18.26  
   18.27 @@ -3023,6 +3026,7 @@
   18.28    prepare_invoke(byte_no, rbx);  // get f1 Method*
   18.29    // do the call
   18.30    __ profile_call(rax);
   18.31 +  __ profile_arguments_type(rax, rbx, rsi, false);
   18.32    __ jump_from_interpreted(rbx, rax);
   18.33  }
   18.34  
   18.35 @@ -3082,6 +3086,8 @@
   18.36    __ testptr(rbx, rbx);
   18.37    __ jcc(Assembler::zero, no_such_method);
   18.38  
   18.39 +  __ profile_arguments_type(rdx, rbx, rsi, true);
   18.40 +
   18.41    // do the call
   18.42    // rcx: receiver
   18.43    // rbx,: Method*
   18.44 @@ -3138,6 +3144,7 @@
   18.45  
   18.46    // FIXME: profile the LambdaForm also
   18.47    __ profile_final_call(rax);
   18.48 +  __ profile_arguments_type(rdx, rbx_method, rsi, true);
   18.49  
   18.50    __ jump_from_interpreted(rbx_method, rdx);
   18.51  }
   18.52 @@ -3171,6 +3178,7 @@
   18.53    // %%% should make a type profile for any invokedynamic that takes a ref argument
   18.54    // profile this call
   18.55    __ profile_call(rsi);
   18.56 +  __ profile_arguments_type(rdx, rbx, rsi, false);
   18.57  
   18.58    __ verify_oop(rax_callsite);
   18.59  
    19.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Oct 18 10:37:26 2013 +0000
    19.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Oct 18 19:44:40 2013 -0700
    19.3 @@ -3026,6 +3026,7 @@
    19.4  
    19.5    // profile this call
    19.6    __ profile_final_call(rax);
    19.7 +  __ profile_arguments_type(rax, method, r13, true);
    19.8  
    19.9    __ jump_from_interpreted(method, rax);
   19.10  
   19.11 @@ -3040,6 +3041,7 @@
   19.12  
   19.13    // get target Method* & entry point
   19.14    __ lookup_virtual_method(rax, index, method);
   19.15 +  __ profile_arguments_type(rdx, method, r13, true);
   19.16    __ jump_from_interpreted(method, rdx);
   19.17  }
   19.18  
   19.19 @@ -3069,6 +3071,7 @@
   19.20    __ null_check(rcx);
   19.21    // do the call
   19.22    __ profile_call(rax);
   19.23 +  __ profile_arguments_type(rax, rbx, r13, false);
   19.24    __ jump_from_interpreted(rbx, rax);
   19.25  }
   19.26  
   19.27 @@ -3079,6 +3082,7 @@
   19.28    prepare_invoke(byte_no, rbx);  // get f1 Method*
   19.29    // do the call
   19.30    __ profile_call(rax);
   19.31 +  __ profile_arguments_type(rax, rbx, r13, false);
   19.32    __ jump_from_interpreted(rbx, rax);
   19.33  }
   19.34  
   19.35 @@ -3136,6 +3140,8 @@
   19.36    __ testptr(rbx, rbx);
   19.37    __ jcc(Assembler::zero, no_such_method);
   19.38  
   19.39 +  __ profile_arguments_type(rdx, rbx, r13, true);
   19.40 +
   19.41    // do the call
   19.42    // rcx: receiver
   19.43    // rbx,: Method*
   19.44 @@ -3193,6 +3199,7 @@
   19.45  
   19.46    // FIXME: profile the LambdaForm also
   19.47    __ profile_final_call(rax);
   19.48 +  __ profile_arguments_type(rdx, rbx_method, r13, true);
   19.49  
   19.50    __ jump_from_interpreted(rbx_method, rdx);
   19.51  }
   19.52 @@ -3226,6 +3233,7 @@
   19.53    // %%% should make a type profile for any invokedynamic that takes a ref argument
   19.54    // profile this call
   19.55    __ profile_call(r13);
   19.56 +  __ profile_arguments_type(rdx, rbx_method, r13, false);
   19.57  
   19.58    __ verify_oop(rax_callsite);
   19.59  
    20.1 --- a/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Oct 18 10:37:26 2013 +0000
    20.2 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Oct 18 19:44:40 2013 -0700
    20.3 @@ -935,6 +935,7 @@
    20.4  void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
    20.5  void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
    20.6  void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
    20.7 +void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {}
    20.8  void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
    20.9  void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
   20.10  void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
    21.1 --- a/src/share/vm/c1/c1_Canonicalizer.hpp	Fri Oct 18 10:37:26 2013 +0000
    21.2 +++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Fri Oct 18 19:44:40 2013 -0700
    21.3 @@ -104,6 +104,7 @@
    21.4    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    21.5    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    21.6    virtual void do_ProfileCall    (ProfileCall*     x);
    21.7 +  virtual void do_ProfileReturnType (ProfileReturnType*  x);
    21.8    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
    21.9    virtual void do_RuntimeCall    (RuntimeCall*     x);
   21.10    virtual void do_MemBar         (MemBar*          x);
    22.1 --- a/src/share/vm/c1/c1_Compilation.cpp	Fri Oct 18 10:37:26 2013 +0000
    22.2 +++ b/src/share/vm/c1/c1_Compilation.cpp	Fri Oct 18 19:44:40 2013 -0700
    22.3 @@ -601,6 +601,17 @@
    22.4    }
    22.5  }
    22.6  
    22.7 +ciKlass* Compilation::cha_exact_type(ciType* type) {
    22.8 +  if (type != NULL && type->is_loaded() && type->is_instance_klass()) {
    22.9 +    ciInstanceKlass* ik = type->as_instance_klass();
   22.10 +    assert(ik->exact_klass() == NULL, "no cha for final klass");
   22.11 +    if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
   22.12 +      dependency_recorder()->assert_leaf_type(ik);
   22.13 +      return ik;
   22.14 +    }
   22.15 +  }
   22.16 +  return NULL;
   22.17 +}
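
cha_exact_type leans on class hierarchy analysis: a loaded, non-interface instance klass with no subclasses can be treated as exact, provided a leaf-type dependency is recorded so the nmethod is deoptimized if that ever changes. A hypothetical illustration:

    // If only A and B are loaded, CHA may treat a value of declared type B
    // as having exact type B, since B is currently a leaf class.
    class A { }
    class B extends A { }
    // Later loading a subclass of B would violate the recorded
    // assert_leaf_type dependency and force deoptimization:
    // class C extends B { }
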
   22.18  
   22.19  void Compilation::print_timers() {
   22.20    // tty->print_cr("    Native methods         : %6.3f s, Average : %2.3f", CompileBroker::_t_native_compilation.seconds(), CompileBroker::_t_native_compilation.seconds() / CompileBroker::_total_native_compile_count);
    23.1 --- a/src/share/vm/c1/c1_Compilation.hpp	Fri Oct 18 10:37:26 2013 +0000
    23.2 +++ b/src/share/vm/c1/c1_Compilation.hpp	Fri Oct 18 19:44:40 2013 -0700
    23.3 @@ -246,6 +246,8 @@
    23.4        (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
    23.5        method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
    23.6    }
    23.7 +
    23.8 +  ciKlass* cha_exact_type(ciType* type);
    23.9  };
   23.10  
   23.11  
    24.1 --- a/src/share/vm/c1/c1_Compiler.cpp	Fri Oct 18 10:37:26 2013 +0000
    24.2 +++ b/src/share/vm/c1/c1_Compiler.cpp	Fri Oct 18 19:44:40 2013 -0700
    24.3 @@ -42,26 +42,16 @@
    24.4  #include "runtime/interfaceSupport.hpp"
    24.5  #include "runtime/sharedRuntime.hpp"
    24.6  
    24.7 -volatile int Compiler::_runtimes = uninitialized;
    24.8  
    24.9 -Compiler::Compiler() {
   24.10 -}
   24.11 +Compiler::Compiler () {}
   24.12  
   24.13 -
   24.14 -Compiler::~Compiler() {
   24.15 -  Unimplemented();
   24.16 -}
   24.17 -
   24.18 -
   24.19 -void Compiler::initialize_all() {
   24.20 +void Compiler::init_c1_runtime() {
   24.21    BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   24.22    Arena* arena = new (mtCompiler) Arena();
   24.23    Runtime1::initialize(buffer_blob);
   24.24    FrameMap::initialize();
   24.25    // initialize data structures
   24.26    ValueType::initialize(arena);
   24.27 -  // Instruction::initialize();
   24.28 -  // BlockBegin::initialize();
   24.29    GraphBuilder::initialize();
   24.30    // note: to use more than one instance of LinearScan at a time this function call has to
   24.31    //       be moved somewhere outside of this constructor:
   24.32 @@ -70,32 +60,33 @@
   24.33  
   24.34  
   24.35  void Compiler::initialize() {
   24.36 -  if (_runtimes != initialized) {
   24.37 -    initialize_runtimes( initialize_all, &_runtimes);
   24.38 +  // Buffer blob must be allocated per C1 compiler thread at startup
   24.39 +  BufferBlob* buffer_blob = init_buffer_blob();
   24.40 +
   24.41 +  if (should_perform_init()) {
   24.42 +    if (buffer_blob == NULL) {
   24.43 +      // When we come here we are in state 'initializing'; entire C1 compilation
   24.44 +      // can be shut down.
   24.45 +      set_state(failed);
   24.46 +    } else {
   24.47 +      init_c1_runtime();
   24.48 +      set_state(initialized);
   24.49 +    }
   24.50    }
   24.51 -  mark_initialized();
   24.52  }
   24.53  
   24.54 -
   24.55 -BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
   24.56 +BufferBlob* Compiler::init_buffer_blob() {
   24.57    // Allocate buffer blob once at startup since allocation for each
   24.58    // compilation seems to be too expensive (at least on Intel win32).
   24.59 -  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   24.60 -  if (buffer_blob != NULL) {
   24.61 -    return buffer_blob;
   24.62 -  }
   24.63 +  assert (CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");
   24.64  
   24.65    // setup CodeBuffer.  Preallocate a BufferBlob of size
   24.66    // NMethodSizeLimit plus some extra space for constants.
   24.67    int code_buffer_size = Compilation::desired_max_code_buffer_size() +
   24.68      Compilation::desired_max_constant_size();
   24.69  
   24.70 -  buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
   24.71 -                                   code_buffer_size);
   24.72 -  if (buffer_blob == NULL) {
   24.73 -    CompileBroker::handle_full_code_cache();
   24.74 -    env->record_failure("CodeCache is full");
   24.75 -  } else {
   24.76 +  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
   24.77 +  if (buffer_blob != NULL) {
   24.78      CompilerThread::current()->set_buffer_blob(buffer_blob);
   24.79    }
   24.80  
   24.81 @@ -104,15 +95,8 @@
   24.82  
   24.83  
   24.84  void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
   24.85 -  BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
   24.86 -  if (buffer_blob == NULL) {
   24.87 -    return;
   24.88 -  }
   24.89 -
   24.90 -  if (!is_initialized()) {
   24.91 -    initialize();
   24.92 -  }
   24.93 -
   24.94 +  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   24.95 +  assert(buffer_blob != NULL, "Must exist");
   24.96    // invoke compilation
   24.97    {
   24.98      // We are nested here because we need for the destructor
    25.1 --- a/src/share/vm/c1/c1_Compiler.hpp	Fri Oct 18 10:37:26 2013 +0000
    25.2 +++ b/src/share/vm/c1/c1_Compiler.hpp	Fri Oct 18 19:44:40 2013 -0700
    25.3 @@ -30,11 +30,9 @@
    25.4  // There is one instance of the Compiler per CompilerThread.
    25.5  
    25.6  class Compiler: public AbstractCompiler {
    25.7 -
    25.8   private:
    25.9 -
   25.10 - // Tracks whether runtime has been initialized
   25.11 - static volatile int _runtimes;
   25.12 +  static void init_c1_runtime();
   25.13 +  BufferBlob* init_buffer_blob();
   25.14  
   25.15   public:
   25.16    // Creation
   25.17 @@ -46,19 +44,12 @@
   25.18  
   25.19    virtual bool is_c1()                           { return true; };
   25.20  
   25.21 -  BufferBlob* get_buffer_blob(ciEnv* env);
   25.22 -
   25.23    // Missing feature tests
   25.24    virtual bool supports_native()                 { return true; }
   25.25    virtual bool supports_osr   ()                 { return true; }
   25.26  
   25.27 -  // Customization
   25.28 -  virtual bool needs_adapters         ()         { return false; }
   25.29 -  virtual bool needs_stubs            ()         { return false; }
   25.30 -
   25.31    // Initialization
   25.32    virtual void initialize();
   25.33 -  static  void initialize_all();
   25.34  
   25.35    // Compilation entry point for methods
   25.36    virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
    26.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Oct 18 10:37:26 2013 +0000
    26.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Oct 18 19:44:40 2013 -0700
    26.3 @@ -1466,9 +1466,22 @@
    26.4      // State at end of inlined method is the state of the caller
    26.5      // without the method parameters on stack, including the
    26.6      // return value, if any, of the inlined method on operand stack.
    26.7 +    int invoke_bci = state()->caller_state()->bci();
    26.8      set_state(state()->caller_state()->copy_for_parsing());
    26.9      if (x != NULL) {
   26.10        state()->push(x->type(), x);
   26.11 +      if (profile_calls() && MethodData::profile_return() && x->type()->is_object_kind()) {
   26.12 +        ciMethod* caller = state()->scope()->method();
   26.13 +        ciMethodData* md = caller->method_data_or_null();
   26.14 +        ciProfileData* data = md->bci_to_data(invoke_bci);
   26.15 +        if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   26.16 +          bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
   26.17 +          // May not be true in case of an inlined call through a method handle intrinsic.
   26.18 +          if (has_return) {
   26.19 +            profile_return_type(x, method(), caller, invoke_bci);
   26.20 +          }
   26.21 +        }
   26.22 +      }
   26.23      }
   26.24      Goto* goto_callee = new Goto(continuation(), false);
   26.25  
   26.26 @@ -1658,6 +1671,42 @@
   26.27    return compilation()->dependency_recorder();
   26.28  }
   26.29  
   26.30 +// How many arguments do we want to profile?
   26.31 +Values* GraphBuilder::args_list_for_profiling(int& start, bool may_have_receiver) {
   26.32 +  int n = 0;
   26.33 +  assert(start == 0, "should be initialized");
   26.34 +  if (MethodData::profile_arguments()) {
   26.35 +    ciProfileData* data = method()->method_data()->bci_to_data(bci());
   26.36 +    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   26.37 +      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
   26.38 +      bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
   26.39 +      start = has_receiver ? 1 : 0;
   26.40 +    }
   26.41 +  }
   26.42 +  if (n > 0) {
   26.43 +    return new Values(n);
   26.44 +  }
   26.45 +  return NULL;
   26.46 +}
   26.47 +
   26.48 +// Collect arguments that we want to profile in a list
   26.49 +Values* GraphBuilder::collect_args_for_profiling(Values* args, bool may_have_receiver) {
   26.50 +  int start = 0;
   26.51 +  Values* obj_args = args_list_for_profiling(start, may_have_receiver);
   26.52 +  if (obj_args == NULL) {
   26.53 +    return NULL;
   26.54 +  }
   26.55 +  int s = obj_args->size();
   26.56 +  for (int i = start, j = 0; j < s; i++) {
   26.57 +    if (args->at(i)->type()->is_object_kind()) {
   26.58 +      obj_args->push(args->at(i));
   26.59 +      j++;
   26.60 +    }
   26.61 +  }
   26.62 +  assert(s == obj_args->length(), "missed on arg?");
   26.63 +  return obj_args;
   26.64 +}
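
To make the filtering above concrete, a small sketch of what collect_args_for_profiling computes, under the assumption that the profile's arity n counts only reference arguments and start skips the receiver when one is present (instanceof Number stands in for the VM's is_object_kind() test on boxed values):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ArgFilter {
        static List<Object> collect(List<Object> args, int start, int n) {
            List<Object> objArgs = new ArrayList<>(n);
            // starting after the receiver (if any), keep reference-typed
            // values until the profile's expected count n is reached
            for (int i = start; objArgs.size() < n && i < args.size(); i++) {
                if (!(args.get(i) instanceof Number)) {
                    objArgs.add(args.get(i));
                }
            }
            return objArgs;
        }
        public static void main(String[] a) {
            // receiver, int, String, long, Object with start = 1, n = 2
            List<Object> args = Arrays.asList(new Object(), 42, "s", 7L, new Object());
            System.out.println(collect(args, 1, 2).size()); // prints 2
        }
    }
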
   26.65 +
   26.66  
   26.67  void GraphBuilder::invoke(Bytecodes::Code code) {
   26.68    bool will_link;
   26.69 @@ -1957,7 +2006,7 @@
   26.70        } else if (exact_target != NULL) {
   26.71          target_klass = exact_target->holder();
   26.72        }
   26.73 -      profile_call(target, recv, target_klass);
   26.74 +      profile_call(target, recv, target_klass, collect_args_for_profiling(args, false), false);
   26.75      }
   26.76    }
   26.77  
   26.78 @@ -1972,6 +2021,9 @@
   26.79        push(result_type, result);
   26.80      }
   26.81    }
   26.82 +  if (profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
   26.83 +    profile_return_type(result, target);
   26.84 +  }
   26.85  }
   26.86  
   26.87  
   26.88 @@ -3509,7 +3561,7 @@
   26.89            recv = args->at(0);
   26.90            null_check(recv);
   26.91          }
   26.92 -        profile_call(callee, recv, NULL);
   26.93 +        profile_call(callee, recv, NULL, collect_args_for_profiling(args, true), true);
   26.94        }
   26.95      }
   26.96    }
   26.97 @@ -3520,6 +3572,10 @@
   26.98    Value value = append_split(result);
   26.99    if (result_type != voidType) push(result_type, value);
  26.100  
  26.101 +  if (callee != method() && profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
  26.102 +    profile_return_type(result, callee);
  26.103 +  }
  26.104 +
  26.105    // done
  26.106    return true;
  26.107  }
  26.108 @@ -3763,7 +3819,28 @@
  26.109      compilation()->set_would_profile(true);
  26.110  
  26.111      if (profile_calls()) {
  26.112 -      profile_call(callee, recv, holder_known ? callee->holder() : NULL);
  26.113 +      int start = 0;
  26.114 +      Values* obj_args = args_list_for_profiling(start, has_receiver);
  26.115 +      if (obj_args != NULL) {
  26.116 +        int s = obj_args->size();
  26.117 +        // if called through method handle invoke, some arguments may have been popped
  26.118 +        for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) {
  26.119 +          Value v = state()->stack_at_inc(i);
  26.120 +          if (v->type()->is_object_kind()) {
  26.121 +            obj_args->push(v);
  26.122 +            j++;
  26.123 +          }
  26.124 +        }
  26.125 +#ifdef ASSERT
  26.126 +        {
  26.127 +          bool ignored_will_link;
  26.128 +          ciSignature* declared_signature = NULL;
  26.129 +          ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
  26.130 +          assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
  26.131 +        }
  26.132 +#endif
  26.133 +      }
  26.134 +      profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
  26.135      }
  26.136    }
  26.137  
  26.138 @@ -4251,8 +4328,23 @@
  26.139  }
  26.140  #endif // PRODUCT
  26.141  
  26.142 -void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) {
  26.143 -  append(new ProfileCall(method(), bci(), callee, recv, known_holder));
  26.144 +void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
  26.145 +  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
  26.146 +}
  26.147 +
  26.148 +void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
  26.149 +  assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
  26.150 +  if (m == NULL) {
  26.151 +    m = method();
  26.152 +  }
  26.153 +  if (invoke_bci < 0) {
  26.154 +    invoke_bci = bci();
  26.155 +  }
  26.156 +  ciMethodData* md = m->method_data_or_null();
  26.157 +  ciProfileData* data = md->bci_to_data(invoke_bci);
  26.158 +  if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   26.159 +    append(new ProfileReturnType(m, invoke_bci, callee, ret));
  26.160 +  }
  26.161  }
  26.162  
  26.163  void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
    27.1 --- a/src/share/vm/c1/c1_GraphBuilder.hpp	Fri Oct 18 10:37:26 2013 +0000
    27.2 +++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Fri Oct 18 19:44:40 2013 -0700
    27.3 @@ -374,7 +374,8 @@
    27.4  
    27.5    void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);
    27.6  
    27.7 -  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
    27.8 +  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
    27.9 +  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
   27.10    void profile_invocation(ciMethod* inlinee, ValueStack* state);
   27.11  
   27.12    // Shortcuts to profiling control.
   27.13 @@ -386,6 +387,9 @@
   27.14    bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
   27.15    bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
   27.16  
   27.17 +  Values* args_list_for_profiling(int& start, bool may_have_receiver);
   27.18 +  Values* collect_args_for_profiling(Values* args, bool may_have_receiver);
   27.19 +
   27.20   public:
   27.21    NOT_PRODUCT(void print_stats();)
   27.22  
    28.1 --- a/src/share/vm/c1/c1_Instruction.cpp	Fri Oct 18 10:37:26 2013 +0000
    28.2 +++ b/src/share/vm/c1/c1_Instruction.cpp	Fri Oct 18 19:44:40 2013 -0700
    28.3 @@ -104,6 +104,14 @@
    28.4    }
    28.5  }
    28.6  
    28.7 +ciType* Instruction::exact_type() const {
    28.8 +  ciType* t = declared_type();
    28.9 +  if (t != NULL && t->is_klass()) {
   28.10 +    return t->as_klass()->exact_klass();
   28.11 +  }
   28.12 +  return NULL;
   28.13 +}
   28.14 +
   28.15  
   28.16  #ifndef PRODUCT
   28.17  void Instruction::check_state(ValueStack* state) {
   28.18 @@ -135,9 +143,7 @@
   28.19  
   28.20  // perform constant and interval tests on index value
   28.21  bool AccessIndexed::compute_needs_range_check() {
   28.22 -
   28.23    if (length()) {
   28.24 -
   28.25      Constant* clength = length()->as_Constant();
   28.26      Constant* cindex = index()->as_Constant();
   28.27      if (clength && cindex) {
   28.28 @@ -157,34 +163,8 @@
   28.29  }
   28.30  
   28.31  
   28.32 -ciType* Local::exact_type() const {
   28.33 -  ciType* type = declared_type();
   28.34 -
   28.35 -  // for primitive arrays, the declared type is the exact type
   28.36 -  if (type->is_type_array_klass()) {
   28.37 -    return type;
   28.38 -  } else if (type->is_instance_klass()) {
   28.39 -    ciInstanceKlass* ik = (ciInstanceKlass*)type;
   28.40 -    if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) {
   28.41 -      return type;
   28.42 -    }
   28.43 -  } else if (type->is_obj_array_klass()) {
   28.44 -    ciObjArrayKlass* oak = (ciObjArrayKlass*)type;
   28.45 -    ciType* base = oak->base_element_type();
   28.46 -    if (base->is_instance_klass()) {
   28.47 -      ciInstanceKlass* ik = base->as_instance_klass();
   28.48 -      if (ik->is_loaded() && ik->is_final()) {
   28.49 -        return type;
   28.50 -      }
   28.51 -    } else if (base->is_primitive_type()) {
   28.52 -      return type;
   28.53 -    }
   28.54 -  }
   28.55 -  return NULL;
   28.56 -}
   28.57 -
   28.58  ciType* Constant::exact_type() const {
   28.59 -  if (type()->is_object()) {
   28.60 +  if (type()->is_object() && type()->as_ObjectType()->is_loaded()) {
   28.61      return type()->as_ObjectType()->exact_type();
   28.62    }
   28.63    return NULL;
   28.64 @@ -192,19 +172,18 @@
   28.65  
   28.66  ciType* LoadIndexed::exact_type() const {
   28.67    ciType* array_type = array()->exact_type();
   28.68 -  if (array_type == NULL) {
   28.69 -    return NULL;
   28.70 -  }
   28.71 -  assert(array_type->is_array_klass(), "what else?");
   28.72 -  ciArrayKlass* ak = (ciArrayKlass*)array_type;
   28.73 +  if (array_type != NULL) {
   28.74 +    assert(array_type->is_array_klass(), "what else?");
   28.75 +    ciArrayKlass* ak = (ciArrayKlass*)array_type;
   28.76  
   28.77 -  if (ak->element_type()->is_instance_klass()) {
   28.78 -    ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
   28.79 -    if (ik->is_loaded() && ik->is_final()) {
   28.80 -      return ik;
   28.81 +    if (ak->element_type()->is_instance_klass()) {
   28.82 +      ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
   28.83 +      if (ik->is_loaded() && ik->is_final()) {
   28.84 +        return ik;
   28.85 +      }
   28.86      }
   28.87    }
   28.88 -  return NULL;
   28.89 +  return Instruction::exact_type();
   28.90  }
   28.91  
   28.92  
   28.93 @@ -224,22 +203,6 @@
   28.94  }
   28.95  
   28.96  
   28.97 -ciType* LoadField::exact_type() const {
   28.98 -  ciType* type = declared_type();
   28.99 -  // for primitive arrays, the declared type is the exact type
  28.100 -  if (type->is_type_array_klass()) {
  28.101 -    return type;
  28.102 -  }
  28.103 -  if (type->is_instance_klass()) {
  28.104 -    ciInstanceKlass* ik = (ciInstanceKlass*)type;
  28.105 -    if (ik->is_loaded() && ik->is_final()) {
  28.106 -      return type;
  28.107 -    }
  28.108 -  }
  28.109 -  return NULL;
  28.110 -}
  28.111 -
  28.112 -
  28.113  ciType* NewTypeArray::exact_type() const {
  28.114    return ciTypeArrayKlass::make(elt_type());
  28.115  }
  28.116 @@ -264,16 +227,6 @@
  28.117    return klass();
  28.118  }
  28.119  
  28.120 -ciType* CheckCast::exact_type() const {
  28.121 -  if (klass()->is_instance_klass()) {
  28.122 -    ciInstanceKlass* ik = (ciInstanceKlass*)klass();
  28.123 -    if (ik->is_loaded() && ik->is_final()) {
  28.124 -      return ik;
  28.125 -    }
  28.126 -  }
  28.127 -  return NULL;
  28.128 -}
  28.129 -
  28.130  // Implementation of ArithmeticOp
  28.131  
  28.132  bool ArithmeticOp::is_commutative() const {
    29.1 --- a/src/share/vm/c1/c1_Instruction.hpp	Fri Oct 18 10:37:26 2013 +0000
    29.2 +++ b/src/share/vm/c1/c1_Instruction.hpp	Fri Oct 18 19:44:40 2013 -0700
    29.3 @@ -107,6 +107,7 @@
    29.4  class         UnsafePrefetchRead;
    29.5  class         UnsafePrefetchWrite;
    29.6  class   ProfileCall;
    29.7 +class   ProfileReturnType;
    29.8  class   ProfileInvoke;
    29.9  class   RuntimeCall;
   29.10  class   MemBar;
   29.11 @@ -211,6 +212,7 @@
   29.12    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
   29.13    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
   29.14    virtual void do_ProfileCall    (ProfileCall*     x) = 0;
   29.15 +  virtual void do_ProfileReturnType (ProfileReturnType*  x) = 0;
   29.16    virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   29.17    virtual void do_RuntimeCall    (RuntimeCall*     x) = 0;
   29.18    virtual void do_MemBar         (MemBar*          x) = 0;
   29.19 @@ -322,6 +324,36 @@
   29.20      _type = type;
   29.21    }
   29.22  
   29.23 +  // Helper class to keep track of which arguments need a null check
   29.24 +  class ArgsNonNullState {
   29.25 +  private:
   29.26 +    int _nonnull_state; // mask identifying which args are nonnull
   29.27 +  public:
   29.28 +    ArgsNonNullState()
   29.29 +      : _nonnull_state(AllBits) {}
   29.30 +
    29.31 +    // Does argument number i need a null check?
   29.32 +    bool arg_needs_null_check(int i) const {
    29.33 +      // No data is kept for arguments beyond the bits of _nonnull_state
    29.34 +      // (32 on all supported platforms), so conservatively assume they need a null check.
   29.35 +      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
   29.36 +        return is_set_nth_bit(_nonnull_state, i);
   29.37 +      }
   29.38 +      return true;
   29.39 +    }
   29.40 +
   29.41 +    // Set whether argument number i needs a null check or not
   29.42 +    void set_arg_needs_null_check(int i, bool check) {
   29.43 +      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
   29.44 +        if (check) {
   29.45 +          _nonnull_state |= nth_bit(i);
   29.46 +        } else {
   29.47 +          _nonnull_state &= ~(nth_bit(i));
   29.48 +        }
   29.49 +      }
   29.50 +    }
   29.51 +  };
   29.52 +
   29.53   public:
   29.54    void* operator new(size_t size) throw() {
   29.55      Compilation* c = Compilation::current();
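ArgsNonNullState packs one needs-null-check bit per argument into an int, starting from all-ones (every argument conservatively needs a check) and answering "needs a check" for any slot beyond the tracked width. A self-contained sketch of the same scheme (plain C++, hypothetical names; the VM uses its nth_bit/is_set_nth_bit helpers instead):

    #include <cassert>
    #include <cstdint>

    struct NonNullBits {
      uint32_t state;                             // AllBits: assume every arg needs a check
      NonNullBits() : state(0xFFFFFFFFu) {}
      bool needs_check(int i) const {
        if (i < 0 || i >= 32) return true;        // untracked slot: stay conservative
        return ((state >> i) & 1u) != 0;
      }
      void set_needs_check(int i, bool check) {
        if (i < 0 || i >= 32) return;
        if (check) state |=  (1u << i);
        else       state &= ~(1u << i);
      }
    };

    int main() {
      NonNullBits s;
      s.set_needs_check(2, false);                // proved non-null by null-check elimination
      assert(!s.needs_check(2));
      assert(s.needs_check(0));                   // untouched args stay conservative
      assert(s.needs_check(40));                  // beyond the 32 tracked bits: conservative
      return 0;
    }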
   29.56 @@ -566,7 +598,7 @@
   29.57    virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
   29.58            void       values_do(ValueVisitor* f)   { input_values_do(f); state_values_do(f); other_values_do(f); }
   29.59  
   29.60 -  virtual ciType* exact_type() const             { return NULL; }
   29.61 +  virtual ciType* exact_type() const;
   29.62    virtual ciType* declared_type() const          { return NULL; }
   29.63  
   29.64    // hashing
   29.65 @@ -689,7 +721,6 @@
   29.66    int java_index() const                         { return _java_index; }
   29.67  
   29.68    virtual ciType* declared_type() const          { return _declared_type; }
   29.69 -  virtual ciType* exact_type() const;
   29.70  
   29.71    // generic
   29.72    virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
   29.73 @@ -806,7 +837,6 @@
   29.74    {}
   29.75  
   29.76    ciType* declared_type() const;
   29.77 -  ciType* exact_type() const;
   29.78  
   29.79    // generic
   29.80    HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset())  // cannot be eliminated if needs patching or if volatile
   29.81 @@ -1299,6 +1329,7 @@
   29.82  
   29.83    virtual bool needs_exception_state() const     { return false; }
   29.84  
   29.85 +  ciType* exact_type() const                     { return NULL; }
   29.86    ciType* declared_type() const;
   29.87  
   29.88    // generic
   29.89 @@ -1422,7 +1453,6 @@
   29.90    }
   29.91  
   29.92    ciType* declared_type() const;
   29.93 -  ciType* exact_type() const;
   29.94  };
   29.95  
   29.96  
   29.97 @@ -1490,7 +1520,7 @@
   29.98    vmIntrinsics::ID _id;
   29.99    Values*          _args;
  29.100    Value            _recv;
  29.101 -  int              _nonnull_state; // mask identifying which args are nonnull
  29.102 +  ArgsNonNullState _nonnull_state;
  29.103  
  29.104   public:
  29.105    // preserves_state can be set to true for Intrinsics
  29.106 @@ -1511,7 +1541,6 @@
  29.107    , _id(id)
  29.108    , _args(args)
  29.109    , _recv(NULL)
  29.110 -  , _nonnull_state(AllBits)
  29.111    {
  29.112      assert(args != NULL, "args must exist");
  29.113      ASSERT_VALUES
  29.114 @@ -1537,21 +1566,12 @@
  29.115    Value receiver() const                         { assert(has_receiver(), "must have receiver"); return _recv; }
  29.116    bool preserves_state() const                   { return check_flag(PreservesStateFlag); }
  29.117  
  29.118 -  bool arg_needs_null_check(int i) {
  29.119 -    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
  29.120 -      return is_set_nth_bit(_nonnull_state, i);
  29.121 -    }
  29.122 -    return true;
  29.123 +  bool arg_needs_null_check(int i) const {
  29.124 +    return _nonnull_state.arg_needs_null_check(i);
  29.125    }
  29.126  
  29.127    void set_arg_needs_null_check(int i, bool check) {
  29.128 -    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
  29.129 -      if (check) {
  29.130 -        _nonnull_state |= nth_bit(i);
  29.131 -      } else {
  29.132 -        _nonnull_state &= ~(nth_bit(i));
  29.133 -      }
  29.134 -    }
  29.135 +    _nonnull_state.set_arg_needs_null_check(i, check);
  29.136    }
  29.137  
  29.138    // generic
  29.139 @@ -2450,34 +2470,87 @@
  29.140  
  29.141  LEAF(ProfileCall, Instruction)
  29.142   private:
  29.143 -  ciMethod* _method;
  29.144 -  int       _bci_of_invoke;
  29.145 -  ciMethod* _callee;         // the method that is called at the given bci
  29.146 -  Value     _recv;
  29.147 -  ciKlass*  _known_holder;
  29.148 +  ciMethod*        _method;
  29.149 +  int              _bci_of_invoke;
  29.150 +  ciMethod*        _callee;         // the method that is called at the given bci
  29.151 +  Value            _recv;
  29.152 +  ciKlass*         _known_holder;
  29.153 +  Values*          _obj_args;       // arguments for type profiling
  29.154 +  ArgsNonNullState _nonnull_state;  // Do we know whether some arguments are never null?
  29.155 +  bool             _inlined;        // Are we profiling a call that is inlined
  29.156  
  29.157   public:
  29.158 -  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder)
  29.159 +  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined)
  29.160      : Instruction(voidType)
  29.161      , _method(method)
  29.162      , _bci_of_invoke(bci)
  29.163      , _callee(callee)
  29.164      , _recv(recv)
  29.165      , _known_holder(known_holder)
  29.166 +    , _obj_args(obj_args)
  29.167 +    , _inlined(inlined)
  29.168    {
  29.169      // The ProfileCall has side-effects and must occur precisely where located
  29.170      pin();
  29.171    }
  29.172  
  29.173 -  ciMethod* method()      { return _method; }
  29.174 -  int bci_of_invoke()     { return _bci_of_invoke; }
  29.175 -  ciMethod* callee()      { return _callee; }
  29.176 -  Value recv()            { return _recv; }
  29.177 -  ciKlass* known_holder() { return _known_holder; }
  29.178 -
  29.179 -  virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
  29.180 +  ciMethod* method()             const { return _method; }
  29.181 +  int bci_of_invoke()            const { return _bci_of_invoke; }
  29.182 +  ciMethod* callee()             const { return _callee; }
  29.183 +  Value recv()                   const { return _recv; }
  29.184 +  ciKlass* known_holder()        const { return _known_holder; }
  29.185 +  int nb_profiled_args()         const { return _obj_args == NULL ? 0 : _obj_args->length(); }
  29.186 +  Value profiled_arg_at(int i)   const { return _obj_args->at(i); }
  29.187 +  bool arg_needs_null_check(int i) const {
  29.188 +    return _nonnull_state.arg_needs_null_check(i);
  29.189 +  }
  29.190 +  bool inlined()                 const { return _inlined; }
  29.191 +
  29.192 +  void set_arg_needs_null_check(int i, bool check) {
  29.193 +    _nonnull_state.set_arg_needs_null_check(i, check);
  29.194 +  }
  29.195 +
  29.196 +  virtual void input_values_do(ValueVisitor* f)   {
  29.197 +    if (_recv != NULL) {
  29.198 +      f->visit(&_recv);
  29.199 +    }
  29.200 +    for (int i = 0; i < nb_profiled_args(); i++) {
  29.201 +      f->visit(_obj_args->adr_at(i));
  29.202 +    }
  29.203 +  }
  29.204  };
  29.205  
  29.206 +LEAF(ProfileReturnType, Instruction)
  29.207 + private:
  29.208 +  ciMethod*        _method;
  29.209 +  ciMethod*        _callee;
  29.210 +  int              _bci_of_invoke;
  29.211 +  Value            _ret;
  29.212 +
  29.213 + public:
  29.214 +  ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret)
  29.215 +    : Instruction(voidType)
  29.216 +    , _method(method)
  29.217 +    , _callee(callee)
  29.218 +    , _bci_of_invoke(bci)
  29.219 +    , _ret(ret)
  29.220 +  {
  29.221 +    set_needs_null_check(true);
   29.222 +    // The ProfileReturnType has side-effects and must occur precisely where located
  29.223 +    pin();
  29.224 +  }
  29.225 +
  29.226 +  ciMethod* method()             const { return _method; }
  29.227 +  ciMethod* callee()             const { return _callee; }
  29.228 +  int bci_of_invoke()            const { return _bci_of_invoke; }
  29.229 +  Value ret()                    const { return _ret; }
  29.230 +
  29.231 +  virtual void input_values_do(ValueVisitor* f)   {
  29.232 +    if (_ret != NULL) {
  29.233 +      f->visit(&_ret);
  29.234 +    }
  29.235 +  }
  29.236 +};
  29.237  
  29.238  // Call some C runtime function that doesn't safepoint,
  29.239  // optionally passing the current thread as the first argument.
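Because ProfileReturnType is a new leaf in the instruction hierarchy, every InstructionVisitor in the code base (printer, optimizer, value map, range-check elimination, LIR generator, as the hunks below show) must now supply a do_ProfileReturnType override, even when it is a no-op. A minimal sketch of that obligation (illustrative C++ only, not the VM's classes):

    struct Visitor {
      virtual void do_ProfileCall(void* x) = 0;
      virtual void do_ProfileReturnType(void* x) = 0;  // new pure virtual: every visitor must react
      virtual ~Visitor() {}
    };

    struct NopVisitor : Visitor {                      // passes that don't care opt out explicitly
      virtual void do_ProfileCall(void*) {}
      virtual void do_ProfileReturnType(void*) {}
    };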
    30.1 --- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Fri Oct 18 10:37:26 2013 +0000
    30.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Fri Oct 18 19:44:40 2013 -0700
    30.3 @@ -892,10 +892,24 @@
    30.4    if (x->known_holder() != NULL) {
    30.5      output()->print(", ");
    30.6      print_klass(x->known_holder());
    30.7 +    output()->print(" ");
    30.8 +  }
    30.9 +  for (int i = 0; i < x->nb_profiled_args(); i++) {
   30.10 +    if (i > 0) output()->print(", ");
   30.11 +    print_value(x->profiled_arg_at(i));
   30.12 +    if (x->arg_needs_null_check(i)) {
   30.13 +      output()->print(" [NC]");
   30.14 +    }
   30.15    }
   30.16    output()->put(')');
   30.17  }
   30.18  
   30.19 +void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) {
   30.20 +  output()->print("profile ret type ");
   30.21 +  print_value(x->ret());
   30.22 +  output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8());
    30.23 +}
    30.24 +
   30.25  void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
   30.26    output()->print("profile_invoke ");
   30.27    output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
    31.1 --- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Fri Oct 18 10:37:26 2013 +0000
    31.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Fri Oct 18 19:44:40 2013 -0700
    31.3 @@ -132,6 +132,7 @@
    31.4    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    31.5    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    31.6    virtual void do_ProfileCall    (ProfileCall*     x);
    31.7 +  virtual void do_ProfileReturnType (ProfileReturnType*  x);
    31.8    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
    31.9    virtual void do_RuntimeCall    (RuntimeCall*     x);
   31.10    virtual void do_MemBar         (MemBar*          x);
    32.1 --- a/src/share/vm/c1/c1_LIR.cpp	Fri Oct 18 10:37:26 2013 +0000
    32.2 +++ b/src/share/vm/c1/c1_LIR.cpp	Fri Oct 18 19:44:40 2013 -0700
    32.3 @@ -1001,6 +1001,17 @@
    32.4        assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
    32.5        break;
    32.6      }
    32.7 +
    32.8 +// LIR_OpProfileType:
    32.9 +    case lir_profile_type: {
   32.10 +      assert(op->as_OpProfileType() != NULL, "must be");
   32.11 +      LIR_OpProfileType* opProfileType = (LIR_OpProfileType*)op;
   32.12 +
   32.13 +      do_input(opProfileType->_mdp); do_temp(opProfileType->_mdp);
   32.14 +      do_input(opProfileType->_obj);
   32.15 +      do_temp(opProfileType->_tmp);
   32.16 +      break;
   32.17 +    }
   32.18    default:
   32.19      ShouldNotReachHere();
   32.20    }
   32.21 @@ -1151,6 +1162,10 @@
   32.22    masm->emit_profile_call(this);
   32.23  }
   32.24  
   32.25 +void LIR_OpProfileType::emit_code(LIR_Assembler* masm) {
   32.26 +  masm->emit_profile_type(this);
   32.27 +}
   32.28 +
   32.29  // LIR_List
   32.30  LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
   32.31    : _operations(8)
   32.32 @@ -1803,6 +1818,8 @@
   32.33       case lir_cas_int:               s = "cas_int";      break;
   32.34       // LIR_OpProfileCall
   32.35       case lir_profile_call:          s = "profile_call";  break;
   32.36 +     // LIR_OpProfileType
   32.37 +     case lir_profile_type:          s = "profile_type";  break;
   32.38       // LIR_OpAssert
   32.39  #ifdef ASSERT
   32.40       case lir_assert:                s = "assert";        break;
   32.41 @@ -2086,6 +2103,15 @@
   32.42    tmp1()->print(out);          out->print(" ");
   32.43  }
   32.44  
   32.45 +// LIR_OpProfileType
   32.46 +void LIR_OpProfileType::print_instr(outputStream* out) const {
   32.47 +  out->print("exact = "); if (exact_klass() == NULL) { out->print("unknown"); } else { exact_klass()->print_name_on(out); }
   32.48 +  out->print(" current = "); ciTypeEntries::print_ciklass(out, current_klass());
   32.49 +  mdp()->print(out);          out->print(" ");
   32.50 +  obj()->print(out);          out->print(" ");
   32.51 +  tmp()->print(out);          out->print(" ");
   32.52 +}
   32.53 +
   32.54  #endif // PRODUCT
   32.55  
   32.56  // Implementation of LIR_InsertionBuffer
    33.1 --- a/src/share/vm/c1/c1_LIR.hpp	Fri Oct 18 10:37:26 2013 +0000
    33.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Fri Oct 18 19:44:40 2013 -0700
    33.3 @@ -882,6 +882,7 @@
    33.4  class    LIR_OpTypeCheck;
    33.5  class    LIR_OpCompareAndSwap;
    33.6  class    LIR_OpProfileCall;
    33.7 +class    LIR_OpProfileType;
    33.8  #ifdef ASSERT
    33.9  class    LIR_OpAssert;
   33.10  #endif
   33.11 @@ -1005,6 +1006,7 @@
   33.12    , end_opCompareAndSwap
   33.13    , begin_opMDOProfile
   33.14      , lir_profile_call
   33.15 +    , lir_profile_type
   33.16    , end_opMDOProfile
   33.17    , begin_opAssert
   33.18      , lir_assert
   33.19 @@ -1145,6 +1147,7 @@
   33.20    virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   33.21    virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   33.22    virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
   33.23 +  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
   33.24  #ifdef ASSERT
   33.25    virtual LIR_OpAssert* as_OpAssert() { return NULL; }
   33.26  #endif
   33.27 @@ -1925,8 +1928,8 @@
   33.28  
   33.29   public:
   33.30    // Destroys recv
   33.31 -  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
   33.32 -    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   33.33 +  LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
   33.34 +    : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   33.35      , _profiled_method(profiled_method)
   33.36      , _profiled_bci(profiled_bci)
   33.37      , _profiled_callee(profiled_callee)
   33.38 @@ -1948,6 +1951,45 @@
   33.39    virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   33.40  };
   33.41  
   33.42 +// LIR_OpProfileType
   33.43 +class LIR_OpProfileType : public LIR_Op {
   33.44 + friend class LIR_OpVisitState;
   33.45 +
   33.46 + private:
   33.47 +  LIR_Opr      _mdp;
   33.48 +  LIR_Opr      _obj;
   33.49 +  LIR_Opr      _tmp;
   33.50 +  ciKlass*     _exact_klass;   // non-NULL if we know the klass statically (no need to load it from _obj)
   33.51 +  intptr_t     _current_klass; // what the profiling currently reports
   33.52 +  bool         _not_null;      // true if we know statically that _obj cannot be null
   33.53 +  bool         _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
   33.54 +                               // _exact_klass is the only possible type for this parameter in any context.
   33.55 +
   33.56 + public:
   33.57 +  // mdp is both an input and a temp, so it may be destroyed
   33.58 +  LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
   33.59 +    : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   33.60 +    , _mdp(mdp)
   33.61 +    , _obj(obj)
   33.62 +    , _exact_klass(exact_klass)
   33.63 +    , _current_klass(current_klass)
   33.64 +    , _tmp(tmp)
   33.65 +    , _not_null(not_null)
   33.66 +    , _no_conflict(no_conflict) { }
   33.67 +
   33.68 +  LIR_Opr      mdp()              const             { return _mdp;              }
   33.69 +  LIR_Opr      obj()              const             { return _obj;              }
   33.70 +  LIR_Opr      tmp()              const             { return _tmp;              }
   33.71 +  ciKlass*     exact_klass()      const             { return _exact_klass;      }
   33.72 +  intptr_t     current_klass()    const             { return _current_klass;    }
   33.73 +  bool         not_null()         const             { return _not_null;         }
   33.74 +  bool         no_conflict()      const             { return _no_conflict;      }
   33.75 +
   33.76 +  virtual void emit_code(LIR_Assembler* masm);
   33.77 +  virtual LIR_OpProfileType* as_OpProfileType() { return this; }
   33.78 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   33.79 +};
   33.80 +
   33.81  class LIR_InsertionBuffer;
   33.82  
   33.83  //--------------------------------LIR_List---------------------------------------------------
   33.84 @@ -2247,7 +2289,10 @@
   33.85                    ciMethod* profiled_method, int profiled_bci);
   33.86    // MethodData* profiling
   33.87    void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
   33.88 -    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
   33.89 +    append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
   33.90 +  }
   33.91 +  void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
   33.92 +    append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
   33.93    }
   33.94  
   33.95    void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
    34.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Oct 18 10:37:26 2013 +0000
    34.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Oct 18 19:44:40 2013 -0700
    34.3 @@ -208,6 +208,7 @@
    34.4    void emit_call(LIR_OpJavaCall* op);
    34.5    void emit_rtcall(LIR_OpRTCall* op);
    34.6    void emit_profile_call(LIR_OpProfileCall* op);
    34.7 +  void emit_profile_type(LIR_OpProfileType* op);
    34.8    void emit_delay(LIR_OpDelay* op);
    34.9  
   34.10    void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
    35.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Oct 18 10:37:26 2013 +0000
    35.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Oct 18 19:44:40 2013 -0700
    35.3 @@ -2571,6 +2571,78 @@
    35.4  }
    35.5  
    35.6  
    35.7 +ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
    35.8 +  ciKlass* result = NULL;
    35.9 +  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
   35.10 +  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
   35.11 +  // known not to be null or null bit already set and already set to
   35.12 +  // unknown: nothing we can do to improve profiling
   35.13 +  if (!do_null && !do_update) {
   35.14 +    return result;
   35.15 +  }
   35.16 +
   35.17 +  ciKlass* exact_klass = NULL;
   35.18 +  Compilation* comp = Compilation::current();
   35.19 +  if (do_update) {
   35.20 +    // try to find exact type, using CHA if possible, so that loading
   35.21 +    // the klass from the object can be avoided
   35.22 +    ciType* type = arg->exact_type();
   35.23 +    if (type == NULL) {
   35.24 +      type = arg->declared_type();
   35.25 +      type = comp->cha_exact_type(type);
   35.26 +    }
   35.27 +    assert(type == NULL || type->is_klass(), "type should be class");
   35.28 +    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
   35.29 +
   35.30 +    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
   35.31 +  }
   35.32 +
   35.33 +  if (!do_null && !do_update) {
   35.34 +    return result;
   35.35 +  }
   35.36 +
   35.37 +  ciKlass* exact_signature_k = NULL;
   35.38 +  if (do_update) {
   35.39 +    // Is the type from the signature exact (the only one possible)?
   35.40 +    exact_signature_k = signature_k->exact_klass();
   35.41 +    if (exact_signature_k == NULL) {
   35.42 +      exact_signature_k = comp->cha_exact_type(signature_k);
   35.43 +    } else {
   35.44 +      result = exact_signature_k;
   35.45 +      do_update = false;
   35.46 +      // Known statically. No need to emit any code: prevent
   35.47 +      // LIR_Assembler::emit_profile_type() from emitting useless code
   35.48 +      profiled_k = ciTypeEntries::with_status(result, profiled_k);
   35.49 +    }
   35.50 +    if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
   35.51 +      assert(exact_klass == NULL, "arg and signature disagree?");
   35.52 +      // sometimes the type of the signature is better than the best type
   35.53 +      // the compiler has
   35.54 +      exact_klass = exact_signature_k;
   35.55 +      do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
   35.56 +    }
   35.57 +  }
   35.58 +
   35.59 +  if (!do_null && !do_update) {
   35.60 +    return result;
   35.61 +  }
   35.62 +
   35.63 +  if (mdp == LIR_OprFact::illegalOpr) {
   35.64 +    mdp = new_register(T_METADATA);
   35.65 +    __ metadata2reg(md->constant_encoding(), mdp);
   35.66 +    if (md_base_offset != 0) {
   35.67 +      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
   35.68 +      mdp = new_pointer_register();
   35.69 +      __ leal(LIR_OprFact::address(base_type_address), mdp);
   35.70 +    }
   35.71 +  }
   35.72 +  LIRItem value(arg, this);
   35.73 +  value.load_item();
   35.74 +  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
   35.75 +                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
   35.76 +  return result;
   35.77 +}
   35.78 +
   35.79  void LIRGenerator::do_Base(Base* x) {
   35.80    __ std_entry(LIR_OprFact::illegalOpr);
   35.81    // Emit moves from physical registers / stack slots to virtual registers
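profile_arg_type() above works on a raw profile cell (profiled_k): under the TypeEntries encoding this patch relies on, a cell packs a Klass pointer with two low status bits, one for "a null was seen here" and one for "conflicting types seen, give up". A hedged, self-contained sketch of that encoding (the bit values are an assumption spelled out here, mirroring methodData.hpp, not a new API):

    #include <cassert>
    #include <cstdint>

    typedef intptr_t cell_t;
    const cell_t NULL_SEEN    = 1;                     // a null reference flowed through this site
    const cell_t TYPE_UNKNOWN = 2;                     // more than one type seen: profiling gave up
    const cell_t STATUS_BITS  = NULL_SEEN | TYPE_UNKNOWN;

    static bool  was_null_seen(cell_t k)   { return (k & NULL_SEEN) != 0; }
    static bool  is_type_unknown(cell_t k) { return (k & TYPE_UNKNOWN) != 0; }
    static void* valid_klass(cell_t k)     { return (void*)(k & ~STATUS_BITS); }

    int main() {
      cell_t cell = (cell_t)0x1000 | NULL_SEEN;        // some klass pointer plus the null-seen flag
      assert(was_null_seen(cell));
      assert(!is_type_unknown(cell));                  // still worth updating: not yet a conflict
      assert(valid_klass(cell) == (void*)0x1000);
      return 0;
    }

This is why the function can return early whenever !do_null && !do_update: the null bit is already set and the cell is already "unknown", so no emitted code could improve the profile.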
   35.82 @@ -3004,12 +3076,52 @@
   35.83    }
   35.84  }
   35.85  
   35.86 +void LIRGenerator::profile_arguments(ProfileCall* x) {
   35.87 +  if (MethodData::profile_arguments()) {
   35.88 +    int bci = x->bci_of_invoke();
   35.89 +    ciMethodData* md = x->method()->method_data_or_null();
   35.90 +    ciProfileData* data = md->bci_to_data(bci);
   35.91 +    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   35.92 +      ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
   35.93 +      int base_offset = md->byte_offset_of_slot(data, extra);
   35.94 +      LIR_Opr mdp = LIR_OprFact::illegalOpr;
   35.95 +      ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
   35.96 +
   35.97 +      Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
   35.98 +      int start = 0;
   35.99 +      int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
  35.100 +      if (x->nb_profiled_args() < stop) {
  35.101 +        // if called through method handle invoke, some arguments may have been popped
  35.102 +        stop = x->nb_profiled_args();
  35.103 +      }
  35.104 +      ciSignature* sig = x->callee()->signature();
  35.105 +      // method handle call to virtual method
   35.106 +      // an inlined method handle call to a virtual method: the receiver is not part of the signature
  35.107 +      ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
  35.108 +      for (int i = 0; i < stop; i++) {
  35.109 +        int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
  35.110 +        ciKlass* exact = profile_arg_type(md, base_offset, off,
  35.111 +                                          args->type(i), x->profiled_arg_at(i+start), mdp,
  35.112 +                                          !x->arg_needs_null_check(i+start), sig_stream.next_klass());
  35.113 +        if (exact != NULL) {
  35.114 +          md->set_argument_type(bci, i, exact);
  35.115 +        }
  35.116 +      }
  35.117 +    }
  35.118 +  }
  35.119 +}
  35.120 +
  35.121  void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  35.122    // Need recv in a temporary register so it interferes with the other temporaries
  35.123    LIR_Opr recv = LIR_OprFact::illegalOpr;
  35.124    LIR_Opr mdo = new_register(T_OBJECT);
  35.125    // tmp is used to hold the counters on SPARC
  35.126    LIR_Opr tmp = new_pointer_register();
  35.127 +
  35.128 +  if (x->nb_profiled_args() > 0) {
  35.129 +    profile_arguments(x);
  35.130 +  }
  35.131 +
  35.132    if (x->recv() != NULL) {
  35.133      LIRItem value(x->recv(), this);
  35.134      value.load_item();
  35.135 @@ -3019,6 +3131,21 @@
  35.136    __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
  35.137  }
  35.138  
  35.139 +void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  35.140 +  int bci = x->bci_of_invoke();
  35.141 +  ciMethodData* md = x->method()->method_data_or_null();
  35.142 +  ciProfileData* data = md->bci_to_data(bci);
  35.143 +  assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
  35.144 +  ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
  35.145 +  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  35.146 +  ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
  35.147 +                                    ret->type(), x->ret(), mdp,
  35.148 +                                    !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
  35.149 +  if (exact != NULL) {
  35.150 +    md->set_return_type(bci, exact);
  35.151 +  }
  35.152 +}
  35.153 +
  35.154  void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  35.155    // We can safely ignore accessors here, since c2 will inline them anyway,
  35.156    // accessors are also always mature.
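One subtlety in profile_arguments() above: the MethodData recorded a bounded number of argument slots at the invoke bytecode, but when the call was rewritten through a method handle some of those values are no longer on the stack, so the loop bound is clamped to what was actually collected. Sketched invariant (comments only, names from the code above):

    // stop = min(number_of_arguments recorded in the CallTypeData,
    //            x->nb_profiled_args() actually captured at the call site);
    // each profiled slot i is written at
    //   argument_type_offset(i) - args_data_offset()
    // relative to the args_data slot of the MDO row.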
  35.157 @@ -3053,7 +3180,11 @@
  35.158    int offset = -1;
  35.159    LIR_Opr counter_holder;
  35.160    if (level == CompLevel_limited_profile) {
  35.161 -    address counters_adr = method->ensure_method_counters();
  35.162 +    MethodCounters* counters_adr = method->ensure_method_counters();
  35.163 +    if (counters_adr == NULL) {
  35.164 +      bailout("method counters allocation failed");
  35.165 +      return;
  35.166 +    }
  35.167      counter_holder = new_pointer_register();
  35.168      __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
  35.169      offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
    36.1 --- a/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Oct 18 10:37:26 2013 +0000
    36.2 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Oct 18 19:44:40 2013 -0700
    36.3 @@ -434,6 +434,8 @@
    36.4    void do_ThreadIDIntrinsic(Intrinsic* x);
    36.5    void do_ClassIDIntrinsic(Intrinsic* x);
    36.6  #endif
    36.7 +  ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
    36.8 +  void profile_arguments(ProfileCall* x);
    36.9  
   36.10   public:
   36.11    Compilation*  compilation() const              { return _compilation; }
   36.12 @@ -534,6 +536,7 @@
   36.13    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   36.14    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   36.15    virtual void do_ProfileCall    (ProfileCall*     x);
   36.16 +  virtual void do_ProfileReturnType (ProfileReturnType* x);
   36.17    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   36.18    virtual void do_RuntimeCall    (RuntimeCall*     x);
   36.19    virtual void do_MemBar         (MemBar*          x);
    37.1 --- a/src/share/vm/c1/c1_Optimizer.cpp	Fri Oct 18 10:37:26 2013 +0000
    37.2 +++ b/src/share/vm/c1/c1_Optimizer.cpp	Fri Oct 18 19:44:40 2013 -0700
    37.3 @@ -531,6 +531,7 @@
    37.4    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    37.5    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    37.6    void do_ProfileCall    (ProfileCall*     x);
    37.7 +  void do_ProfileReturnType (ProfileReturnType*  x);
    37.8    void do_ProfileInvoke  (ProfileInvoke*   x);
    37.9    void do_RuntimeCall    (RuntimeCall*     x);
   37.10    void do_MemBar         (MemBar*          x);
   37.11 @@ -657,6 +658,8 @@
   37.12    void handle_Intrinsic       (Intrinsic* x);
   37.13    void handle_ExceptionObject (ExceptionObject* x);
   37.14    void handle_Phi             (Phi* x);
   37.15 +  void handle_ProfileCall     (ProfileCall* x);
   37.16 +  void handle_ProfileReturnType (ProfileReturnType* x);
   37.17  };
   37.18  
   37.19  
   37.20 @@ -715,7 +718,9 @@
   37.21  void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
   37.22  void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
   37.23  void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
   37.24 -void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
   37.25 +void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check();
   37.26 +                                                                nce()->handle_ProfileCall(x); }
   37.27 +void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
   37.28  void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
   37.29  void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
   37.30  void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
   37.31 @@ -1134,6 +1139,15 @@
   37.32    }
   37.33  }
   37.34  
   37.35 +void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) {
   37.36 +  for (int i = 0; i < x->nb_profiled_args(); i++) {
   37.37 +    x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i)));
   37.38 +  }
   37.39 +}
   37.40 +
   37.41 +void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
   37.42 +  x->set_needs_null_check(!set_contains(x->ret()));
   37.43 +}
   37.44  
   37.45  void Optimizer::eliminate_null_checks() {
   37.46    ResourceMark rm;
    38.1 --- a/src/share/vm/c1/c1_RangeCheckElimination.hpp	Fri Oct 18 10:37:26 2013 +0000
    38.2 +++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp	Fri Oct 18 19:44:40 2013 -0700
    38.3 @@ -162,7 +162,8 @@
    38.4      void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
    38.5      void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
    38.6      void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
    38.7 -    void do_ProfileInvoke  (ProfileInvoke*  x)  { /* nothing to do */ };
    38.8 +    void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ };
    38.9 +    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   38.10      void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   38.11      void do_MemBar         (MemBar*          x) { /* nothing to do */ };
   38.12      void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
    39.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Oct 18 10:37:26 2013 +0000
    39.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Oct 18 19:44:40 2013 -0700
    39.3 @@ -542,8 +542,7 @@
    39.4      // exception handler can cause class loading, which might throw an
    39.5      // exception and those fields are expected to be clear during
    39.6      // normal bytecode execution.
    39.7 -    thread->set_exception_oop(NULL);
    39.8 -    thread->set_exception_pc(NULL);
    39.9 +    thread->clear_exception_oop_and_pc();
   39.10  
   39.11      continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
   39.12      // If an exception was thrown during exception dispatch, the exception oop may have changed
    40.1 --- a/src/share/vm/c1/c1_ValueMap.hpp	Fri Oct 18 10:37:26 2013 +0000
    40.2 +++ b/src/share/vm/c1/c1_ValueMap.hpp	Fri Oct 18 19:44:40 2013 -0700
    40.3 @@ -203,6 +203,7 @@
    40.4    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
    40.5    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
    40.6    void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
    40.7 +  void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ }
    40.8    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
    40.9    void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   40.10    void do_MemBar         (MemBar*          x) { /* nothing to do */ };
    41.1 --- a/src/share/vm/ci/ciClassList.hpp	Fri Oct 18 10:37:26 2013 +0000
    41.2 +++ b/src/share/vm/ci/ciClassList.hpp	Fri Oct 18 19:44:40 2013 -0700
    41.3 @@ -102,6 +102,7 @@
    41.4  friend class ciMethodHandle;           \
    41.5  friend class ciMethodType;             \
    41.6  friend class ciReceiverTypeData;       \
    41.7 +friend class ciTypeEntries;            \
    41.8  friend class ciSymbol;                 \
    41.9  friend class ciArray;                  \
   41.10  friend class ciObjArray;               \
    42.1 --- a/src/share/vm/ci/ciEnv.cpp	Fri Oct 18 10:37:26 2013 +0000
    42.2 +++ b/src/share/vm/ci/ciEnv.cpp	Fri Oct 18 19:44:40 2013 -0700
    42.3 @@ -1154,9 +1154,12 @@
    42.4    GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
    42.5  }
    42.6  
    42.7 -void ciEnv::dump_replay_data(outputStream* out) {
    42.8 -  VM_ENTRY_MARK;
    42.9 -  MutexLocker ml(Compile_lock);
   42.10 +// ------------------------------------------------------------------
   42.11 +// ciEnv::dump_replay_data*
   42.12 +
   42.13 +// Does not change thread state or acquire any locks.
   42.14 +// Safe to call from VM error reporter.
   42.15 +void ciEnv::dump_replay_data_unsafe(outputStream* out) {
   42.16    ResourceMark rm;
   42.17  #if INCLUDE_JVMTI
   42.18    out->print_cr("JvmtiExport can_access_local_variables %d",     _jvmti_can_access_local_variables);
   42.19 @@ -1181,3 +1184,10 @@
   42.20                  entry_bci, comp_level);
   42.21    out->flush();
   42.22  }
   42.23 +
   42.24 +void ciEnv::dump_replay_data(outputStream* out) {
   42.25 +  GUARDED_VM_ENTRY(
   42.26 +    MutexLocker ml(Compile_lock);
   42.27 +    dump_replay_data_unsafe(out);
   42.28 +  )
   42.29 +}
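The split gives two entry points: dump_replay_data() keeps the old behavior (VM entry plus Compile_lock) while dump_replay_data_unsafe() can run from contexts where neither is possible. A hypothetical call site from error reporting (illustrative only; replay_stream is a placeholder, and the actual hs_err integration lives outside this hunk):

    // Inside a crash handler, where taking Compile_lock could deadlock:
    ciEnv* env = ciEnv::current();
    if (env != NULL) {
      env->dump_replay_data_unsafe(&replay_stream);  // no state transition, no locking
    }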
    43.1 --- a/src/share/vm/ci/ciEnv.hpp	Fri Oct 18 10:37:26 2013 +0000
    43.2 +++ b/src/share/vm/ci/ciEnv.hpp	Fri Oct 18 19:44:40 2013 -0700
    43.3 @@ -452,6 +452,7 @@
    43.4  
    43.5    // Dump the compilation replay data for the ciEnv to the stream.
    43.6    void dump_replay_data(outputStream* out);
    43.7 +  void dump_replay_data_unsafe(outputStream* out);
    43.8  };
    43.9  
   43.10  #endif // SHARE_VM_CI_CIENV_HPP
    44.1 --- a/src/share/vm/ci/ciInstanceKlass.cpp	Fri Oct 18 10:37:26 2013 +0000
    44.2 +++ b/src/share/vm/ci/ciInstanceKlass.cpp	Fri Oct 18 19:44:40 2013 -0700
    44.3 @@ -671,7 +671,6 @@
    44.4  
    44.5  
    44.6  void ciInstanceKlass::dump_replay_data(outputStream* out) {
    44.7 -  ASSERT_IN_VM;
    44.8    ResourceMark rm;
    44.9  
   44.10    InstanceKlass* ik = get_instanceKlass();
    45.1 --- a/src/share/vm/ci/ciInstanceKlass.hpp	Fri Oct 18 10:37:26 2013 +0000
    45.2 +++ b/src/share/vm/ci/ciInstanceKlass.hpp	Fri Oct 18 19:44:40 2013 -0700
    45.3 @@ -235,6 +235,13 @@
    45.4    bool is_instance_klass() const { return true; }
    45.5    bool is_java_klass() const     { return true; }
    45.6  
    45.7 +  virtual ciKlass* exact_klass() {
    45.8 +    if (is_loaded() && is_final() && !is_interface()) {
    45.9 +      return this;
   45.10 +    }
   45.11 +    return NULL;
   45.12 +  }
   45.13 +
   45.14    // Dump the current state of this klass for compilation replay.
   45.15    virtual void dump_replay_data(outputStream* out);
   45.16  };
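exact_klass() answers "is this the only concrete class a value of this static type can have?". For an instance klass that means loaded, final, and not an interface; the other ciKlass subclasses supply their own answers in hunks not shown here. A hedged sketch of the expected shape of those overrides (an assumption for orientation, not quoted from the patch):

    // ciTypeArrayKlass::exact_klass() -- primitive array types are always exact:
    //   return this;
    // ciObjArrayKlass::exact_klass()  -- exact iff the base element type is exact
    //   (a loaded final instance klass, or a primitive type).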
    46.1 --- a/src/share/vm/ci/ciKlass.cpp	Fri Oct 18 10:37:26 2013 +0000
    46.2 +++ b/src/share/vm/ci/ciKlass.cpp	Fri Oct 18 19:44:40 2013 -0700
    46.3 @@ -66,7 +66,9 @@
    46.4  // ------------------------------------------------------------------
    46.5  // ciKlass::is_subtype_of
    46.6  bool ciKlass::is_subtype_of(ciKlass* that) {
    46.7 -  assert(is_loaded() && that->is_loaded(), "must be loaded");
    46.8 +  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
    46.9 +  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
   46.10 +
   46.11    // Check to see if the klasses are identical.
   46.12    if (this == that) {
   46.13      return true;
   46.14 @@ -83,8 +85,8 @@
   46.15  // ------------------------------------------------------------------
   46.16  // ciKlass::is_subclass_of
   46.17  bool ciKlass::is_subclass_of(ciKlass* that) {
   46.18 -  assert(is_loaded() && that->is_loaded(), "must be loaded");
   46.19 -  // Check to see if the klasses are identical.
   46.20 +  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
   46.21 +  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
   46.22  
   46.23    VM_ENTRY_MARK;
   46.24    Klass* this_klass = get_Klass();
    47.1 --- a/src/share/vm/ci/ciKlass.hpp	Fri Oct 18 10:37:26 2013 +0000
    47.2 +++ b/src/share/vm/ci/ciKlass.hpp	Fri Oct 18 19:44:40 2013 -0700
    47.3 @@ -41,6 +41,7 @@
    47.4    friend class ciEnv;
    47.5    friend class ciField;
    47.6    friend class ciMethod;
    47.7 +  friend class ciMethodData;
    47.8    friend class ciObjArrayKlass;
    47.9  
   47.10  private:
   47.11 @@ -121,6 +122,8 @@
   47.12    // What kind of ciObject is this?
   47.13    bool is_klass() const { return true; }
   47.14  
   47.15 +  virtual ciKlass* exact_klass() = 0;
   47.16 +
   47.17    void print_name_on(outputStream* st);
   47.18  };
   47.19  
    48.1 --- a/src/share/vm/ci/ciMethod.cpp	Fri Oct 18 10:37:26 2013 +0000
    48.2 +++ b/src/share/vm/ci/ciMethod.cpp	Fri Oct 18 19:44:40 2013 -0700
    48.3 @@ -846,7 +846,9 @@
    48.4  // Return true if allocation was successful or no MDO is required.
    48.5  bool ciMethod::ensure_method_data(methodHandle h_m) {
    48.6    EXCEPTION_CONTEXT;
    48.7 -  if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
    48.8 +  if (is_native() || is_abstract() || h_m()->is_accessor()) {
    48.9 +    return true;
   48.10 +  }
   48.11    if (h_m()->method_data() == NULL) {
   48.12      Method::build_interpreter_method_data(h_m, THREAD);
   48.13      if (HAS_PENDING_EXCEPTION) {
   48.14 @@ -903,22 +905,21 @@
   48.15  // NULL otherwise.
   48.16  ciMethodData* ciMethod::method_data_or_null() {
   48.17    ciMethodData *md = method_data();
   48.18 -  if (md->is_empty()) return NULL;
   48.19 +  if (md->is_empty()) {
   48.20 +    return NULL;
   48.21 +  }
   48.22    return md;
   48.23  }
   48.24  
   48.25  // ------------------------------------------------------------------
   48.26  // ciMethod::ensure_method_counters
   48.27  //
   48.28 -address ciMethod::ensure_method_counters() {
   48.29 +MethodCounters* ciMethod::ensure_method_counters() {
   48.30    check_is_loaded();
   48.31    VM_ENTRY_MARK;
   48.32    methodHandle mh(THREAD, get_Method());
   48.33 -  MethodCounters *counter = mh->method_counters();
   48.34 -  if (counter == NULL) {
   48.35 -    counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL);
   48.36 -  }
   48.37 -  return (address)counter;
   48.38 +  MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL);
   48.39 +  return method_counters;
   48.40  }
   48.41  
   48.42  // ------------------------------------------------------------------
   48.43 @@ -1247,7 +1248,6 @@
   48.44  #undef FETCH_FLAG_FROM_VM
   48.45  
   48.46  void ciMethod::dump_replay_data(outputStream* st) {
   48.47 -  ASSERT_IN_VM;
   48.48    ResourceMark rm;
   48.49    Method* method = get_Method();
   48.50    MethodCounters* mcs = method->method_counters();
    49.1 --- a/src/share/vm/ci/ciMethod.hpp	Fri Oct 18 10:37:26 2013 +0000
    49.2 +++ b/src/share/vm/ci/ciMethod.hpp	Fri Oct 18 19:44:40 2013 -0700
    49.3 @@ -265,7 +265,7 @@
    49.4    bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
    49.5    bool check_call(int refinfo_index, bool is_static) const;
    49.6    bool ensure_method_data();  // make sure it exists in the VM also
    49.7 -  address ensure_method_counters();
    49.8 +  MethodCounters* ensure_method_counters();
    49.9    int instructions_size();
   49.10    int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
   49.11  
    50.1 --- a/src/share/vm/ci/ciMethodData.cpp	Fri Oct 18 10:37:26 2013 +0000
    50.2 +++ b/src/share/vm/ci/ciMethodData.cpp	Fri Oct 18 19:44:40 2013 -0700
    50.3 @@ -78,7 +78,9 @@
    50.4  
    50.5  void ciMethodData::load_data() {
    50.6    MethodData* mdo = get_MethodData();
    50.7 -  if (mdo == NULL) return;
    50.8 +  if (mdo == NULL) {
    50.9 +    return;
   50.10 +  }
   50.11  
   50.12    // To do: don't copy the data if it is not "ripe" -- require a minimum #
   50.13    // of invocations.
   50.14 @@ -123,7 +125,7 @@
   50.15  #endif
   50.16  }
   50.17  
   50.18 -void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) {
   50.19 +void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
   50.20    for (uint row = 0; row < row_limit(); row++) {
   50.21      Klass* k = data->as_ReceiverTypeData()->receiver(row);
   50.22      if (k != NULL) {
   50.23 @@ -134,6 +136,18 @@
   50.24  }
   50.25  
   50.26  
   50.27 +void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
   50.28 +  for (int i = 0; i < _number_of_entries; i++) {
   50.29 +    intptr_t k = entries->type(i);
   50.30 +    TypeStackSlotEntries::set_type(i, translate_klass(k));
   50.31 +  }
   50.32 +}
   50.33 +
   50.34 +void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
   50.35 +  intptr_t k = ret->type();
   50.36 +  set_type(translate_klass(k));
   50.37 +}
   50.38 +
   50.39  // Get the data at an arbitrary (sort of) data index.
   50.40  ciProfileData* ciMethodData::data_at(int data_index) {
   50.41    if (out_of_bounds(data_index)) {
   50.42 @@ -164,6 +178,10 @@
   50.43      return new ciMultiBranchData(data_layout);
   50.44    case DataLayout::arg_info_data_tag:
   50.45      return new ciArgInfoData(data_layout);
   50.46 +  case DataLayout::call_type_data_tag:
   50.47 +    return new ciCallTypeData(data_layout);
   50.48 +  case DataLayout::virtual_call_type_data_tag:
   50.49 +    return new ciVirtualCallTypeData(data_layout);
   50.50    };
   50.51  }
   50.52  
   50.53 @@ -286,6 +304,34 @@
   50.54    }
   50.55  }
   50.56  
   50.57 +void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
   50.58 +  VM_ENTRY_MARK;
   50.59 +  MethodData* mdo = get_MethodData();
   50.60 +  if (mdo != NULL) {
   50.61 +    ProfileData* data = mdo->bci_to_data(bci);
   50.62 +    if (data->is_CallTypeData()) {
   50.63 +      data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
   50.64 +    } else {
   50.65 +      assert(data->is_VirtualCallTypeData(), "no arguments!");
   50.66 +      data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
   50.67 +    }
   50.68 +  }
   50.69 +}
   50.70 +
   50.71 +void ciMethodData::set_return_type(int bci, ciKlass* k) {
   50.72 +  VM_ENTRY_MARK;
   50.73 +  MethodData* mdo = get_MethodData();
   50.74 +  if (mdo != NULL) {
   50.75 +    ProfileData* data = mdo->bci_to_data(bci);
   50.76 +    if (data->is_CallTypeData()) {
   50.77 +      data->as_CallTypeData()->set_return_type(k->get_Klass());
   50.78 +    } else {
   50.79 +      assert(data->is_VirtualCallTypeData(), "no return type!");
   50.80 +      data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
   50.81 +    }
   50.82 +  }
   50.83 +}
   50.84 +
   50.85  bool ciMethodData::has_escape_info() {
   50.86    return eflag_set(MethodData::estimated);
   50.87  }
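set_argument_type()/set_return_type() above are the write-back half of the C1 changes: when profile_arg_type() in c1_LIRGenerator.cpp statically proves the type, the compiler stores it directly into the underlying MethodData (under VM_ENTRY_MARK) instead of emitting profiling code. Usage shape, exactly as in the LIRGenerator hunks above (names from this patch):

    ciKlass* exact = profile_arg_type(md, base_offset, off, /* ... */);
    if (exact != NULL) {
      md->set_argument_type(bci, i, exact);   // recorded once; no runtime update needed
    }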
   50.88 @@ -373,7 +419,6 @@
   50.89  }
   50.90  
   50.91  void ciMethodData::dump_replay_data(outputStream* out) {
   50.92 -  ASSERT_IN_VM;
   50.93    ResourceMark rm;
   50.94    MethodData* mdo = get_MethodData();
   50.95    Method* method = mdo->method();
   50.96 @@ -477,7 +522,50 @@
   50.97    }
   50.98  }
   50.99  
  50.100 -void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {
  50.101 +void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) {
  50.102 +  if (TypeEntries::is_type_none(k)) {
  50.103 +    st->print("none");
  50.104 +  } else if (TypeEntries::is_type_unknown(k)) {
  50.105 +    st->print("unknown");
  50.106 +  } else {
  50.107 +    valid_ciklass(k)->print_name_on(st);
  50.108 +  }
  50.109 +  if (TypeEntries::was_null_seen(k)) {
  50.110 +    st->print(" (null seen)");
  50.111 +  }
  50.112 +}
  50.113 +
  50.114 +void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
  50.115 +  for (int i = 0; i < _number_of_entries; i++) {
  50.116 +    _pd->tab(st);
  50.117 +    st->print("%d: stack (%u) ", i, stack_slot(i));
  50.118 +    print_ciklass(st, type(i));
  50.119 +    st->cr();
  50.120 +  }
  50.121 +}
  50.122 +
  50.123 +void ciReturnTypeEntry::print_data_on(outputStream* st) const {
  50.124 +  _pd->tab(st);
  50.125 +  st->print("ret ");
  50.126 +  print_ciklass(st, type());
  50.127 +  st->cr();
  50.128 +}
  50.129 +
  50.130 +void ciCallTypeData::print_data_on(outputStream* st) const {
  50.131 +  print_shared(st, "ciCallTypeData");
  50.132 +  if (has_arguments()) {
  50.133 +    tab(st, true);
  50.134 +    st->print("argument types");
  50.135 +    args()->print_data_on(st);
  50.136 +  }
  50.137 +  if (has_return()) {
  50.138 +    tab(st, true);
  50.139 +    st->print("return type");
  50.140 +    ret()->print_data_on(st);
  50.141 +  }
  50.142 +}
  50.143 +
  50.144 +void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  50.145    uint row;
  50.146    int entries = 0;
  50.147    for (row = 0; row < row_limit(); row++) {
  50.148 @@ -493,13 +581,28 @@
  50.149    }
  50.150  }
  50.151  
  50.152 -void ciReceiverTypeData::print_data_on(outputStream* st) {
  50.153 +void ciReceiverTypeData::print_data_on(outputStream* st) const {
  50.154    print_shared(st, "ciReceiverTypeData");
  50.155    print_receiver_data_on(st);
  50.156  }
  50.157  
  50.158 -void ciVirtualCallData::print_data_on(outputStream* st) {
  50.159 +void ciVirtualCallData::print_data_on(outputStream* st) const {
  50.160    print_shared(st, "ciVirtualCallData");
  50.161    rtd_super()->print_receiver_data_on(st);
  50.162  }
  50.163 +
  50.164 +void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
  50.165 +  print_shared(st, "ciVirtualCallTypeData");
  50.166 +  rtd_super()->print_receiver_data_on(st);
  50.167 +  if (has_arguments()) {
  50.168 +    tab(st, true);
  50.169 +    st->print("argument types");
  50.170 +    args()->print_data_on(st);
  50.171 +  }
  50.172 +  if (has_return()) {
  50.173 +    tab(st, true);
  50.174 +    st->print("return type");
  50.175 +    ret()->print_data_on(st);
  50.176 +  }
  50.177 +}
  50.178  #endif
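
The translation and printing code above relies on TypeEntries packing a Klass pointer and per-entry status flags into a single intptr_t, which is why ciTypeEntries::translate_klass can carry the status bits over to the ciKlass mirror unchanged. A minimal standalone sketch of that tagged-pointer idea, with illustrative names and an illustrative bit layout (the real TypeEntries defines its own masks):

#include <cassert>
#include <cstdint>

// Illustrative only: pointers are word-aligned, so the low bits are free
// to carry per-entry status flags such as "null was seen here".
static const intptr_t status_mask = 0x3;
static const intptr_t null_seen   = 0x1;

struct FakeKlass { int dummy; };

static intptr_t with_status(FakeKlass* k, intptr_t old_entry) {
  // Keep the old status bits, swap in the new pointer part.
  return (intptr_t)k | (old_entry & status_mask);
}

static FakeKlass* klass_part(intptr_t entry) {
  return (FakeKlass*)(entry & ~status_mask);
}

int main() {
  FakeKlass k;
  intptr_t entry = with_status(&k, null_seen);
  assert(klass_part(entry) == &k);        // pointer survives
  assert((entry & null_seen) != 0);       // and so do the flags
  return 0;
}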
    51.1 --- a/src/share/vm/ci/ciMethodData.hpp	Fri Oct 18 10:37:26 2013 +0000
    51.2 +++ b/src/share/vm/ci/ciMethodData.hpp	Fri Oct 18 19:44:40 2013 -0700
    51.3 @@ -41,6 +41,8 @@
    51.4  class ciArrayData;
    51.5  class ciMultiBranchData;
    51.6  class ciArgInfoData;
    51.7 +class ciCallTypeData;
    51.8 +class ciVirtualCallTypeData;
    51.9  
   51.10  typedef ProfileData ciProfileData;
   51.11  
   51.12 @@ -59,6 +61,103 @@
   51.13    ciJumpData(DataLayout* layout) : JumpData(layout) {};
   51.14  };
   51.15  
   51.16 +class ciTypeEntries {
   51.17 +protected:
   51.18 +  static intptr_t translate_klass(intptr_t k) {
   51.19 +    Klass* v = TypeEntries::valid_klass(k);
   51.20 +    if (v != NULL) {
   51.21 +      ciKlass* klass = CURRENT_ENV->get_klass(v);
   51.22 +      return with_status(klass, k);
   51.23 +    }
   51.24 +    return with_status(NULL, k);
   51.25 +  }
   51.26 +
   51.27 +public:
   51.28 +  static ciKlass* valid_ciklass(intptr_t k) {
   51.29 +    if (!TypeEntries::is_type_none(k) &&
   51.30 +        !TypeEntries::is_type_unknown(k)) {
   51.31 +      return (ciKlass*)TypeEntries::klass_part(k);
   51.32 +    } else {
   51.33 +      return NULL;
   51.34 +    }
   51.35 +  }
   51.36 +
   51.37 +  static intptr_t with_status(ciKlass* k, intptr_t in) {
   51.38 +    return TypeEntries::with_status((intptr_t)k, in);
   51.39 +  }
   51.40 +
   51.41 +#ifndef PRODUCT
   51.42 +  static void print_ciklass(outputStream* st, intptr_t k);
   51.43 +#endif
   51.44 +};
   51.45 +
   51.46 +class ciTypeStackSlotEntries : public TypeStackSlotEntries, ciTypeEntries {
   51.47 +public:
   51.48 +  void translate_type_data_from(const TypeStackSlotEntries* args);
   51.49 +
   51.50 +  ciKlass* valid_type(int i) const {
   51.51 +    return valid_ciklass(type(i));
   51.52 +  }
   51.53 +
   51.54 +#ifndef PRODUCT
   51.55 +  void print_data_on(outputStream* st) const;
   51.56 +#endif
   51.57 +};
   51.58 +
   51.59 +class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries {
   51.60 +public:
   51.61 +  void translate_type_data_from(const ReturnTypeEntry* ret);
   51.62 +
   51.63 +  ciKlass* valid_type() const {
   51.64 +    return valid_ciklass(type());
   51.65 +  }
   51.66 +
   51.67 +#ifndef PRODUCT
   51.68 +  void print_data_on(outputStream* st) const;
   51.69 +#endif
   51.70 +};
   51.71 +
   51.72 +class ciCallTypeData : public CallTypeData {
   51.73 +public:
   51.74 +  ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {}
   51.75 +
   51.76 +  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
   51.77 +  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }
   51.78 +
   51.79 +  void translate_type_data_from(const ProfileData* data) {
   51.80 +    if (has_arguments()) {
   51.81 +      args()->translate_type_data_from(data->as_CallTypeData()->args());
   51.82 +    }
   51.83 +    if (has_return()) {
   51.84 +      ret()->translate_type_data_from(data->as_CallTypeData()->ret());
   51.85 +    }
   51.86 +  }
   51.87 +
   51.88 +  intptr_t argument_type(int i) const {
   51.89 +    assert(has_arguments(), "no arg type profiling data");
   51.90 +    return args()->type(i);
   51.91 +  }
   51.92 +
   51.93 +  ciKlass* valid_argument_type(int i) const {
   51.94 +    assert(has_arguments(), "no arg type profiling data");
   51.95 +    return args()->valid_type(i);
   51.96 +  }
   51.97 +
   51.98 +  intptr_t return_type() const {
   51.99 +    assert(has_return(), "no ret type profiling data");
  51.100 +    return ret()->type();
  51.101 +  }
  51.102 +
  51.103 +  ciKlass* valid_return_type() const {
  51.104 +    assert(has_return(), "no ret type profiling data");
  51.105 +    return ret()->valid_type();
  51.106 +  }
  51.107 +
  51.108 +#ifndef PRODUCT
  51.109 +  void print_data_on(outputStream* st) const;
  51.110 +#endif
  51.111 +};
  51.112 +
  51.113  class ciReceiverTypeData : public ReceiverTypeData {
  51.114  public:
  51.115    ciReceiverTypeData(DataLayout* layout) : ReceiverTypeData(layout) {};
  51.116 @@ -69,7 +168,7 @@
  51.117                    (intptr_t) recv);
  51.118    }
  51.119  
  51.120 -  ciKlass* receiver(uint row) {
  51.121 +  ciKlass* receiver(uint row) const {
  51.122      assert((uint)row < row_limit(), "oob");
  51.123      ciKlass* recv = (ciKlass*)intptr_at(receiver0_offset + row * receiver_type_row_cell_count);
  51.124      assert(recv == NULL || recv->is_klass(), "wrong type");
  51.125 @@ -77,19 +176,19 @@
  51.126    }
  51.127  
  51.128    // Copy & translate from oop based ReceiverTypeData
  51.129 -  virtual void translate_from(ProfileData* data) {
  51.130 +  virtual void translate_from(const ProfileData* data) {
  51.131      translate_receiver_data_from(data);
  51.132    }
  51.133 -  void translate_receiver_data_from(ProfileData* data);
  51.134 +  void translate_receiver_data_from(const ProfileData* data);
  51.135  #ifndef PRODUCT
  51.136 -  void print_data_on(outputStream* st);
  51.137 -  void print_receiver_data_on(outputStream* st);
  51.138 +  void print_data_on(outputStream* st) const;
  51.139 +  void print_receiver_data_on(outputStream* st) const;
  51.140  #endif
  51.141  };
  51.142  
  51.143  class ciVirtualCallData : public VirtualCallData {
  51.144    // Fake multiple inheritance...  It's a ciReceiverTypeData also.
  51.145 -  ciReceiverTypeData* rtd_super() { return (ciReceiverTypeData*) this; }
  51.146 +  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
  51.147  
  51.148  public:
  51.149    ciVirtualCallData(DataLayout* layout) : VirtualCallData(layout) {};
  51.150 @@ -103,11 +202,65 @@
  51.151    }
  51.152  
  51.153    // Copy & translate from oop based VirtualCallData
  51.154 -  virtual void translate_from(ProfileData* data) {
  51.155 +  virtual void translate_from(const ProfileData* data) {
  51.156      rtd_super()->translate_receiver_data_from(data);
  51.157    }
  51.158  #ifndef PRODUCT
  51.159 -  void print_data_on(outputStream* st);
  51.160 +  void print_data_on(outputStream* st) const;
  51.161 +#endif
  51.162 +};
  51.163 +
  51.164 +class ciVirtualCallTypeData : public VirtualCallTypeData {
  51.165 +private:
  51.166 +  // Fake multiple inheritance...  It's a ciReceiverTypeData also.
  51.167 +  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
  51.168 +public:
  51.169 +  ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {}
  51.170 +
  51.171 +  void set_receiver(uint row, ciKlass* recv) {
  51.172 +    rtd_super()->set_receiver(row, recv);
  51.173 +  }
  51.174 +
  51.175 +  ciKlass* receiver(uint row) const {
  51.176 +    return rtd_super()->receiver(row);
  51.177 +  }
  51.178 +
  51.179 +  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
  51.180 +  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); }
  51.181 +
  51.182 +  // Copy & translate from oop based VirtualCallData
  51.183 +  virtual void translate_from(const ProfileData* data) {
  51.184 +    rtd_super()->translate_receiver_data_from(data);
  51.185 +    if (has_arguments()) {
  51.186 +      args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
  51.187 +    }
  51.188 +    if (has_return()) {
  51.189 +      ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret());
  51.190 +    }
  51.191 +  }
  51.192 +
  51.193 +  intptr_t argument_type(int i) const {
  51.194 +    assert(has_arguments(), "no arg type profiling data");
  51.195 +    return args()->type(i);
  51.196 +  }
  51.197 +
  51.198 +  ciKlass* valid_argument_type(int i) const {
  51.199 +    assert(has_arguments(), "no arg type profiling data");
  51.200 +    return args()->valid_type(i);
  51.201 +  }
  51.202 +
  51.203 +  intptr_t return_type() const {
  51.204 +    assert(has_return(), "no ret type profiling data");
  51.205 +    return ret()->type();
  51.206 +  }
  51.207 +
  51.208 +  ciKlass* valid_return_type() const {
  51.209 +    assert(has_return(), "no ret type profiling data");
  51.210 +    return ret()->valid_type();
  51.211 +  }
  51.212 +
  51.213 +#ifndef PRODUCT
  51.214 +  void print_data_on(outputStream* st) const;
  51.215  #endif
  51.216  };
  51.217  
  51.218 @@ -232,8 +385,6 @@
  51.219  public:
  51.220    bool is_method_data() const { return true; }
  51.221  
  51.222 -  void set_mature() { _state = mature_state; }
  51.223 -
  51.224    bool is_empty()  { return _state == empty_state; }
  51.225    bool is_mature() { return _state == mature_state; }
  51.226  
  51.227 @@ -249,6 +400,10 @@
   51.228    // Also set the number of loops and blocks in the method.
  51.229    // Again, this is used to determine if a method is trivial.
  51.230    void set_compilation_stats(short loops, short blocks);
  51.231 +  // If the compiler finds a profiled type that is known statically
  51.232 +  // for sure, set it in the MethodData
  51.233 +  void set_argument_type(int bci, int i, ciKlass* k);
  51.234 +  void set_return_type(int bci, ciKlass* k);
  51.235  
  51.236    void load_data();
  51.237  
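ciVirtualCallData and the new ciVirtualCallTypeData reuse ciReceiverTypeData's receiver logic through the rtd_super() cast rather than C++ multiple inheritance. The cast is sound in this codebase because the ci wrappers add no fields of their own; each class is a different stateless view over the same DataLayout. A reduced sketch of that pattern, with stand-in types:

// Illustrative sketch of the rtd_super() trick: two stateless wrapper
// classes over the same underlying layout can be cast into one another
// even without a shared C++ base class, because their object layout is
// identical (a single layout pointer).
struct DataLayout { long cells[4]; };

struct ReceiverTypeView {
  DataLayout* _layout;
  long receiver(int row) const { return _layout->cells[row]; }
};

struct VirtualCallView {
  DataLayout* _layout;   // same layout as ReceiverTypeView
  ReceiverTypeView* rtd_super() const { return (ReceiverTypeView*)this; }
  long receiver(int row) const { return rtd_super()->receiver(row); }
};
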
    52.1 --- a/src/share/vm/ci/ciObjArrayKlass.cpp	Fri Oct 18 10:37:26 2013 +0000
    52.2 +++ b/src/share/vm/ci/ciObjArrayKlass.cpp	Fri Oct 18 19:44:40 2013 -0700
    52.3 @@ -179,3 +179,16 @@
    52.4  ciObjArrayKlass* ciObjArrayKlass::make(ciKlass* element_klass) {
    52.5    GUARDED_VM_ENTRY(return make_impl(element_klass);)
    52.6  }
    52.7 +
    52.8 +ciKlass* ciObjArrayKlass::exact_klass() {
    52.9 +  ciType* base = base_element_type();
   52.10 +  if (base->is_instance_klass()) {
   52.11 +    ciInstanceKlass* ik = base->as_instance_klass();
   52.12 +    if (ik->exact_klass() != NULL) {
   52.13 +      return this;
   52.14 +    }
   52.15 +  } else if (base->is_primitive_type()) {
   52.16 +    return this;
   52.17 +  }
   52.18 +  return NULL;
   52.19 +}
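
The new exact_klass() encodes a simple rule: an object array type is exact (can have no subtypes) when its base element type is a primitive or is itself exact; for example, int[][] and String[] are exact while Object[] is not. A hedged sketch of the same decision with simplified stand-in types:

// Simplified stand-ins for the ci hierarchy; names are illustrative.
struct BaseElement {
  bool is_primitive;
  bool is_exact_class;   // stands in for ik->exact_klass() != NULL
};

// Mirrors the shape of ciObjArrayKlass::exact_klass(): the array type is
// exact exactly when no subtype of its element type can exist.
static bool array_is_exact(const BaseElement& base) {
  if (base.is_primitive) return true;   // e.g. int[][]
  return base.is_exact_class;           // e.g. String[], but not Object[]
}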
    53.1 --- a/src/share/vm/ci/ciObjArrayKlass.hpp	Fri Oct 18 10:37:26 2013 +0000
    53.2 +++ b/src/share/vm/ci/ciObjArrayKlass.hpp	Fri Oct 18 19:44:40 2013 -0700
    53.3 @@ -73,6 +73,8 @@
    53.4    bool is_obj_array_klass() const { return true; }
    53.5  
    53.6    static ciObjArrayKlass* make(ciKlass* element_klass);
    53.7 +
    53.8 +  virtual ciKlass* exact_klass();
    53.9  };
   53.10  
   53.11  #endif // SHARE_VM_CI_CIOBJARRAYKLASS_HPP
    54.1 --- a/src/share/vm/ci/ciReplay.cpp	Fri Oct 18 10:37:26 2013 +0000
    54.2 +++ b/src/share/vm/ci/ciReplay.cpp	Fri Oct 18 19:44:40 2013 -0700
    54.3 @@ -965,14 +965,12 @@
    54.4      tty->cr();
    54.5    } else {
    54.6      EXCEPTION_CONTEXT;
    54.7 -    MethodCounters* mcs = method->method_counters();
    54.8      // m->_instructions_size = rec->instructions_size;
    54.9      m->_instructions_size = -1;
   54.10      m->_interpreter_invocation_count = rec->interpreter_invocation_count;
   54.11      m->_interpreter_throwout_count = rec->interpreter_throwout_count;
   54.12 -    if (mcs == NULL) {
   54.13 -      mcs = Method::build_method_counters(method, CHECK_AND_CLEAR);
   54.14 -    }
   54.15 +    MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
   54.16 +    guarantee(mcs != NULL, "method counters allocation failed");
   54.17      mcs->invocation_counter()->_counter = rec->invocation_counter;
   54.18      mcs->backedge_counter()->_counter = rec->backedge_counter;
   54.19    }
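
The replay fix above replaces an explicit NULL check plus build_method_counters() call with the single Method::get_method_counters() accessor, so every caller shares one lazy-initialization path. A minimal sketch of that accessor shape (types simplified; the real accessor takes TRAPS and can fail):

// Illustrative lazy-init accessor: allocate the side structure on first
// use, cache it, and hand every caller the same instance.
struct Counters { long invocation_counter; long backedge_counter; };

struct MethodStub {
  Counters* _counters;

  MethodStub() : _counters(0) {}

  Counters* get_method_counters() {
    if (_counters == 0) {
      _counters = new Counters();   // the real VM can fail here (TRAPS)
    }
    return _counters;
  }
};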
    55.1 --- a/src/share/vm/ci/ciStreams.hpp	Fri Oct 18 10:37:26 2013 +0000
    55.2 +++ b/src/share/vm/ci/ciStreams.hpp	Fri Oct 18 19:44:40 2013 -0700
    55.3 @@ -277,11 +277,14 @@
    55.4  class ciSignatureStream : public StackObj {
    55.5  private:
    55.6    ciSignature* _sig;
    55.7 -  int    _pos;
    55.8 +  int          _pos;
    55.9 +  // holder is a method's holder
   55.10 +  ciKlass*     _holder;
   55.11  public:
   55.12 -  ciSignatureStream(ciSignature* signature) {
   55.13 +  ciSignatureStream(ciSignature* signature, ciKlass* holder = NULL) {
   55.14      _sig = signature;
   55.15      _pos = 0;
   55.16 +    _holder = holder;
   55.17    }
   55.18  
   55.19    bool at_return_type() { return _pos == _sig->count(); }
   55.20 @@ -301,6 +304,23 @@
   55.21        return _sig->type_at(_pos);
   55.22      }
   55.23    }
   55.24 +
   55.25 +  // next klass in the signature
   55.26 +  ciKlass* next_klass() {
   55.27 +    ciKlass* sig_k;
   55.28 +    if (_holder != NULL) {
   55.29 +      sig_k = _holder;
   55.30 +      _holder = NULL;
   55.31 +    } else {
   55.32 +      while (!type()->is_klass()) {
   55.33 +        next();
   55.34 +      }
   55.35 +      assert(!at_return_type(), "passed end of signature");
   55.36 +      sig_k = type()->as_klass();
   55.37 +      next();
   55.38 +    }
   55.39 +    return sig_k;
   55.40 +  }
   55.41  };
   55.42  
   55.43  
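next_klass() gives type-profiling code one iterator over exactly the reference types involved in a call: the method holder first, if one was passed to the constructor, then each object type in the signature with primitive slots skipped. A self-contained analogue of that traversal order, with indices standing in for ciKlass pointers:

#include <cassert>
#include <cstddef>
#include <vector>

enum Slot { PRIM, REF };

struct SigStream {
  std::vector<Slot> sig;
  size_t pos;
  bool have_holder;

  // Returns -1 for the holder, then the index of each reference slot.
  int next_klass() {
    if (have_holder) {
      have_holder = false;             // the holder is consumed exactly once
      return -1;
    }
    while (sig[pos] == PRIM) pos++;    // skip primitive slots, like the patch
    return (int)pos++;
  }
};

int main() {
  SigStream s{{PRIM, REF, PRIM, REF}, 0, true};
  assert(s.next_klass() == -1);   // holder comes first
  assert(s.next_klass() == 1);    // then each reference argument in order
  assert(s.next_klass() == 3);
  return 0;
}
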
    56.1 --- a/src/share/vm/ci/ciTypeArrayKlass.hpp	Fri Oct 18 10:37:26 2013 +0000
    56.2 +++ b/src/share/vm/ci/ciTypeArrayKlass.hpp	Fri Oct 18 19:44:40 2013 -0700
    56.3 @@ -57,6 +57,10 @@
    56.4  
    56.5    // Make an array klass corresponding to the specified primitive type.
    56.6    static ciTypeArrayKlass* make(BasicType type);
    56.7 +
    56.8 +  virtual ciKlass* exact_klass() {
    56.9 +    return this;
   56.10 +  }
   56.11  };
   56.12  
   56.13  #endif // SHARE_VM_CI_CITYPEARRAYKLASS_HPP
    57.1 --- a/src/share/vm/classfile/defaultMethods.cpp	Fri Oct 18 10:37:26 2013 +0000
    57.2 +++ b/src/share/vm/classfile/defaultMethods.cpp	Fri Oct 18 19:44:40 2013 -0700
    57.3 @@ -857,7 +857,6 @@
    57.4    m->set_max_locals(params);
    57.5    m->constMethod()->set_stackmap_data(NULL);
    57.6    m->set_code(code_start);
    57.7 -  m->set_force_inline(true);
    57.8  
    57.9    return m;
   57.10  }
    58.1 --- a/src/share/vm/classfile/verifier.cpp	Fri Oct 18 10:37:26 2013 +0000
    58.2 +++ b/src/share/vm/classfile/verifier.cpp	Fri Oct 18 19:44:40 2013 -0700
    58.3 @@ -2439,19 +2439,19 @@
    58.4               && !ref_class_type.equals(current_type())
    58.5               && !ref_class_type.equals(VerificationType::reference_type(
    58.6                    current_class()->super()->name()))) {
    58.7 -    bool subtype = ref_class_type.is_assignable_from(
    58.8 -      current_type(), this, CHECK_VERIFY(this));
    58.9 +    bool subtype = false;
   58.10 +    if (!current_class()->is_anonymous()) {
   58.11 +      subtype = ref_class_type.is_assignable_from(
   58.12 +                 current_type(), this, CHECK_VERIFY(this));
   58.13 +    } else {
   58.14 +      subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
   58.15 +                 current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
   58.16 +    }
   58.17      if (!subtype) {
   58.18 -      if (current_class()->is_anonymous()) {
   58.19 -        subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
   58.20 -                   current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
   58.21 -      }
   58.22 -      if (!subtype) {
   58.23 -        verify_error(ErrorContext::bad_code(bci),
   58.24 -            "Bad invokespecial instruction: "
   58.25 -            "current class isn't assignable to reference class.");
   58.26 -         return;
   58.27 -      }
   58.28 +      verify_error(ErrorContext::bad_code(bci),
   58.29 +          "Bad invokespecial instruction: "
   58.30 +          "current class isn't assignable to reference class.");
   58.31 +       return;
   58.32      }
   58.33    }
   58.34    // Match method descriptor with operand stack
   58.35 @@ -2470,17 +2470,13 @@
   58.36          if (!current_class()->is_anonymous()) {
   58.37            current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
   58.38          } else {
   58.39 -          // anonymous class invokespecial calls: either the
   58.40 -          // operand stack/objectref  is a subtype of the current class OR
   58.41 -          // the objectref is a subtype of the host_klass of the current class
   58.42 +          // anonymous class invokespecial calls: check if the
   58.43 +          // objectref is a subtype of the host_klass of the current class
   58.44            // to allow an anonymous class to reference methods in the host_klass
   58.45            VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
   58.46 -          bool subtype = current_type().is_assignable_from(top, this, CHECK_VERIFY(this));
   58.47 -          if (!subtype) {
   58.48 -            VerificationType hosttype =
   58.49 -              VerificationType::reference_type(current_class()->host_klass()->name());
   58.50 -            subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
   58.51 -          }
   58.52 +          VerificationType hosttype =
   58.53 +            VerificationType::reference_type(current_class()->host_klass()->name());
   58.54 +          bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
   58.55            if (!subtype) {
   58.56              verify_error( ErrorContext::bad_type(current_frame->offset(),
   58.57                current_frame->stack_top_ctx(),
    59.1 --- a/src/share/vm/code/codeBlob.cpp	Fri Oct 18 10:37:26 2013 +0000
    59.2 +++ b/src/share/vm/code/codeBlob.cpp	Fri Oct 18 19:44:40 2013 -0700
    59.3 @@ -245,8 +245,8 @@
    59.4  }
    59.5  
    59.6  
    59.7 -void* BufferBlob::operator new(size_t s, unsigned size) throw() {
    59.8 -  void* p = CodeCache::allocate(size);
    59.9 +void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
   59.10 +  void* p = CodeCache::allocate(size, is_critical);
   59.11    return p;
   59.12  }
   59.13  
   59.14 @@ -277,7 +277,10 @@
   59.15    unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
   59.16    {
   59.17      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   59.18 -    blob = new (size) AdapterBlob(size, cb);
   59.19 +    // The parameter 'true' indicates a critical memory allocation.
   59.20 +    // This means that CodeCacheMinimumFreeSpace is used, if necessary
   59.21 +    const bool is_critical = true;
   59.22 +    blob = new (size, is_critical) AdapterBlob(size, cb);
   59.23    }
   59.24    // Track memory usage statistic after releasing CodeCache_lock
   59.25    MemoryService::track_code_cache_memory_usage();
   59.26 @@ -299,7 +302,10 @@
   59.27    size += round_to(buffer_size, oopSize);
   59.28    {
   59.29      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   59.30 -    blob = new (size) MethodHandlesAdapterBlob(size);
   59.31 +    // The parameter 'true' indicates a critical memory allocation.
   59.32 +    // This means that CodeCacheMinimumFreeSpace is used, if necessary
   59.33 +    const bool is_critical = true;
   59.34 +    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
   59.35    }
   59.36    // Track memory usage statistic after releasing CodeCache_lock
   59.37    MemoryService::track_code_cache_memory_usage();
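
Both allocation sites above now pass is_critical = true through an overloaded placement operator new, letting the adapter blobs dip into the CodeCacheMinimumFreeSpace reserve. A reduced sketch of that overloaded-operator-new shape; the allocator below is a stand-in for CodeCache::allocate:

#include <cstdlib>
#include <new>

// Stand-in allocator: a real implementation would consult the reserve
// when is_critical is set instead of failing the allocation.
static void* cache_allocate(unsigned size, bool is_critical) {
  (void)is_critical;
  return std::malloc(size);
}

struct Blob {
  // Extra placement-new parameters let each call site state its policy.
  void* operator new(size_t /*s*/, unsigned size, bool is_critical = false) throw() {
    return cache_allocate(size, is_critical);
  }
  void operator delete(void* p) { std::free(p); }
  // Matching placement delete, used only if a constructor throws.
  void operator delete(void* p, unsigned, bool) { std::free(p); }
};

int main() {
  const bool is_critical = true;
  Blob* b = new (64, is_critical) Blob();  // mirrors 'new (size, is_critical) AdapterBlob(...)'
  delete b;
  return 0;
}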
    60.1 --- a/src/share/vm/code/codeBlob.hpp	Fri Oct 18 10:37:26 2013 +0000
    60.2 +++ b/src/share/vm/code/codeBlob.hpp	Fri Oct 18 19:44:40 2013 -0700
    60.3 @@ -209,7 +209,7 @@
    60.4    BufferBlob(const char* name, int size);
    60.5    BufferBlob(const char* name, int size, CodeBuffer* cb);
    60.6  
    60.7 -  void* operator new(size_t s, unsigned size) throw();
    60.8 +  void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
    60.9  
   60.10   public:
   60.11    // Creation
   60.12 @@ -253,7 +253,6 @@
   60.13  class MethodHandlesAdapterBlob: public BufferBlob {
   60.14  private:
   60.15    MethodHandlesAdapterBlob(int size)                 : BufferBlob("MethodHandles adapters", size) {}
   60.16 -  MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
   60.17  
   60.18  public:
   60.19    // Creation
    61.1 --- a/src/share/vm/compiler/abstractCompiler.cpp	Fri Oct 18 10:37:26 2013 +0000
    61.2 +++ b/src/share/vm/compiler/abstractCompiler.cpp	Fri Oct 18 19:44:40 2013 -0700
    61.3 @@ -24,41 +24,42 @@
    61.4  
    61.5  #include "precompiled.hpp"
    61.6  #include "compiler/abstractCompiler.hpp"
    61.7 +#include "compiler/compileBroker.hpp"
    61.8  #include "runtime/mutexLocker.hpp"
    61.9 -void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
   61.10 -  if (*state != initialized) {
   61.11  
   61.12 -    // We are thread in native here...
   61.13 -    CompilerThread* thread = CompilerThread::current();
   61.14 -    bool do_initialization = false;
   61.15 -    {
   61.16 -      ThreadInVMfromNative tv(thread);
   61.17 -      ResetNoHandleMark rnhm;
   61.18 -      MutexLocker only_one(CompileThread_lock, thread);
   61.19 -      if ( *state == uninitialized) {
   61.20 -        do_initialization = true;
   61.21 -        *state = initializing;
   61.22 -      } else {
   61.23 -        while (*state == initializing ) {
   61.24 -          CompileThread_lock->wait();
   61.25 -        }
   61.26 +bool AbstractCompiler::should_perform_init() {
   61.27 +  if (_compiler_state != initialized) {
   61.28 +    MutexLocker only_one(CompileThread_lock);
   61.29 +
   61.30 +    if (_compiler_state == uninitialized) {
   61.31 +      _compiler_state = initializing;
   61.32 +      return true;
   61.33 +    } else {
   61.34 +      while (_compiler_state == initializing) {
   61.35 +        CompileThread_lock->wait();
   61.36        }
   61.37      }
   61.38 -    if (do_initialization) {
   61.39 -      // We can not hold any locks here since JVMTI events may call agents
   61.40 +  }
   61.41 +  return false;
   61.42 +}
   61.43  
   61.44 -      // Compiler(s) run as native
   61.45 +bool AbstractCompiler::should_perform_shutdown() {
   61.46 +  // Since this method can be called by multiple threads, the lock ensures atomicity of
   61.47 +  // decrementing '_num_compiler_threads' and the following operations.
   61.48 +  MutexLocker only_one(CompileThread_lock);
   61.49 +  _num_compiler_threads--;
    61.50 +  assert(CompileBroker::is_compilation_disabled_forever(), "Must be set, otherwise thread waits forever");
   61.51  
   61.52 -      (*f)();
   61.53 +  // Only the last thread will perform shutdown operations
   61.54 +  if (_num_compiler_threads == 0) {
   61.55 +    return true;
   61.56 +  }
   61.57 +  return false;
   61.58 +}
   61.59  
   61.60 -      // To in_vm so we can use the lock
   61.61 -
   61.62 -      ThreadInVMfromNative tv(thread);
   61.63 -      ResetNoHandleMark rnhm;
   61.64 -      MutexLocker only_one(CompileThread_lock, thread);
   61.65 -      assert(*state == initializing, "wrong state");
   61.66 -      *state = initialized;
   61.67 -      CompileThread_lock->notify_all();
   61.68 -    }
   61.69 -  }
   61.70 +void AbstractCompiler::set_state(int state) {
    61.71 +  // Ensure that the state is only set by one thread at a time
    61.72 +  MutexLocker only_one(CompileThread_lock);
    61.73 +  _compiler_state = state;
   61.74 +  CompileThread_lock->notify_all();
   61.75  }
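
should_perform_init() elects exactly one compiler thread per runtime to do the initialization; later arrivals block on CompileThread_lock until set_state() moves the state out of 'initializing'. A portable sketch of the same election protocol using standard-library primitives (HotSpot uses its own Mutex with wait/notify instead):

#include <condition_variable>
#include <mutex>

enum State { uninitialized, initializing, initialized, failed };

static std::mutex              state_lock;
static std::condition_variable state_cv;
static State                   state = uninitialized;

// The first caller wins and must eventually call set_state(); everyone
// else waits here until initialization has finished (or failed).
static bool should_perform_init() {
  std::unique_lock<std::mutex> l(state_lock);
  if (state == uninitialized) {
    state = initializing;
    return true;
  }
  while (state == initializing) {
    state_cv.wait(l);              // mirrors CompileThread_lock->wait()
  }
  return false;
}

static void set_state(State s) {
  std::lock_guard<std::mutex> l(state_lock);
  state = s;
  state_cv.notify_all();           // wake the threads parked above
}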
    62.1 --- a/src/share/vm/compiler/abstractCompiler.hpp	Fri Oct 18 10:37:26 2013 +0000
    62.2 +++ b/src/share/vm/compiler/abstractCompiler.hpp	Fri Oct 18 19:44:40 2013 -0700
    62.3 @@ -27,22 +27,25 @@
    62.4  
    62.5  #include "ci/compilerInterface.hpp"
    62.6  
    62.7 -typedef void (*initializer)(void);
    62.8 -
    62.9  class AbstractCompiler : public CHeapObj<mtCompiler> {
   62.10   private:
   62.11 -  bool _is_initialized; // Mark whether compiler object is initialized
   62.12 +  volatile int _num_compiler_threads;
   62.13  
   62.14   protected:
   62.15 +  volatile int _compiler_state;
   62.16    // Used for tracking global state of compiler runtime initialization
   62.17 -  enum { uninitialized, initializing, initialized };
   62.18 +  enum { uninitialized, initializing, initialized, failed, shut_down };
   62.19  
   62.20 -  // This method will call the initialization method "f" once (per compiler class/subclass)
   62.21 -  // and do so without holding any locks
   62.22 -  void initialize_runtimes(initializer f, volatile int* state);
    62.23 +  // This method returns true for the first compiler thread that reaches this method.
   62.24 +  // This thread will initialize the compiler runtime.
   62.25 +  bool should_perform_init();
   62.26  
   62.27   public:
   62.28 -  AbstractCompiler() : _is_initialized(false)    {}
   62.29 +  AbstractCompiler() : _compiler_state(uninitialized), _num_compiler_threads(0) {}
   62.30 +
   62.31 +  // This function determines the compiler thread that will perform the
   62.32 +  // shutdown of the corresponding compiler runtime.
   62.33 +  bool should_perform_shutdown();
   62.34  
   62.35    // Name of this compiler
   62.36    virtual const char* name() = 0;
   62.37 @@ -74,17 +77,18 @@
   62.38  #endif // TIERED
   62.39  
   62.40    // Customization
   62.41 -  virtual bool needs_stubs            ()         = 0;
   62.42 +  virtual void initialize () = 0;
   62.43  
   62.44 -  void mark_initialized()                        { _is_initialized = true; }
   62.45 -  bool is_initialized()                          { return _is_initialized; }
   62.46 +  void set_num_compiler_threads(int num) { _num_compiler_threads = num;  }
   62.47 +  int num_compiler_threads()             { return _num_compiler_threads; }
   62.48  
   62.49 -  virtual void initialize()                      = 0;
   62.50 -
   62.51 +  // Get/set state of compiler objects
   62.52 +  bool is_initialized()           { return _compiler_state == initialized; }
    62.53 +  bool is_failed     ()           { return _compiler_state == failed; }
   62.54 +  void set_state     (int state);
   62.55 +  void set_shut_down ()           { set_state(shut_down); }
   62.56    // Compilation entry point for methods
   62.57 -  virtual void compile_method(ciEnv* env,
   62.58 -                              ciMethod* target,
   62.59 -                              int entry_bci) {
   62.60 +  virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
   62.61      ShouldNotReachHere();
   62.62    }
   62.63  
    63.1 --- a/src/share/vm/compiler/compileBroker.cpp	Fri Oct 18 10:37:26 2013 +0000
    63.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Fri Oct 18 19:44:40 2013 -0700
    63.3 @@ -186,7 +186,7 @@
    63.4  CompileQueue* CompileBroker::_c1_method_queue    = NULL;
    63.5  CompileTask*  CompileBroker::_task_free_list     = NULL;
    63.6  
    63.7 -GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
    63.8 +GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
    63.9  
   63.10  
   63.11  class CompilationLog : public StringEventLog {
   63.12 @@ -587,9 +587,6 @@
   63.13  
   63.14  
   63.15  
   63.16 -// ------------------------------------------------------------------
   63.17 -// CompileQueue::add
   63.18 -//
   63.19  // Add a CompileTask to a CompileQueue
   63.20  void CompileQueue::add(CompileTask* task) {
   63.21    assert(lock()->owned_by_self(), "must own lock");
   63.22 @@ -626,6 +623,16 @@
   63.23    lock()->notify_all();
   63.24  }
   63.25  
   63.26 +void CompileQueue::delete_all() {
   63.27 +  assert(lock()->owned_by_self(), "must own lock");
   63.28 +  if (_first != NULL) {
    63.29 +    for (CompileTask* task = _first; task != NULL; ) {
    63.30 +      CompileTask* next = task->next(); delete task; task = next;
    63.31 +    }
   63.32 +    _first = NULL;
   63.33 +  }
   63.34 +}
   63.35 +
   63.36  // ------------------------------------------------------------------
   63.37  // CompileQueue::get
   63.38  //
   63.39 @@ -640,6 +647,11 @@
   63.40    // case we perform code cache sweeps to free memory such that we can re-enable
   63.41    // compilation.
   63.42    while (_first == NULL) {
   63.43 +    // Exit loop if compilation is disabled forever
   63.44 +    if (CompileBroker::is_compilation_disabled_forever()) {
   63.45 +      return NULL;
   63.46 +    }
   63.47 +
   63.48      if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
   63.49        // Wait a certain amount of time to possibly do another sweep.
   63.50        // We must wait until stack scanning has happened so that we can
   63.51 @@ -664,9 +676,17 @@
   63.52        // remains unchanged. This behavior is desired, since we want to keep
   63.53        // the stable state, i.e., we do not want to evict methods from the
   63.54        // code cache if it is unnecessary.
   63.55 -      lock()->wait();
   63.56 +      // We need a timed wait here, since compiler threads can exit if compilation
   63.57 +      // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
   63.58 +      // is not critical and we do not want idle compiler threads to wake up too often.
   63.59 +      lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
   63.60      }
   63.61    }
   63.62 +
   63.63 +  if (CompileBroker::is_compilation_disabled_forever()) {
   63.64 +    return NULL;
   63.65 +  }
   63.66 +
   63.67    CompileTask* task = CompilationPolicy::policy()->select_task(this);
   63.68    remove(task);
   63.69    return task;
   63.70 @@ -891,10 +911,8 @@
   63.71  }
   63.72  
   63.73  
   63.74 -
   63.75 -// ------------------------------------------------------------------
   63.76 -// CompileBroker::make_compiler_thread
   63.77 -CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS) {
   63.78 +CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
   63.79 +                                                    AbstractCompiler* comp, TRAPS) {
   63.80    CompilerThread* compiler_thread = NULL;
   63.81  
   63.82    Klass* k =
   63.83 @@ -961,6 +979,7 @@
   63.84      java_lang_Thread::set_daemon(thread_oop());
   63.85  
   63.86      compiler_thread->set_threadObj(thread_oop());
   63.87 +    compiler_thread->set_compiler(comp);
   63.88      Threads::add(compiler_thread);
   63.89      Thread::start(compiler_thread);
   63.90    }
   63.91 @@ -972,25 +991,24 @@
   63.92  }
   63.93  
   63.94  
   63.95 -// ------------------------------------------------------------------
   63.96 -// CompileBroker::init_compiler_threads
   63.97 -//
   63.98 -// Initialize the compilation queue
   63.99  void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
  63.100    EXCEPTION_MARK;
  63.101  #if !defined(ZERO) && !defined(SHARK)
  63.102    assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
  63.103  #endif // !ZERO && !SHARK
  63.104 +  // Initialize the compilation queue
  63.105    if (c2_compiler_count > 0) {
  63.106      _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
  63.107 +    _compilers[1]->set_num_compiler_threads(c2_compiler_count);
  63.108    }
  63.109    if (c1_compiler_count > 0) {
  63.110      _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
  63.111 +    _compilers[0]->set_num_compiler_threads(c1_compiler_count);
  63.112    }
  63.113  
  63.114    int compiler_count = c1_compiler_count + c2_compiler_count;
  63.115  
  63.116 -  _method_threads =
  63.117 +  _compiler_threads =
  63.118      new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
  63.119  
  63.120    char name_buffer[256];
  63.121 @@ -998,21 +1016,22 @@
  63.122      // Create a name for our thread.
  63.123      sprintf(name_buffer, "C2 CompilerThread%d", i);
  63.124      CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  63.125 -    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
  63.126 -    _method_threads->append(new_thread);
  63.127 +    // Shark and C2
  63.128 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
  63.129 +    _compiler_threads->append(new_thread);
  63.130    }
  63.131  
  63.132    for (int i = c2_compiler_count; i < compiler_count; i++) {
  63.133      // Create a name for our thread.
  63.134      sprintf(name_buffer, "C1 CompilerThread%d", i);
  63.135      CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  63.136 -    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
  63.137 -    _method_threads->append(new_thread);
  63.138 +    // C1
  63.139 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
  63.140 +    _compiler_threads->append(new_thread);
  63.141    }
  63.142  
  63.143    if (UsePerfData) {
  63.144 -    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
  63.145 -                                     compiler_count, CHECK);
  63.146 +    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
  63.147    }
  63.148  }
  63.149  
  63.150 @@ -1029,27 +1048,6 @@
  63.151  }
  63.152  
  63.153  // ------------------------------------------------------------------
  63.154 -// CompileBroker::is_idle
  63.155 -bool CompileBroker::is_idle() {
  63.156 -  if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
  63.157 -    return false;
  63.158 -  } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
  63.159 -    return false;
  63.160 -  } else {
  63.161 -    int num_threads = _method_threads->length();
  63.162 -    for (int i=0; i<num_threads; i++) {
  63.163 -      if (_method_threads->at(i)->task() != NULL) {
  63.164 -        return false;
  63.165 -      }
  63.166 -    }
  63.167 -
  63.168 -    // No pending or active compilations.
  63.169 -    return true;
  63.170 -  }
  63.171 -}
  63.172 -
  63.173 -
  63.174 -// ------------------------------------------------------------------
  63.175  // CompileBroker::compile_method
  63.176  //
  63.177  // Request compilation of a method.
  63.178 @@ -1551,6 +1549,101 @@
  63.179    free_task(task);
  63.180  }
  63.181  
  63.182 +// Initialize compiler thread(s) + compiler object(s). The postcondition
  63.183 +// of this function is that the compiler runtimes are initialized and that
   63.184 +// compiler threads can start compiling.
  63.185 +bool CompileBroker::init_compiler_runtime() {
  63.186 +  CompilerThread* thread = CompilerThread::current();
  63.187 +  AbstractCompiler* comp = thread->compiler();
  63.188 +  // Final sanity check - the compiler object must exist
  63.189 +  guarantee(comp != NULL, "Compiler object must exist");
  63.190 +
  63.191 +  int system_dictionary_modification_counter;
  63.192 +  {
  63.193 +    MutexLocker locker(Compile_lock, thread);
  63.194 +    system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
  63.195 +  }
  63.196 +
  63.197 +  {
  63.198 +    // Must switch to native to allocate ci_env
  63.199 +    ThreadToNativeFromVM ttn(thread);
  63.200 +    ciEnv ci_env(NULL, system_dictionary_modification_counter);
  63.201 +    // Cache Jvmti state
  63.202 +    ci_env.cache_jvmti_state();
  63.203 +    // Cache DTrace flags
  63.204 +    ci_env.cache_dtrace_flags();
  63.205 +
  63.206 +    // Switch back to VM state to do compiler initialization
  63.207 +    ThreadInVMfromNative tv(thread);
  63.208 +    ResetNoHandleMark rnhm;
  63.209 +
  63.210 +
  63.211 +    if (!comp->is_shark()) {
  63.212 +      // Perform per-thread and global initializations
  63.213 +      comp->initialize();
  63.214 +    }
  63.215 +  }
  63.216 +
  63.217 +  if (comp->is_failed()) {
  63.218 +    disable_compilation_forever();
  63.219 +    // If compiler initialization failed, no compiler thread that is specific to a
  63.220 +    // particular compiler runtime will ever start to compile methods.
  63.221 +
  63.222 +    shutdown_compiler_runtime(comp, thread);
  63.223 +    return false;
  63.224 +  }
  63.225 +
  63.226 +  // C1 specific check
  63.227 +  if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) {
  63.228 +    warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
  63.229 +    return false;
  63.230 +  }
  63.231 +
  63.232 +  return true;
  63.233 +}
  63.234 +
  63.235 +// If C1 and/or C2 initialization failed, we shut down all compilation.
  63.236 +// We do this to keep things simple. This can be changed if it ever turns out to be
  63.237 +// a problem.
  63.238 +void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
  63.239 +  // Free buffer blob, if allocated
  63.240 +  if (thread->get_buffer_blob() != NULL) {
  63.241 +    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  63.242 +    CodeCache::free(thread->get_buffer_blob());
  63.243 +  }
  63.244 +
  63.245 +  if (comp->should_perform_shutdown()) {
  63.246 +    // There are two reasons for shutting down the compiler
  63.247 +    // 1) compiler runtime initialization failed
  63.248 +    // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
  63.249 +    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
  63.250 +
  63.251 +    // Only one thread per compiler runtime object enters here
  63.252 +    // Set state to shut down
  63.253 +    comp->set_shut_down();
  63.254 +
  63.255 +    MutexLocker mu(MethodCompileQueue_lock, thread);
  63.256 +    CompileQueue* queue;
  63.257 +    if (_c1_method_queue != NULL) {
  63.258 +      _c1_method_queue->delete_all();
  63.259 +      queue = _c1_method_queue;
  63.260 +      _c1_method_queue = NULL;
   63.261 +      delete queue;
  63.262 +    }
  63.263 +
  63.264 +    if (_c2_method_queue != NULL) {
  63.265 +      _c2_method_queue->delete_all();
  63.266 +      queue = _c2_method_queue;
  63.267 +      _c2_method_queue = NULL;
   63.268 +      delete queue;
  63.269 +    }
  63.270 +
   63.271 +    // We could also delete the compiler runtimes. However, there are references to
   63.272 +    // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which would then
   63.273 +    // fail. This can be done later if necessary.
  63.274 +  }
  63.275 +}
  63.276 +
  63.277  // ------------------------------------------------------------------
  63.278  // CompileBroker::compiler_thread_loop
  63.279  //
  63.280 @@ -1558,7 +1651,6 @@
  63.281  void CompileBroker::compiler_thread_loop() {
  63.282    CompilerThread* thread = CompilerThread::current();
  63.283    CompileQueue* queue = thread->queue();
  63.284 -
  63.285    // For the thread that initializes the ciObjectFactory
  63.286    // this resource mark holds all the shared objects
  63.287    ResourceMark rm;
  63.288 @@ -1587,65 +1679,78 @@
  63.289      log->end_elem();
  63.290    }
  63.291  
  63.292 -  while (true) {
  63.293 -    {
  63.294 -      // We need this HandleMark to avoid leaking VM handles.
  63.295 -      HandleMark hm(thread);
  63.296 +  // If compiler thread/runtime initialization fails, exit the compiler thread
  63.297 +  if (!init_compiler_runtime()) {
  63.298 +    return;
  63.299 +  }
  63.300  
  63.301 -      if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
  63.302 -        // the code cache is really full
  63.303 -        handle_full_code_cache();
  63.304 -      }
  63.305 +  // Poll for new compilation tasks as long as the JVM runs. Compilation
  63.306 +  // should only be disabled if something went wrong while initializing the
  63.307 +  // compiler runtimes. This, in turn, should not happen. The only known case
  63.308 +  // when compiler runtime initialization fails is if there is not enough free
  63.309 +  // space in the code cache to generate the necessary stubs, etc.
  63.310 +  while (!is_compilation_disabled_forever()) {
  63.311 +    // We need this HandleMark to avoid leaking VM handles.
  63.312 +    HandleMark hm(thread);
  63.313  
  63.314 -      CompileTask* task = queue->get();
  63.315 +    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
  63.316 +      // the code cache is really full
  63.317 +      handle_full_code_cache();
  63.318 +    }
  63.319  
  63.320 -      // Give compiler threads an extra quanta.  They tend to be bursty and
  63.321 -      // this helps the compiler to finish up the job.
  63.322 -      if( CompilerThreadHintNoPreempt )
  63.323 -        os::hint_no_preempt();
  63.324 +    CompileTask* task = queue->get();
  63.325 +    if (task == NULL) {
  63.326 +      continue;
  63.327 +    }
  63.328  
  63.329 -      // trace per thread time and compile statistics
  63.330 -      CompilerCounters* counters = ((CompilerThread*)thread)->counters();
  63.331 -      PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
  63.332 +    // Give compiler threads an extra quanta.  They tend to be bursty and
  63.333 +    // this helps the compiler to finish up the job.
  63.334 +    if( CompilerThreadHintNoPreempt )
  63.335 +      os::hint_no_preempt();
  63.336  
  63.337 -      // Assign the task to the current thread.  Mark this compilation
  63.338 -      // thread as active for the profiler.
  63.339 -      CompileTaskWrapper ctw(task);
  63.340 -      nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
  63.341 -      task->set_code_handle(&result_handle);
  63.342 -      methodHandle method(thread, task->method());
  63.343 +    // trace per thread time and compile statistics
  63.344 +    CompilerCounters* counters = ((CompilerThread*)thread)->counters();
  63.345 +    PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
  63.346  
  63.347 -      // Never compile a method if breakpoints are present in it
  63.348 -      if (method()->number_of_breakpoints() == 0) {
  63.349 -        // Compile the method.
  63.350 -        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
  63.351 +    // Assign the task to the current thread.  Mark this compilation
  63.352 +    // thread as active for the profiler.
  63.353 +    CompileTaskWrapper ctw(task);
  63.354 +    nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
  63.355 +    task->set_code_handle(&result_handle);
  63.356 +    methodHandle method(thread, task->method());
  63.357 +
  63.358 +    // Never compile a method if breakpoints are present in it
  63.359 +    if (method()->number_of_breakpoints() == 0) {
  63.360 +      // Compile the method.
  63.361 +      if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
  63.362  #ifdef COMPILER1
  63.363 -          // Allow repeating compilations for the purpose of benchmarking
  63.364 -          // compile speed. This is not useful for customers.
  63.365 -          if (CompilationRepeat != 0) {
  63.366 -            int compile_count = CompilationRepeat;
  63.367 -            while (compile_count > 0) {
  63.368 -              invoke_compiler_on_method(task);
  63.369 -              nmethod* nm = method->code();
  63.370 -              if (nm != NULL) {
  63.371 -                nm->make_zombie();
  63.372 -                method->clear_code();
  63.373 -              }
  63.374 -              compile_count--;
  63.375 +        // Allow repeating compilations for the purpose of benchmarking
  63.376 +        // compile speed. This is not useful for customers.
  63.377 +        if (CompilationRepeat != 0) {
  63.378 +          int compile_count = CompilationRepeat;
  63.379 +          while (compile_count > 0) {
  63.380 +            invoke_compiler_on_method(task);
  63.381 +            nmethod* nm = method->code();
  63.382 +            if (nm != NULL) {
  63.383 +              nm->make_zombie();
  63.384 +              method->clear_code();
  63.385              }
  63.386 +            compile_count--;
  63.387            }
  63.388 +        }
  63.389  #endif /* COMPILER1 */
  63.390 -          invoke_compiler_on_method(task);
  63.391 -        } else {
  63.392 -          // After compilation is disabled, remove remaining methods from queue
  63.393 -          method->clear_queued_for_compilation();
  63.394 -        }
  63.395 +        invoke_compiler_on_method(task);
  63.396 +      } else {
  63.397 +        // After compilation is disabled, remove remaining methods from queue
  63.398 +        method->clear_queued_for_compilation();
  63.399        }
  63.400      }
  63.401    }
  63.402 +
  63.403 +  // Shut down compiler runtime
  63.404 +  shutdown_compiler_runtime(thread->compiler(), thread);
  63.405  }
  63.406  
  63.407 -
  63.408  // ------------------------------------------------------------------
  63.409  // CompileBroker::init_compiler_thread_log
  63.410  //
  63.411 @@ -1953,11 +2058,14 @@
  63.412        // Since code cache is full, immediately stop new compiles
  63.413        if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
  63.414          NMethodSweeper::log_sweep("disable_compiler");
  63.415 +
  63.416 +        // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
  63.417 +        // without having to consider the state in which the current thread is.
  63.418 +        ThreadInVMfromUnknown in_vm;
  63.419          NMethodSweeper::possibly_sweep();
  63.420        }
  63.421      } else {
  63.422 -      UseCompiler               = false;
  63.423 -      AlwaysCompileLoopMethods  = false;
  63.424 +      disable_compilation_forever();
  63.425      }
  63.426    }
  63.427    codecache_print(/* detailed= */ true);
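
A consequence of the new shutdown protocol is that CompileQueue::get() can now return NULL: a thread waiting on an empty queue wakes up at most every five seconds, rechecks is_compilation_disabled_forever(), and bails out instead of blocking for good. A condensed sketch of that loop shape with standard-library primitives (the flag and timing are illustrative):

#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

struct Task { int id; };

struct Queue {
  std::mutex              lock;
  std::condition_variable cv;
  std::deque<Task*>       tasks;
  bool                    shutdown_forever;

  Queue() : shutdown_forever(false) {}

  // Returns nullptr once shutdown is requested, like CompileQueue::get().
  Task* get() {
    std::unique_lock<std::mutex> l(lock);
    while (tasks.empty()) {
      if (shutdown_forever) return nullptr;
      // Timed wait: idle threads wake periodically to recheck the flag,
      // so a shutdown never strands a thread on an empty queue.
      cv.wait_for(l, std::chrono::seconds(5));
    }
    if (shutdown_forever) return nullptr;
    Task* t = tasks.front();
    tasks.pop_front();
    return t;
  }
};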
    64.1 --- a/src/share/vm/compiler/compileBroker.hpp	Fri Oct 18 10:37:26 2013 +0000
    64.2 +++ b/src/share/vm/compiler/compileBroker.hpp	Fri Oct 18 19:44:40 2013 -0700
    64.3 @@ -213,8 +213,12 @@
    64.4  
    64.5    // Redefine Classes support
    64.6    void mark_on_stack();
    64.7 +  void delete_all();
    64.8 +  void         print();
    64.9  
   64.10 -  void         print();
   64.11 +  ~CompileQueue() {
    64.12 +    assert(is_empty(), "Compile Queue must be empty");
   64.13 +  }
   64.14  };
   64.15  
   64.16  // CompileTaskWrapper
   64.17 @@ -266,7 +270,7 @@
   64.18    static CompileQueue* _c1_method_queue;
   64.19    static CompileTask* _task_free_list;
   64.20  
   64.21 -  static GrowableArray<CompilerThread*>* _method_threads;
   64.22 +  static GrowableArray<CompilerThread*>* _compiler_threads;
   64.23  
   64.24    // performance counters
   64.25    static PerfCounter* _perf_total_compilation;
   64.26 @@ -311,7 +315,7 @@
   64.27    static int _sum_nmethod_code_size;
   64.28    static long _peak_compilation_time;
   64.29  
   64.30 -  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
   64.31 +  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
   64.32    static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   64.33    static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   64.34    static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
   64.35 @@ -351,6 +355,9 @@
   64.36      if (is_c1_compile(comp_level)) return _c1_method_queue;
   64.37      return NULL;
   64.38    }
   64.39 +  static bool init_compiler_runtime();
   64.40 +  static void shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread);
   64.41 +
   64.42   public:
   64.43    enum {
   64.44      // The entry bci used for non-OSR compilations.
   64.45 @@ -378,9 +385,7 @@
   64.46                                   const char* comment, Thread* thread);
   64.47  
   64.48    static void compiler_thread_loop();
   64.49 -
   64.50    static uint get_compilation_id() { return _compilation_id; }
   64.51 -  static bool is_idle();
   64.52  
   64.53    // Set _should_block.
   64.54    // Call this from the VM, with Threads_lock held and a safepoint requested.
   64.55 @@ -391,8 +396,9 @@
   64.56  
   64.57    enum {
   64.58      // Flags for toggling compiler activity
   64.59 -    stop_compilation = 0,
   64.60 -    run_compilation  = 1
    64.61 +    stop_compilation     = 0,
    64.62 +    run_compilation      = 1,
    64.63 +    shutdown_compilation = 2
   64.64    };
   64.65  
   64.66    static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
   64.67 @@ -401,6 +407,16 @@
   64.68      jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
   64.69      return (old == (1-new_state));
   64.70    }
   64.71 +
   64.72 +  static void disable_compilation_forever() {
   64.73 +    UseCompiler               = false;
   64.74 +    AlwaysCompileLoopMethods  = false;
    64.75 +    Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs);
   64.76 +  }
   64.77 +
   64.78 +  static bool is_compilation_disabled_forever() {
    64.79 +    return _should_compile_new_jobs == shutdown_compilation;
   64.80 +  }
   64.81    static void handle_full_code_cache();
   64.82  
   64.83    // Return total compilation ticks
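
The broker's compile/no-compile flag is now a small three-state machine: set_should_compile_new_jobs() still toggles between stop and run with a compare-and-swap, while disable_compilation_forever() uses an unconditional exchange into the terminal shutdown state, which the CAS can never undo (its expected value is always 0 or 1). A sketch of the same state machine with std::atomic standing in for HotSpot's Atomic class:

#include <atomic>

enum { stop_compilation = 0, run_compilation = 1, shutdown_compilation = 2 };

static std::atomic<int> should_compile(run_compilation);

// Toggle stop <-> run only from the opposite state; once the flag holds
// shutdown_compilation (2), neither expected value matches and the CAS fails.
static bool set_should_compile_new_jobs(int new_state) {
  int expected = 1 - new_state;
  return should_compile.compare_exchange_strong(expected, new_state);
}

static void disable_compilation_forever() {
  should_compile.exchange(shutdown_compilation);   // terminal, unconditional
}

static bool is_compilation_disabled_forever() {
  return should_compile.load() == shutdown_compilation;
}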
    65.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Oct 18 10:37:26 2013 +0000
    65.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Oct 18 19:44:40 2013 -0700
    65.3 @@ -344,6 +344,10 @@
    65.4      }
    65.5    }
    65.6  
    65.7 +  if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) {
    65.8 +    vm_exit_during_initialization("Initial young gen size set larger than the maximum young gen size");
    65.9 +  }
   65.10 +
   65.11    if (FLAG_IS_CMDLINE(NewSize)) {
   65.12      _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
   65.13                                       1U);
    66.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Oct 18 10:37:26 2013 +0000
    66.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Oct 18 19:44:40 2013 -0700
    66.3 @@ -211,7 +211,7 @@
    66.4    // a GC that freed space for the allocation.
    66.5    if (!MetadataAllocationFailALot) {
    66.6      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    66.7 -    }
    66.8 +  }
    66.9  
   66.10    if (_result == NULL) {
   66.11      if (UseConcMarkSweepGC) {
   66.12 @@ -223,9 +223,7 @@
   66.13          _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
   66.14      }
   66.15      if (_result == NULL) {
   66.16 -      // Don't clear the soft refs.  This GC is for reclaiming metadata
   66.17 -      // and is unrelated to the fullness of the Java heap which should
   66.18 -      // be the criteria for clearing SoftReferences.
   66.19 +      // Don't clear the soft refs yet.
   66.20        if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
   66.21          gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
   66.22        }
   66.23 @@ -235,7 +233,7 @@
   66.24        _result =
   66.25          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
   66.26      }
   66.27 -    if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
   66.28 +    if (_result == NULL) {
   66.29        // If still failing, allow the Metaspace to expand.
   66.30        // See delta_capacity_until_GC() for explanation of the
   66.31        // amount of the expansion.
   66.32 @@ -243,7 +241,16 @@
   66.33        // or a MaxMetaspaceSize has been specified on the command line.
   66.34        _result =
   66.35          _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
   66.36 -
   66.37 +      if (_result == NULL) {
   66.38 +        // If expansion failed, do a last-ditch collection and try allocating
   66.39 +        // again.  A last-ditch collection will clear softrefs.  This
   66.40 +        // behavior is similar to the last-ditch collection done for perm
   66.41 +        // gen when it was full and a collection for failed allocation
   66.42 +        // did not free perm gen space.
   66.43 +        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
   66.44 +        _result =
   66.45 +          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
   66.46 +      }
   66.47      }
   66.48      if (Verbose && PrintGCDetails && _result == NULL) {
   66.49        gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
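
The rewritten metadata allocation path is an escalation ladder: plain allocate, then a GC, then metaspace expansion, and only if all of that fails a last-ditch collection that clears SoftReferences before one final attempt. A condensed sketch of that ordering; the allocator and collector below are trivial stand-ins:

#include <cstddef>
#include <cstdlib>

// Stand-ins for metaspace allocation and collection; illustrative only.
static void* try_allocate(size_t size)         { return std::malloc(size); }
static void* expand_and_allocate(size_t size)  { return std::malloc(size); }
static void  collect(bool /*clear_soft_refs*/) { /* run a GC here */ }

static void* allocate_with_retries(size_t size) {
  void* result = try_allocate(size);
  if (result != 0) return result;

  collect(false);                      // ordinary GC for metadata first
  result = try_allocate(size);
  if (result != 0) return result;

  result = expand_and_allocate(size);  // then let the metaspace grow
  if (result != 0) return result;

  collect(true);                       // last ditch: clears SoftReferences
  return try_allocate(size);
}

int main() {
  void* p = allocate_with_retries(128);
  std::free(p);
  return 0;
}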
    67.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Fri Oct 18 10:37:26 2013 +0000
    67.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Fri Oct 18 19:44:40 2013 -0700
    67.10 @@ -158,6 +158,22 @@
   67.11      index = vt->index_of_miranda(resolved_method->name(),
   67.12                                   resolved_method->signature());
   67.13      kind = CallInfo::vtable_call;
   67.14 +  } else if (resolved_method->has_vtable_index()) {
   67.15 +    // Can occur if an interface redeclares a method of Object.
   67.16 +
   67.17 +#ifdef ASSERT
   67.18 +    // Ensure that this is really the case.
   67.19 +    KlassHandle object_klass = SystemDictionary::Object_klass();
    67.20 +    Method* object_resolved_method = object_klass()->vtable()->method_at(index);
   67.21 +    assert(object_resolved_method->name() == resolved_method->name(),
   67.22 +      err_msg("Object and interface method names should match at vtable index %d, %s != %s",
   67.23 +      index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string()));
   67.24 +    assert(object_resolved_method->signature() == resolved_method->signature(),
   67.25 +      err_msg("Object and interface method signatures should match at vtable index %d, %s != %s",
   67.26 +      index, object_resolved_method->signature()->as_C_string(), resolved_method->signature()->as_C_string()));
   67.27 +#endif // ASSERT
   67.28 +
   67.29 +    kind = CallInfo::vtable_call;
   67.30    } else {
   67.31      // A regular interface call.
   67.32      kind = CallInfo::itable_call;
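
What the new ASSERT block checks, reduced to a contrived standalone sketch (Method here is a hypothetical two-field stand-in, not the VM class): when Java source such as "interface I { String toString(); }" redeclares a method of Object, the resolved interface method inherits Object's vtable index, so its name and signature must match Object's entry at that index.

    #include <cassert>
    #include <cstring>

    // Hypothetical stand-in for the VM's Method metadata.
    struct Method {
      const char* name;
      const char* signature;
    };

    int main() {
      // Object's vtable entry at the inherited index.
      Method object_entry = { "toString", "()Ljava/lang/String;" };
      // The interface method that redeclares Object::toString.
      Method resolved     = { "toString", "()Ljava/lang/String;" };

      // The two comparisons the ASSERT block performs.
      assert(strcmp(object_entry.name, resolved.name) == 0);
      assert(strcmp(object_entry.signature, resolved.signature) == 0);
      return 0;
    }
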
    68.1 --- a/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Oct 18 10:37:26 2013 +0000
    68.2 +++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Oct 18 19:44:40 2013 -0700
    68.3 @@ -28,7 +28,6 @@
    68.4  #include "memory/binaryTreeDictionary.hpp"
    68.5  #include "memory/freeList.hpp"
    68.6  #include "memory/freeBlockDictionary.hpp"
    68.7 -#include "memory/metablock.hpp"
    68.8  #include "memory/metachunk.hpp"
    68.9  #include "runtime/globals.hpp"
   68.10  #include "utilities/ostream.hpp"
    69.1 --- a/src/share/vm/memory/freeBlockDictionary.cpp	Fri Oct 18 10:37:26 2013 +0000
    69.2 +++ b/src/share/vm/memory/freeBlockDictionary.cpp	Fri Oct 18 19:44:40 2013 -0700
    69.3 @@ -28,7 +28,6 @@
    69.4  #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
    69.5  #endif // INCLUDE_ALL_GCS
    69.6  #include "memory/freeBlockDictionary.hpp"
    69.7 -#include "memory/metablock.hpp"
    69.8  #include "memory/metachunk.hpp"
    69.9  #include "runtime/thread.inline.hpp"
   69.10  #include "utilities/macros.hpp"
    70.1 --- a/src/share/vm/memory/freeList.cpp	Fri Oct 18 10:37:26 2013 +0000
    70.2 +++ b/src/share/vm/memory/freeList.cpp	Fri Oct 18 19:44:40 2013 -0700
    70.3 @@ -25,7 +25,6 @@
    70.4  #include "precompiled.hpp"
    70.5  #include "memory/freeBlockDictionary.hpp"
    70.6  #include "memory/freeList.hpp"
    70.7 -#include "memory/metablock.hpp"
    70.8  #include "memory/metachunk.hpp"
    70.9  #include "memory/sharedHeap.hpp"
   70.10  #include "runtime/globals.hpp"
    71.1 --- a/src/share/vm/memory/metablock.cpp	Fri Oct 18 10:37:26 2013 +0000
    71.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    71.3 @@ -1,68 +0,0 @@
    71.4 -/*
    71.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    71.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    71.7 - *
    71.8 - * This code is free software; you can redistribute it and/or modify it
    71.9 - * under the terms of the GNU General Public License version 2 only, as
   71.10 - * published by the Free Software Foundation.
   71.11 - *
   71.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   71.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   71.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   71.15 - * version 2 for more details (a copy is included in the LICENSE file that
   71.16 - * accompanied this code).
   71.17 - *
   71.18 - * You should have received a copy of the GNU General Public License version
   71.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   71.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   71.21 - *
   71.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   71.23 - * or visit www.oracle.com if you need additional information or have any
   71.24 - * questions.
   71.25 - *
   71.26 - */
   71.27 -
   71.28 -#include "precompiled.hpp"
   71.29 -#include "memory/allocation.hpp"
   71.30 -#include "memory/metablock.hpp"
   71.31 -#include "utilities/copy.hpp"
   71.32 -#include "utilities/debug.hpp"
   71.33 -
   71.34 -// Blocks of space for metadata are allocated out of Metachunks.
   71.35 -//
   71.36 -// Metachunk are allocated out of MetadataVirtualspaces and once
   71.37 -// allocated there is no explicit link between a Metachunk and
   71.38 -// the MetadataVirtualspaces from which it was allocated.
   71.39 -//
   71.40 -// Each SpaceManager maintains a
   71.41 -// list of the chunks it is using and the current chunk.  The current
   71.42 -// chunk is the chunk from which allocations are done.  Space freed in
   71.43 -// a chunk is placed on the free list of blocks (BlockFreelist) and
   71.44 -// reused from there.
   71.45 -//
   71.46 -// Future modification
   71.47 -//
   71.48 -// The Metachunk can conceivable be replaced by the Chunk in
   71.49 -// allocation.hpp.  Note that the latter Chunk is the space for
   71.50 -// allocation (allocations from the chunk are out of the space in
   71.51 -// the Chunk after the header for the Chunk) where as Metachunks
   71.52 -// point to space in a VirtualSpace.  To replace Metachunks with
   71.53 -// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
   71.54 -size_t Metablock::_min_block_byte_size = sizeof(Metablock);
   71.55 -
   71.56 -// New blocks returned by the Metaspace are zero initialized.
   71.57 -// We should fix the constructors to not assume this instead.
   71.58 -Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
   71.59 -  if (p == NULL) {
   71.60 -    return NULL;
   71.61 -  }
   71.62 -
   71.63 -  Metablock* result = (Metablock*) p;
   71.64 -
   71.65 -  // Clear the memory
   71.66 -  Copy::fill_to_aligned_words((HeapWord*)result, word_size);
   71.67 -#ifdef ASSERT
   71.68 -  result->set_word_size(word_size);
   71.69 -#endif
   71.70 -  return result;
   71.71 -}
    72.1 --- a/src/share/vm/memory/metablock.hpp	Fri Oct 18 10:37:26 2013 +0000
    72.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    72.3 @@ -1,101 +0,0 @@
    72.4 -/*
    72.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    72.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    72.7 - *
    72.8 - * This code is free software; you can redistribute it and/or modify it
    72.9 - * under the terms of the GNU General Public License version 2 only, as
   72.10 - * published by the Free Software Foundation.
   72.11 - *
   72.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   72.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   72.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   72.15 - * version 2 for more details (a copy is included in the LICENSE file that
   72.16 - * accompanied this code).
   72.17 - *
   72.18 - * You should have received a copy of the GNU General Public License version
   72.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   72.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   72.21 - *
   72.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   72.23 - * or visit www.oracle.com if you need additional information or have any
   72.24 - * questions.
   72.25 - *
   72.26 - */
   72.27 -#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
   72.28 -#define SHARE_VM_MEMORY_METABLOCK_HPP
   72.29 -
   72.30 -// Metablock are the unit of allocation from a Chunk.  It is initialized
   72.31 -// with the size of the requested allocation.  That size is overwritten
   72.32 -// once the allocation returns.
   72.33 -//
   72.34 -// A Metablock may be reused by its SpaceManager but are never moved between
   72.35 -// SpaceManagers.  There is no explicit link to the Metachunk
   72.36 -// from which it was allocated.  Metablock may be deallocated and
   72.37 -// put on a freelist but the space is never freed, rather
   72.38 -// the Metachunk it is a part of will be deallocated when it's
   72.39 -// associated class loader is collected.
   72.40 -
   72.41 -class Metablock VALUE_OBJ_CLASS_SPEC {
   72.42 -  friend class VMStructs;
   72.43 - private:
   72.44 -  // Used to align the allocation (see below).
   72.45 -  union block_t {
   72.46 -    void* _data[3];
   72.47 -    struct header_t {
   72.48 -      size_t _word_size;
   72.49 -      Metablock* _next;
   72.50 -      Metablock* _prev;
   72.51 -    } _header;
   72.52 -  } _block;
   72.53 -  static size_t _min_block_byte_size;
   72.54 -
   72.55 -  typedef union block_t Block;
   72.56 -  typedef struct header_t Header;
   72.57 -  const Block* block() const { return &_block; }
   72.58 -  const Block::header_t* header() const { return &(block()->_header); }
   72.59 - public:
   72.60 -
   72.61 -  static Metablock* initialize(MetaWord* p, size_t word_size);
   72.62 -
   72.63 -  // This places the body of the block at a 2 word boundary
   72.64 -  // because every block starts on a 2 word boundary.  Work out
   72.65 -  // how to make the body on a 2 word boundary if the block
   72.66 -  // starts on a arbitrary boundary.  JJJ
   72.67 -
   72.68 -  size_t word_size() const  { return header()->_word_size; }
   72.69 -  void set_word_size(size_t v) { _block._header._word_size = v; }
   72.70 -  size_t size() const volatile { return _block._header._word_size; }
   72.71 -  void set_size(size_t v) { _block._header._word_size = v; }
   72.72 -  Metablock* next() const { return header()->_next; }
   72.73 -  void set_next(Metablock* v) { _block._header._next = v; }
   72.74 -  Metablock* prev() const { return header()->_prev; }
   72.75 -  void set_prev(Metablock* v) { _block._header._prev = v; }
   72.76 -
   72.77 -  static size_t min_block_byte_size() { return _min_block_byte_size; }
   72.78 -
   72.79 -  bool is_free()                 { return header()->_word_size != 0; }
   72.80 -  void clear_next()              { set_next(NULL); }
   72.81 -  void link_prev(Metablock* ptr) { set_prev(ptr); }
   72.82 -  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
   72.83 -  bool cantCoalesce() const     { return false; }
   72.84 -  void link_next(Metablock* ptr) { set_next(ptr); }
   72.85 -  void link_after(Metablock* ptr){
   72.86 -    link_next(ptr);
   72.87 -    if (ptr != NULL) ptr->link_prev(this);
   72.88 -  }
   72.89 -
   72.90 -  // Should not be needed in a free list of Metablocks
   72.91 -  void markNotFree()            { ShouldNotReachHere(); }
   72.92 -
   72.93 -  // Debug support
   72.94 -#ifdef ASSERT
   72.95 -  void* prev_addr() const { return (void*)&_block._header._prev; }
   72.96 -  void* next_addr() const { return (void*)&_block._header._next; }
   72.97 -  void* size_addr() const { return (void*)&_block._header._word_size; }
   72.98 -#endif
   72.99 -  bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
  72.100 -  bool verify_par_locked() { return true; }
  72.101 -
  72.102 -  void assert_is_mangled() const {/* Don't check "\*/}
  72.103 -};
  72.104 -#endif // SHARE_VM_MEMORY_METABLOCK_HPP
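
After this removal the minimum block size survives as a plain sizeof(Metablock) in SpaceManager::get_raw_word_size() (see the metaspace.cpp hunk below), which rounds each request up to the block header size and then to the chunk alignment. A worked sketch of that rounding, assuming a 64-bit build where sizeof(Metablock) is 24 bytes (three word-sized fields: size, next, prev):

    #include <cstddef>
    #include <cstdio>

    // Same rounding the VM's align_size_up() performs for power-of-two alignments.
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t BytesPerWord     = 8;   // 64-bit word size (assumption)
      const size_t sizeof_metablock = 24;  // hypothetical Metablock header size
      size_t word_size = 1;                // a one-word request
      size_t byte_size = word_size * BytesPerWord;
      size_t raw_bytes = byte_size < sizeof_metablock ? sizeof_metablock : byte_size;
      raw_bytes = align_size_up(raw_bytes, 8);  // Metachunk::object_alignment()
      printf("raw word size = %zu\n", raw_bytes / BytesPerWord);  // prints 3
      return 0;
    }
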
    73.1 --- a/src/share/vm/memory/metachunk.cpp	Fri Oct 18 10:37:26 2013 +0000
    73.2 +++ b/src/share/vm/memory/metachunk.cpp	Fri Oct 18 19:44:40 2013 -0700
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    73.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -29,42 +29,39 @@
   73.11  #include "utilities/debug.hpp"
   73.12  
   73.13  class VirtualSpaceNode;
   73.14 -//
   73.15 -// Future modification
   73.16 -//
   73.17 -// The Metachunk can conceivable be replaced by the Chunk in
   73.18 -// allocation.hpp.  Note that the latter Chunk is the space for
   73.19 -// allocation (allocations from the chunk are out of the space in
   73.20 -// the Chunk after the header for the Chunk) where as Metachunks
   73.21 -// point to space in a VirtualSpace.  To replace Metachunks with
   73.22 -// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
   73.23  
   73.24  const size_t metadata_chunk_initialize = 0xf7f7f7f7;
   73.25  
   73.26 -size_t Metachunk::_overhead =
   73.27 -  Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
   73.28 +size_t Metachunk::object_alignment() {
   73.29 +  // Must align pointers and sizes to 8,
   73.30 +  // so that 64 bit types get correctly aligned.
   73.31 +  const size_t alignment = 8;
   73.32 +
    73.33 +  // Make sure that the Klass alignment also agrees.
   73.34 +  STATIC_ASSERT(alignment == (size_t)KlassAlignmentInBytes);
   73.35 +
   73.36 +  return alignment;
   73.37 +}
   73.38 +
   73.39 +size_t Metachunk::overhead() {
   73.40 +  return align_size_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
   73.41 +}
   73.42  
   73.43  // Metachunk methods
   73.44  
   73.45  Metachunk::Metachunk(size_t word_size,
   73.46 -                     VirtualSpaceNode* container) :
   73.47 -    _word_size(word_size),
   73.48 -    _bottom(NULL),
   73.49 -    _end(NULL),
   73.50 +                     VirtualSpaceNode* container)
   73.51 +    : Metabase<Metachunk>(word_size),
   73.52      _top(NULL),
   73.53 -    _next(NULL),
   73.54 -    _prev(NULL),
   73.55      _container(container)
   73.56  {
   73.57 -  _bottom = (MetaWord*)this;
   73.58 -  _top = (MetaWord*)this + _overhead;
   73.59 -  _end = (MetaWord*)this + word_size;
   73.60 +  _top = initial_top();
   73.61  #ifdef ASSERT
   73.62 -  set_is_free(false);
   73.63 +  set_is_tagged_free(false);
   73.64    size_t data_word_size = pointer_delta(end(),
   73.65 -                                        top(),
   73.66 +                                        _top,
   73.67                                          sizeof(MetaWord));
   73.68 -  Copy::fill_to_words((HeapWord*) top(),
   73.69 +  Copy::fill_to_words((HeapWord*)_top,
   73.70                        data_word_size,
   73.71                        metadata_chunk_initialize);
   73.72  #endif
   73.73 @@ -82,22 +79,18 @@
   73.74  
   73.75  // _bottom points to the start of the chunk including the overhead.
   73.76  size_t Metachunk::used_word_size() const {
   73.77 -  return pointer_delta(_top, _bottom, sizeof(MetaWord));
   73.78 +  return pointer_delta(_top, bottom(), sizeof(MetaWord));
   73.79  }
   73.80  
   73.81  size_t Metachunk::free_word_size() const {
   73.82 -  return pointer_delta(_end, _top, sizeof(MetaWord));
   73.83 -}
   73.84 -
   73.85 -size_t Metachunk::capacity_word_size() const {
   73.86 -  return pointer_delta(_end, _bottom, sizeof(MetaWord));
   73.87 +  return pointer_delta(end(), _top, sizeof(MetaWord));
   73.88  }
   73.89  
   73.90  void Metachunk::print_on(outputStream* st) const {
   73.91    st->print_cr("Metachunk:"
   73.92                 " bottom " PTR_FORMAT " top " PTR_FORMAT
   73.93                 " end " PTR_FORMAT " size " SIZE_FORMAT,
   73.94 -               bottom(), top(), end(), word_size());
   73.95 +               bottom(), _top, end(), word_size());
   73.96    if (Verbose) {
   73.97      st->print_cr("    used " SIZE_FORMAT " free " SIZE_FORMAT,
   73.98                   used_word_size(), free_word_size());
   73.99 @@ -109,8 +102,8 @@
  73.100    // Mangle the payload of the chunk and not the links that
  73.101    // maintain list of chunks.
  73.102    HeapWord* start = (HeapWord*)(bottom() + overhead());
  73.103 -  size_t word_size = capacity_word_size() - overhead();
  73.104 -  Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
  73.105 +  size_t size = word_size() - overhead();
  73.106 +  Copy::fill_to_words(start, size, metadata_chunk_initialize);
  73.107  }
  73.108  #endif // PRODUCT
  73.109  
  73.110 @@ -118,9 +111,68 @@
  73.111  #ifdef ASSERT
  73.112    // Cannot walk through the blocks unless the blocks have
  73.113    // headers with sizes.
  73.114 -  assert(_bottom <= _top &&
  73.115 -         _top <= _end,
  73.116 +  assert(bottom() <= _top &&
  73.117 +         _top <= (MetaWord*)end(),
  73.118           "Chunk has been smashed");
  73.119  #endif
  73.120    return;
  73.121  }
  73.122 +
  73.123 +/////////////// Unit tests ///////////////
  73.124 +
  73.125 +#ifndef PRODUCT
  73.126 +
  73.127 +class TestMetachunk {
  73.128 + public:
  73.129 +  static void test() {
  73.130 +    size_t size = 2 * 1024 * 1024;
  73.131 +    void* memory = malloc(size);
  73.132 +    assert(memory != NULL, "Failed to malloc 2MB");
  73.133 +
  73.134 +    Metachunk* metachunk = ::new (memory) Metachunk(size / BytesPerWord, NULL);
  73.135 +
  73.136 +    assert(metachunk->bottom() == (MetaWord*)metachunk, "assert");
  73.137 +    assert(metachunk->end() == (uintptr_t*)metachunk + metachunk->size(), "assert");
  73.138 +
  73.139 +    // Check sizes
  73.140 +    assert(metachunk->size() == metachunk->word_size(), "assert");
  73.141 +    assert(metachunk->word_size() == pointer_delta(metachunk->end(), metachunk->bottom(),
  73.142 +        sizeof(MetaWord*)), "assert");
  73.143 +
  73.144 +    // Check usage
  73.145 +    assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
  73.146 +    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
  73.147 +    assert(metachunk->top() == metachunk->initial_top(), "assert");
  73.148 +    assert(metachunk->is_empty(), "assert");
  73.149 +
  73.150 +    // Allocate
  73.151 +    size_t alloc_size = 64; // Words
  73.152 +    assert(is_size_aligned(alloc_size, Metachunk::object_alignment()), "assert");
  73.153 +
  73.154 +    MetaWord* mem = metachunk->allocate(alloc_size);
  73.155 +
  73.156 +    // Check post alloc
  73.157 +    assert(mem == metachunk->initial_top(), "assert");
  73.158 +    assert(mem + alloc_size == metachunk->top(), "assert");
  73.159 +    assert(metachunk->used_word_size() == metachunk->overhead() + alloc_size, "assert");
  73.160 +    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
  73.161 +    assert(!metachunk->is_empty(), "assert");
  73.162 +
  73.163 +    // Clear chunk
  73.164 +    metachunk->reset_empty();
  73.165 +
  73.166 +    // Check post clear
  73.167 +    assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
  73.168 +    assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
  73.169 +    assert(metachunk->top() == metachunk->initial_top(), "assert");
  73.170 +    assert(metachunk->is_empty(), "assert");
  73.171 +
  73.172 +    free(memory);
  73.173 +  }
  73.174 +};
  73.175 +
  73.176 +void TestMetachunk_test() {
  73.177 +  TestMetachunk::test();
  73.178 +}
  73.179 +
  73.180 +#endif
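
A back-of-envelope check of the new overhead() computation (the 40-byte sizeof(Metachunk) below is an assumed 64-bit product-build figure for illustration; the real value is platform dependent):

    #include <cstddef>
    #include <cstdio>

    // Same rounding the VM's align_size_up() performs for power-of-two alignments.
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t BytesPerWord     = 8;   // 64-bit word size (assumption)
      const size_t sizeof_metachunk = 40;  // hypothetical header size
      size_t overhead_words = align_size_up(sizeof_metachunk, 8) / BytesPerWord;
      printf("Metachunk overhead = %zu words\n", overhead_words);  // prints 5
      return 0;
    }
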
    74.1 --- a/src/share/vm/memory/metachunk.hpp	Fri Oct 18 10:37:26 2013 +0000
    74.2 +++ b/src/share/vm/memory/metachunk.hpp	Fri Oct 18 19:44:40 2013 -0700
    74.3 @@ -1,5 +1,5 @@
    74.4  /*
    74.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    74.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    74.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.8   *
    74.9   * This code is free software; you can redistribute it and/or modify it
   74.10 @@ -24,89 +24,44 @@
   74.11  #ifndef SHARE_VM_MEMORY_METACHUNK_HPP
   74.12  #define SHARE_VM_MEMORY_METACHUNK_HPP
   74.13  
   74.14 -//  Metachunk - Quantum of allocation from a Virtualspace
   74.15 -//    Metachunks are reused (when freed are put on a global freelist) and
   74.16 -//    have no permanent association to a SpaceManager.
   74.17 -
   74.18 -//            +--------------+ <- end
   74.19 -//            |              |          --+       ---+
   74.20 -//            |              |            | free     |
   74.21 -//            |              |            |          |
   74.22 -//            |              |            |          | capacity
   74.23 -//            |              |            |          |
   74.24 -//            |              | <- top   --+          |
   74.25 -//            |              |           ---+        |
   74.26 -//            |              |              | used   |
   74.27 -//            |              |              |        |
   74.28 -//            |              |              |        |
   74.29 -//            +--------------+ <- bottom ---+     ---+
   74.30 +#include "memory/allocation.hpp"
   74.31 +#include "utilities/debug.hpp"
   74.32 +#include "utilities/globalDefinitions.hpp"
   74.33  
   74.34  class VirtualSpaceNode;
   74.35  
   74.36 -class Metachunk VALUE_OBJ_CLASS_SPEC {
   74.37 -  // link to support lists of chunks
   74.38 -  Metachunk* _next;
   74.39 -  Metachunk* _prev;
   74.40 -  VirtualSpaceNode* _container;
   74.41 +// Super class of Metablock and Metachunk to allow them to
   74.42 +// be put on the FreeList and in the BinaryTreeDictionary.
   74.43 +template <class T>
   74.44 +class Metabase VALUE_OBJ_CLASS_SPEC {
   74.45 +  size_t _word_size;
   74.46 +  T*     _next;
   74.47 +  T*     _prev;
   74.48  
   74.49 -  MetaWord* _bottom;
   74.50 -  MetaWord* _end;
   74.51 -  MetaWord* _top;
   74.52 -  size_t _word_size;
   74.53 -  // Used in a guarantee() so included in the Product builds
   74.54 -  // even through it is only for debugging.
   74.55 -  bool _is_free;
   74.56 -
   74.57 -  // Metachunks are allocated out of a MetadataVirtualSpace and
   74.58 -  // and use some of its space to describe itself (plus alignment
   74.59 -  // considerations).  Metadata is allocated in the rest of the chunk.
   74.60 -  // This size is the overhead of maintaining the Metachunk within
   74.61 -  // the space.
   74.62 -  static size_t _overhead;
   74.63 + protected:
   74.64 +  Metabase(size_t word_size) : _word_size(word_size), _next(NULL), _prev(NULL) {}
   74.65  
   74.66   public:
   74.67 -  Metachunk(size_t word_size , VirtualSpaceNode* container);
   74.68 +  T* next() const         { return _next; }
   74.69 +  T* prev() const         { return _prev; }
   74.70 +  void set_next(T* v)     { _next = v; assert(v != this, "Boom");}
   74.71 +  void set_prev(T* v)     { _prev = v; assert(v != this, "Boom");}
   74.72 +  void clear_next()       { set_next(NULL); }
   74.73 +  void clear_prev()       { set_prev(NULL); }
   74.74  
   74.75 -  // Used to add a Metachunk to a list of Metachunks
   74.76 -  void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
   74.77 -  void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
   74.78 -  void set_container(VirtualSpaceNode* v) { _container = v; }
   74.79 -
   74.80 -  MetaWord* allocate(size_t word_size);
   74.81 -
   74.82 -  // Accessors
   74.83 -  Metachunk* next() const { return _next; }
   74.84 -  Metachunk* prev() const { return _prev; }
   74.85 -  VirtualSpaceNode* container() const { return _container; }
   74.86 -  MetaWord* bottom() const { return _bottom; }
   74.87 -  MetaWord* end() const { return _end; }
   74.88 -  MetaWord* top() const { return _top; }
   74.89 -  size_t word_size() const { return _word_size; }
   74.90    size_t size() const volatile { return _word_size; }
   74.91    void set_size(size_t v) { _word_size = v; }
   74.92 -  bool is_free() { return _is_free; }
   74.93 -  void set_is_free(bool v) { _is_free = v; }
   74.94 -  static size_t overhead() { return _overhead; }
   74.95 -  void clear_next()              { set_next(NULL); }
   74.96 -  void link_prev(Metachunk* ptr) { set_prev(ptr); }
   74.97 -  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
   74.98 -  bool cantCoalesce() const     { return false; }
   74.99 -  void link_next(Metachunk* ptr) { set_next(ptr); }
  74.100 -  void link_after(Metachunk* ptr){
  74.101 +
  74.102 +  void link_next(T* ptr)  { set_next(ptr); }
  74.103 +  void link_prev(T* ptr)  { set_prev(ptr); }
  74.104 +  void link_after(T* ptr) {
  74.105      link_next(ptr);
  74.106 -    if (ptr != NULL) ptr->link_prev(this);
  74.107 +    if (ptr != NULL) ptr->link_prev((T*)this);
  74.108    }
  74.109  
  74.110 -  // Reset top to bottom so chunk can be reused.
  74.111 -  void reset_empty() { _top = (_bottom + _overhead); _next = NULL; _prev = NULL; }
  74.112 -  bool is_empty() { return _top == (_bottom + _overhead); }
  74.113 +  uintptr_t* end() const        { return ((uintptr_t*) this) + size(); }
  74.114  
  74.115 -  // used (has been allocated)
  74.116 -  // free (available for future allocations)
  74.117 -  // capacity (total size of chunk)
  74.118 -  size_t used_word_size() const;
  74.119 -  size_t free_word_size() const;
  74.120 -  size_t capacity_word_size()const;
  74.121 +  bool cantCoalesce() const     { return false; }
  74.122  
  74.123    // Debug support
  74.124  #ifdef ASSERT
  74.125 @@ -114,14 +69,99 @@
  74.126    void* next_addr() const { return (void*)&_next; }
  74.127    void* size_addr() const { return (void*)&_word_size; }
  74.128  #endif
  74.129 -  bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
  74.130 +  bool verify_chunk_in_free_list(T* tc) const { return true; }
  74.131    bool verify_par_locked() { return true; }
  74.132  
  74.133    void assert_is_mangled() const {/* Don't check "\*/}
  74.134  
  74.135 +  bool is_free()                 { return true; }
  74.136 +};
  74.137 +
  74.138 +//  Metachunk - Quantum of allocation from a Virtualspace
  74.139 +//    Metachunks are reused (when freed are put on a global freelist) and
  74.140 +//    have no permanent association to a SpaceManager.
  74.141 +
  74.142 +//            +--------------+ <- end    --+       --+
  74.143 +//            |              |             |         |
  74.144 +//            |              |             | free    |
  74.145 +//            |              |             |         |
  74.146 +//            |              |             |         | size | capacity
  74.147 +//            |              |             |         |
   74.148 +//            |              | <- top   --+          |
  74.149 +//            |              |             |         |
  74.150 +//            |              |             | used    |
  74.151 +//            |              |             |         |
  74.152 +//            |              |             |         |
  74.153 +//            +--------------+ <- bottom --+       --+
  74.154 +
  74.155 +class Metachunk : public Metabase<Metachunk> {
  74.156 +  friend class TestMetachunk;
  74.157 +  // The VirtualSpaceNode containing this chunk.
  74.158 +  VirtualSpaceNode* _container;
  74.159 +
  74.160 +  // Current allocation top.
  74.161 +  MetaWord* _top;
  74.162 +
  74.163 +  DEBUG_ONLY(bool _is_tagged_free;)
  74.164 +
  74.165 +  MetaWord* initial_top() const { return (MetaWord*)this + overhead(); }
  74.166 +  MetaWord* top() const         { return _top; }
  74.167 +
  74.168 + public:
   74.169 +  // Metachunks are allocated out of a MetadataVirtualSpace
   74.170 +  // and use some of its space to describe itself (plus alignment
  74.171 +  // considerations).  Metadata is allocated in the rest of the chunk.
  74.172 +  // This size is the overhead of maintaining the Metachunk within
  74.173 +  // the space.
  74.174 +
  74.175 +  // Alignment of each allocation in the chunks.
  74.176 +  static size_t object_alignment();
  74.177 +
  74.178 +  // Size of the Metachunk header, including alignment.
  74.179 +  static size_t overhead();
  74.180 +
   74.181 +  Metachunk(size_t word_size, VirtualSpaceNode* container);
  74.182 +
  74.183 +  MetaWord* allocate(size_t word_size);
  74.184 +
  74.185 +  VirtualSpaceNode* container() const { return _container; }
  74.186 +
  74.187 +  MetaWord* bottom() const { return (MetaWord*) this; }
  74.188 +
  74.189 +  // Reset top to bottom so chunk can be reused.
  74.190 +  void reset_empty() { _top = initial_top(); clear_next(); clear_prev(); }
  74.191 +  bool is_empty() { return _top == initial_top(); }
  74.192 +
  74.193 +  // used (has been allocated)
  74.194 +  // free (available for future allocations)
  74.195 +  size_t word_size() const { return size(); }
  74.196 +  size_t used_word_size() const;
  74.197 +  size_t free_word_size() const;
  74.198 +
  74.199 +#ifdef ASSERT
  74.200 +  bool is_tagged_free() { return _is_tagged_free; }
  74.201 +  void set_is_tagged_free(bool v) { _is_tagged_free = v; }
  74.202 +#endif
  74.203 +
  74.204    NOT_PRODUCT(void mangle();)
  74.205  
  74.206    void print_on(outputStream* st) const;
  74.207    void verify();
  74.208  };
  74.209 +
  74.210 +// Metablock is the unit of allocation from a Chunk.
  74.211 +//
   74.212 +// A Metablock may be reused by its SpaceManager but is never moved between
   74.213 +// SpaceManagers.  There is no explicit link to the Metachunk
   74.214 +// from which it was allocated.  A Metablock may be deallocated and
   74.215 +// put on a freelist, but the space is never freed; rather,
   74.216 +// the Metachunk it is a part of will be deallocated when its
   74.217 +// associated class loader is collected.
  74.218 +
  74.219 +class Metablock : public Metabase<Metablock> {
  74.220 +  friend class VMStructs;
  74.221 + public:
  74.222 +  Metablock(size_t word_size) : Metabase<Metablock>(word_size) {}
  74.223 +};
  74.224 +
  74.225  #endif  // SHARE_VM_MEMORY_METACHUNK_HPP
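
The new Metabase<T> template carries the intrusive list links that Metablock and Metachunk previously duplicated. A trimmed, compilable extract showing how link_after() wires both directions of the list (only the link API is kept; Node is a hypothetical subclass standing in for Metablock/Metachunk):

    #include <cassert>
    #include <cstddef>

    template <class T>
    class Metabase {
      size_t _word_size;
      T*     _next;
      T*     _prev;
     protected:
      explicit Metabase(size_t word_size)
        : _word_size(word_size), _next(NULL), _prev(NULL) {}
     public:
      T* next() const { return _next; }
      T* prev() const { return _prev; }
      void set_next(T* v) { assert(v != this); _next = v; }
      void set_prev(T* v) { assert(v != this); _prev = v; }
      void link_next(T* p) { set_next(p); }
      void link_prev(T* p) { set_prev(p); }
      // Links p after this node and back-links p to this node.
      void link_after(T* p) {
        link_next(p);
        if (p != NULL) p->link_prev((T*)this);
      }
    };

    struct Node : public Metabase<Node> {
      explicit Node(size_t ws) : Metabase<Node>(ws) {}
    };

    int main() {
      Node a(4), b(8);
      a.link_after(&b);  // one call wires a->next and b->prev
      assert(a.next() == &b && b.prev() == &a);
      return 0;
    }
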
    75.1 --- a/src/share/vm/memory/metaspace.cpp	Fri Oct 18 10:37:26 2013 +0000
    75.2 +++ b/src/share/vm/memory/metaspace.cpp	Fri Oct 18 19:44:40 2013 -0700
    75.3 @@ -30,7 +30,6 @@
    75.4  #include "memory/filemap.hpp"
    75.5  #include "memory/freeList.hpp"
    75.6  #include "memory/gcLocker.hpp"
    75.7 -#include "memory/metablock.hpp"
    75.8  #include "memory/metachunk.hpp"
    75.9  #include "memory/metaspace.hpp"
   75.10  #include "memory/metaspaceShared.hpp"
   75.11 @@ -49,13 +48,10 @@
   75.12  
   75.13  typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
   75.14  typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
   75.15 -// Define this macro to enable slow integrity checking of
   75.16 -// the free chunk lists
   75.17 +
   75.18 +// Set this constant to enable slow integrity checking of the free chunk lists
   75.19  const bool metaspace_slow_verify = false;
   75.20  
   75.21 -// Parameters for stress mode testing
   75.22 -const uint metadata_deallocate_a_lot_block = 10;
   75.23 -const uint metadata_deallocate_a_lock_chunk = 3;
   75.24  size_t const allocation_from_dictionary_limit = 4 * K;
   75.25  
   75.26  MetaWord* last_allocated = 0;
   75.27 @@ -92,24 +88,9 @@
   75.28  uint MetaspaceGC::_shrink_factor = 0;
   75.29  bool MetaspaceGC::_should_concurrent_collect = false;
   75.30  
   75.31 -// Blocks of space for metadata are allocated out of Metachunks.
   75.32 -//
   75.33 -// Metachunk are allocated out of MetadataVirtualspaces and once
   75.34 -// allocated there is no explicit link between a Metachunk and
   75.35 -// the MetadataVirtualspaces from which it was allocated.
   75.36 -//
   75.37 -// Each SpaceManager maintains a
   75.38 -// list of the chunks it is using and the current chunk.  The current
   75.39 -// chunk is the chunk from which allocations are done.  Space freed in
   75.40 -// a chunk is placed on the free list of blocks (BlockFreelist) and
   75.41 -// reused from there.
   75.42 -
   75.43  typedef class FreeList<Metachunk> ChunkList;
   75.44  
   75.45  // Manages the global free lists of chunks.
   75.46 -// Has three lists of free chunks, and a total size and
   75.47 -// count that includes all three
   75.48 -
   75.49  class ChunkManager : public CHeapObj<mtInternal> {
   75.50  
   75.51    // Free list of chunks of different sizes.
   75.52 @@ -119,7 +100,6 @@
   75.53    //   HumongousChunk
   75.54    ChunkList _free_chunks[NumberOfFreeLists];
   75.55  
   75.56 -
   75.57    //   HumongousChunk
   75.58    ChunkTreeDictionary _humongous_dictionary;
   75.59  
   75.60 @@ -166,7 +146,6 @@
   75.61  
   75.62    // add or delete (return) a chunk to the global freelist.
   75.63    Metachunk* chunk_freelist_allocate(size_t word_size);
   75.64 -  void chunk_freelist_deallocate(Metachunk* chunk);
   75.65  
   75.66    // Map a size to a list index assuming that there are lists
   75.67    // for special, small, medium, and humongous chunks.
   75.68 @@ -200,9 +179,7 @@
   75.69    // Returns the list for the given chunk word size.
   75.70    ChunkList* find_free_chunks_list(size_t word_size);
   75.71  
   75.72 -  // Add and remove from a list by size.  Selects
   75.73 -  // list based on size of chunk.
   75.74 -  void free_chunks_put(Metachunk* chuck);
   75.75 +  // Remove from a list by size.  Selects list based on size of chunk.
   75.76    Metachunk* free_chunks_get(size_t chunk_word_size);
   75.77  
   75.78    // Debug support
   75.79 @@ -230,7 +207,6 @@
   75.80  // to the allocation of a quantum of metadata).
   75.81  class BlockFreelist VALUE_OBJ_CLASS_SPEC {
   75.82    BlockTreeDictionary* _dictionary;
   75.83 -  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
   75.84  
   75.85    // Only allocate and split from freelist if the size of the allocation
   75.86    // is at least 1/4th the size of the available block.
   75.87 @@ -258,6 +234,7 @@
   75.88    void print_on(outputStream* st) const;
   75.89  };
   75.90  
   75.91 +// A VirtualSpaceList node.
   75.92  class VirtualSpaceNode : public CHeapObj<mtClass> {
   75.93    friend class VirtualSpaceList;
   75.94  
   75.95 @@ -414,13 +391,13 @@
   75.96    Metachunk* chunk = first_chunk();
   75.97    Metachunk* invalid_chunk = (Metachunk*) top();
   75.98    while (chunk < invalid_chunk ) {
   75.99 -    assert(chunk->is_free(), "Should be marked free");
  75.100 -      MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
  75.101 -      chunk_manager->remove_chunk(chunk);
  75.102 -      assert(chunk->next() == NULL &&
  75.103 -             chunk->prev() == NULL,
  75.104 -             "Was not removed from its list");
  75.105 -      chunk = (Metachunk*) next;
  75.106 +    assert(chunk->is_tagged_free(), "Should be tagged free");
  75.107 +    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
  75.108 +    chunk_manager->remove_chunk(chunk);
  75.109 +    assert(chunk->next() == NULL &&
  75.110 +           chunk->prev() == NULL,
  75.111 +           "Was not removed from its list");
  75.112 +    chunk = (Metachunk*) next;
  75.113    }
  75.114  }
  75.115  
  75.116 @@ -434,7 +411,7 @@
  75.117      // Don't count the chunks on the free lists.  Those are
  75.118      // still part of the VirtualSpaceNode but not currently
  75.119      // counted.
  75.120 -    if (!chunk->is_free()) {
  75.121 +    if (!chunk->is_tagged_free()) {
  75.122        count++;
  75.123      }
  75.124      chunk = (Metachunk*) next;
  75.125 @@ -550,44 +527,16 @@
  75.126  
  75.127  class Metadebug : AllStatic {
  75.128    // Debugging support for Metaspaces
  75.129 -  static int _deallocate_block_a_lot_count;
  75.130 -  static int _deallocate_chunk_a_lot_count;
  75.131    static int _allocation_fail_alot_count;
  75.132  
  75.133   public:
  75.134 -  static int deallocate_block_a_lot_count() {
  75.135 -    return _deallocate_block_a_lot_count;
  75.136 -  }
  75.137 -  static void set_deallocate_block_a_lot_count(int v) {
  75.138 -    _deallocate_block_a_lot_count = v;
  75.139 -  }
  75.140 -  static void inc_deallocate_block_a_lot_count() {
  75.141 -    _deallocate_block_a_lot_count++;
  75.142 -  }
  75.143 -  static int deallocate_chunk_a_lot_count() {
  75.144 -    return _deallocate_chunk_a_lot_count;
  75.145 -  }
  75.146 -  static void reset_deallocate_chunk_a_lot_count() {
  75.147 -    _deallocate_chunk_a_lot_count = 1;
  75.148 -  }
  75.149 -  static void inc_deallocate_chunk_a_lot_count() {
  75.150 -    _deallocate_chunk_a_lot_count++;
  75.151 -  }
  75.152  
  75.153    static void init_allocation_fail_alot_count();
  75.154  #ifdef ASSERT
  75.155    static bool test_metadata_failure();
  75.156  #endif
  75.157 -
  75.158 -  static void deallocate_chunk_a_lot(SpaceManager* sm,
  75.159 -                                     size_t chunk_word_size);
  75.160 -  static void deallocate_block_a_lot(SpaceManager* sm,
  75.161 -                                     size_t chunk_word_size);
  75.162 -
  75.163  };
  75.164  
  75.165 -int Metadebug::_deallocate_block_a_lot_count = 0;
  75.166 -int Metadebug::_deallocate_chunk_a_lot_count = 0;
  75.167  int Metadebug::_allocation_fail_alot_count = 0;
  75.168  
  75.169  //  SpaceManager - used by Metaspace to handle allocations
  75.170 @@ -753,14 +702,11 @@
  75.171  #endif
  75.172  
  75.173    size_t get_raw_word_size(size_t word_size) {
  75.174 -    // If only the dictionary is going to be used (i.e., no
  75.175 -    // indexed free list), then there is a minimum size requirement.
  75.176 -    // MinChunkSize is a placeholder for the real minimum size JJJ
  75.177      size_t byte_size = word_size * BytesPerWord;
  75.178  
  75.179 -    size_t raw_bytes_size = MAX2(byte_size,
  75.180 -                                 Metablock::min_block_byte_size());
  75.181 -    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
  75.182 +    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
  75.183 +    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());
  75.184 +
  75.185      size_t raw_word_size = raw_bytes_size / BytesPerWord;
  75.186      assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
  75.187  
  75.188 @@ -813,17 +759,8 @@
  75.189    }
  75.190  }
  75.191  
  75.192 -Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  75.193 -  Metablock* block = (Metablock*) p;
  75.194 -  block->set_word_size(word_size);
  75.195 -  block->set_prev(NULL);
  75.196 -  block->set_next(NULL);
  75.197 -
  75.198 -  return block;
  75.199 -}
  75.200 -
  75.201  void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  75.202 -  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  75.203 +  Metablock* free_chunk = ::new (p) Metablock(word_size);
  75.204    if (dictionary() == NULL) {
  75.205     _dictionary = new BlockTreeDictionary();
  75.206    }
  75.207 @@ -1069,7 +1006,7 @@
  75.208    }
  75.209  
  75.210    // Chunk is being removed from the chunks free list.
  75.211 -  dec_free_chunks_total(chunk->capacity_word_size());
  75.212 +  dec_free_chunks_total(chunk->word_size());
  75.213  }
  75.214  
  75.215  // Walk the list of VirtualSpaceNodes and delete
  75.216 @@ -1563,54 +1500,6 @@
  75.217  
  75.218  // Metadebug methods
  75.219  
  75.220 -void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
  75.221 -                                       size_t chunk_word_size){
  75.222 -#ifdef ASSERT
  75.223 -  VirtualSpaceList* vsl = sm->vs_list();
  75.224 -  if (MetaDataDeallocateALot &&
  75.225 -      Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  75.226 -    Metadebug::reset_deallocate_chunk_a_lot_count();
  75.227 -    for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
  75.228 -      Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
  75.229 -      if (dummy_chunk == NULL) {
  75.230 -        break;
  75.231 -      }
  75.232 -      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  75.233 -
  75.234 -      if (TraceMetadataChunkAllocation && Verbose) {
  75.235 -        gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
  75.236 -                               sm->sum_count_in_chunks_in_use());
  75.237 -        dummy_chunk->print_on(gclog_or_tty);
  75.238 -        gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  75.239 -                               sm->chunk_manager()->free_chunks_total_words(),
  75.240 -                               sm->chunk_manager()->free_chunks_count());
  75.241 -      }
  75.242 -    }
  75.243 -  } else {
  75.244 -    Metadebug::inc_deallocate_chunk_a_lot_count();
  75.245 -  }
  75.246 -#endif
  75.247 -}
  75.248 -
  75.249 -void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
  75.250 -                                       size_t raw_word_size){
  75.251 -#ifdef ASSERT
  75.252 -  if (MetaDataDeallocateALot &&
  75.253 -        Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  75.254 -    Metadebug::set_deallocate_block_a_lot_count(0);
  75.255 -    for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
  75.256 -      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
  75.257 -      if (dummy_block == 0) {
  75.258 -        break;
  75.259 -      }
  75.260 -      sm->deallocate(dummy_block, raw_word_size);
  75.261 -    }
  75.262 -  } else {
  75.263 -    Metadebug::inc_deallocate_block_a_lot_count();
  75.264 -  }
  75.265 -#endif
  75.266 -}
  75.267 -
  75.268  void Metadebug::init_allocation_fail_alot_count() {
  75.269    if (MetadataAllocationFailALot) {
  75.270      _allocation_fail_alot_count =
  75.271 @@ -1754,31 +1643,6 @@
  75.272    return free_chunks(index);
  75.273  }
  75.274  
  75.275 -void ChunkManager::free_chunks_put(Metachunk* chunk) {
  75.276 -  assert_lock_strong(SpaceManager::expand_lock());
  75.277 -  ChunkList* free_list = find_free_chunks_list(chunk->word_size());
  75.278 -  chunk->set_next(free_list->head());
  75.279 -  free_list->set_head(chunk);
  75.280 -  // chunk is being returned to the chunk free list
  75.281 -  inc_free_chunks_total(chunk->capacity_word_size());
  75.282 -  slow_locked_verify();
  75.283 -}
  75.284 -
  75.285 -void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
  75.286 -  // The deallocation of a chunk originates in the freelist
  75.287 -  // manangement code for a Metaspace and does not hold the
  75.288 -  // lock.
  75.289 -  assert(chunk != NULL, "Deallocating NULL");
  75.290 -  assert_lock_strong(SpaceManager::expand_lock());
  75.291 -  slow_locked_verify();
  75.292 -  if (TraceMetadataChunkAllocation) {
  75.293 -    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  75.294 -                           PTR_FORMAT "  size " SIZE_FORMAT,
  75.295 -                           chunk, chunk->word_size());
  75.296 -  }
  75.297 -  free_chunks_put(chunk);
  75.298 -}
  75.299 -
  75.300  Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
  75.301    assert_lock_strong(SpaceManager::expand_lock());
  75.302  
  75.303 @@ -1822,7 +1686,7 @@
  75.304    }
  75.305  
  75.306    // Chunk is being removed from the chunks free list.
  75.307 -  dec_free_chunks_total(chunk->capacity_word_size());
  75.308 +  dec_free_chunks_total(chunk->word_size());
  75.309  
  75.310    // Remove it from the links to this freelist
  75.311    chunk->set_next(NULL);
  75.312 @@ -1830,7 +1694,7 @@
  75.313  #ifdef ASSERT
  75.314    // Chunk is no longer on any freelist. Setting to false make container_count_slow()
  75.315    // work.
  75.316 -  chunk->set_is_free(false);
  75.317 +  chunk->set_is_tagged_free(false);
  75.318  #endif
  75.319    chunk->container()->inc_container_count();
  75.320  
  75.321 @@ -1962,7 +1826,7 @@
  75.322      for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  75.323        Metachunk* chunk = chunks_in_use(i);
  75.324        while (chunk != NULL) {
  75.325 -        sum += chunk->capacity_word_size();
  75.326 +        sum += chunk->word_size();
  75.327          chunk = chunk->next();
  75.328        }
  75.329      }
  75.330 @@ -2098,10 +1962,6 @@
  75.331    size_t grow_chunks_by_words = calc_chunk_size(word_size);
  75.332    Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
  75.333  
  75.334 -  if (next != NULL) {
  75.335 -    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
  75.336 -  }
  75.337 -
  75.338    MetaWord* mem = NULL;
  75.339  
  75.340    // If a chunk was available, add it to the in-use chunk list
  75.341 @@ -2210,7 +2070,7 @@
  75.342      // Capture the next link before it is changed
  75.343      // by the call to return_chunk_at_head();
  75.344      Metachunk* next = cur->next();
  75.345 -    cur->set_is_free(true);
  75.346 +    DEBUG_ONLY(cur->set_is_tagged_free(true);)
  75.347      list->return_chunk_at_head(cur);
  75.348      cur = next;
  75.349    }
  75.350 @@ -2282,7 +2142,7 @@
  75.351  
  75.352    while (humongous_chunks != NULL) {
  75.353  #ifdef ASSERT
  75.354 -    humongous_chunks->set_is_free(true);
  75.355 +    humongous_chunks->set_is_tagged_free(true);
  75.356  #endif
  75.357      if (TraceMetadataChunkAllocation && Verbose) {
  75.358        gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
  75.359 @@ -2446,7 +2306,6 @@
  75.360    if (p == NULL) {
  75.361      p = allocate_work(raw_word_size);
  75.362    }
  75.363 -  Metadebug::deallocate_block_a_lot(this, raw_word_size);
  75.364  
  75.365    return p;
  75.366  }
  75.367 @@ -2545,7 +2404,7 @@
  75.368        curr->print_on(out);
  75.369        curr_total += curr->word_size();
  75.370        used += curr->used_word_size();
  75.371 -      capacity += curr->capacity_word_size();
  75.372 +      capacity += curr->word_size();
   75.374        waste += curr->free_word_size() + curr->overhead();
  75.374      }
  75.375    }
  75.376 @@ -3396,7 +3255,7 @@
  75.377  }
  75.378  
  75.379  
  75.380 -Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  75.381 +MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  75.382                                bool read_only, MetaspaceObj::Type type, TRAPS) {
  75.383    if (HAS_PENDING_EXCEPTION) {
  75.384      assert(false, "Should not allocate with exception pending");
  75.385 @@ -3415,10 +3274,14 @@
  75.386      MetaWord* result = space->allocate(word_size, NonClassType);
  75.387      if (result == NULL) {
  75.388        report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
  75.389 -    } else {
  75.390 -      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
  75.391      }
  75.392 -    return Metablock::initialize(result, word_size);
  75.393 +
  75.394 +    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
  75.395 +
  75.396 +    // Zero initialize.
  75.397 +    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
  75.398 +
  75.399 +    return result;
  75.400    }
  75.401  
  75.402    MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
  75.403 @@ -3443,7 +3306,10 @@
  75.404      return NULL;
  75.405    }
  75.406  
  75.407 -  return Metablock::initialize(result, word_size);
  75.408 +  // Zero initialize.
  75.409 +  Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
  75.410 +
  75.411 +  return result;
  75.412  }
  75.413  
  75.414  void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
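
With Metablock::initialize() gone, Metaspace::allocate() now returns a raw MetaWord* and zero fills it in place via Copy::fill_to_aligned_words, while BlockFreelist::return_block() builds the freelist header with a placement new (::new (p) Metablock(word_size)). A simplified sketch of the zero-fill half, assuming word-sized, word-aligned storage (the fill_to_aligned_words stand-in below mirrors the call's argument order, not the VM's optimized implementation):

    #include <cstddef>
    #include <stdint.h>

    typedef uintptr_t MetaWord;  // word-sized stand-in

    // Simplified stand-in: fill word_size words at dest with value.
    static void fill_to_aligned_words(MetaWord* dest, size_t word_size,
                                      MetaWord value) {
      for (size_t i = 0; i < word_size; i++) {
        dest[i] = value;
      }
    }

    int main() {
      MetaWord backing[16];
      // Zero initialize the newly allocated metadata words, as the
      // reworked Metaspace::allocate() now does for its callers.
      fill_to_aligned_words(backing, 16, 0);
      return backing[0] == 0 ? 0 : 1;
    }
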
    76.1 --- a/src/share/vm/memory/metaspace.hpp	Fri Oct 18 10:37:26 2013 +0000
    76.2 +++ b/src/share/vm/memory/metaspace.hpp	Fri Oct 18 19:44:40 2013 -0700
    76.3 @@ -139,7 +139,6 @@
    76.4    // Allocate space for metadata of type mdtype. This is space
    76.5    // within a Metachunk and is used by
    76.6    //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
    76.7 -  // which returns a Metablock.
    76.8    MetaWord* allocate(size_t word_size, MetadataType mdtype);
    76.9  
   76.10    // Virtual Space lists for both classes and other metadata
   76.11 @@ -217,8 +216,8 @@
   76.12    size_t used_bytes_slow(MetadataType mdtype) const;
   76.13    size_t capacity_bytes_slow(MetadataType mdtype) const;
   76.14  
   76.15 -  static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
   76.16 -                             bool read_only, MetaspaceObj::Type type, TRAPS);
   76.17 +  static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
   76.18 +                            bool read_only, MetaspaceObj::Type type, TRAPS);
   76.19    void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
   76.20  
   76.21    MetaWord* expand_and_allocate(size_t size,
    77.1 --- a/src/share/vm/oops/method.hpp	Fri Oct 18 10:37:26 2013 +0000
    77.2 +++ b/src/share/vm/oops/method.hpp	Fri Oct 18 19:44:40 2013 -0700
    77.3 @@ -805,6 +805,7 @@
    77.4   private:
    77.5    void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
    77.6  
    77.7 + public:
    77.8    MethodCounters* get_method_counters(TRAPS) {
    77.9      if (_method_counters == NULL) {
   77.10        build_method_counters(this, CHECK_AND_CLEAR_NULL);
   77.11 @@ -812,7 +813,6 @@
   77.12      return _method_counters;
   77.13    }
   77.14  
   77.15 - public:
   77.16    bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable();  }
   77.17    void  set_not_c1_compilable()               {       _access_flags.set_not_c1_compilable();   }
   77.18    void clear_not_c1_compilable()              {       _access_flags.clear_not_c1_compilable(); }
    78.1 --- a/src/share/vm/oops/methodData.cpp	Fri Oct 18 10:37:26 2013 +0000
    78.2 +++ b/src/share/vm/oops/methodData.cpp	Fri Oct 18 19:44:40 2013 -0700
    78.3 @@ -56,6 +56,11 @@
    78.4    if (needs_array_len(tag)) {
    78.5      set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
    78.6    }
    78.7 +  if (tag == call_type_data_tag) {
    78.8 +    CallTypeData::initialize(this, cell_count);
    78.9 +  } else if (tag == virtual_call_type_data_tag) {
   78.10 +    VirtualCallTypeData::initialize(this, cell_count);
   78.11 +  }
   78.12  }
   78.13  
   78.14  void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
   78.15 @@ -76,7 +81,7 @@
   78.16  }
   78.17  
   78.18  #ifndef PRODUCT
   78.19 -void ProfileData::print_shared(outputStream* st, const char* name) {
   78.20 +void ProfileData::print_shared(outputStream* st, const char* name) const {
   78.21    st->print("bci: %d", bci());
   78.22    st->fill_to(tab_width_one);
   78.23    st->print("%s", name);
   78.24 @@ -91,8 +96,8 @@
   78.25      st->print("flags(%d) ", flags);
   78.26  }
   78.27  
   78.28 -void ProfileData::tab(outputStream* st) {
   78.29 -  st->fill_to(tab_width_two);
   78.30 +void ProfileData::tab(outputStream* st, bool first) const {
   78.31 +  st->fill_to(first ? tab_width_one : tab_width_two);
   78.32  }
   78.33  #endif // !PRODUCT
   78.34  
   78.35 @@ -104,7 +109,7 @@
   78.36  
   78.37  
   78.38  #ifndef PRODUCT
   78.39 -void BitData::print_data_on(outputStream* st) {
   78.40 +void BitData::print_data_on(outputStream* st) const {
   78.41    print_shared(st, "BitData");
   78.42  }
   78.43  #endif // !PRODUCT
   78.44 @@ -115,7 +120,7 @@
   78.45  // A CounterData corresponds to a simple counter.
   78.46  
   78.47  #ifndef PRODUCT
   78.48 -void CounterData::print_data_on(outputStream* st) {
   78.49 +void CounterData::print_data_on(outputStream* st) const {
   78.50    print_shared(st, "CounterData");
   78.51    st->print_cr("count(%u)", count());
   78.52  }
   78.53 @@ -145,12 +150,207 @@
   78.54  }
   78.55  
   78.56  #ifndef PRODUCT
   78.57 -void JumpData::print_data_on(outputStream* st) {
   78.58 +void JumpData::print_data_on(outputStream* st) const {
   78.59    print_shared(st, "JumpData");
   78.60    st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
   78.61  }
   78.62  #endif // !PRODUCT
   78.63  
   78.64 +int TypeStackSlotEntries::compute_cell_count(Symbol* signature, int max) {
   78.65 +  ResourceMark rm;
   78.66 +  SignatureStream ss(signature);
   78.67 +  int args_count = MIN2(ss.reference_parameter_count(), max);
   78.68 +  return args_count * per_arg_cell_count;
   78.69 +}
   78.70 +
   78.71 +int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
   78.72 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
   78.73 +  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
   78.74 +  Bytecode_invoke inv(stream->method(), stream->bci());
   78.75 +  int args_cell = 0;
   78.76 +  if (arguments_profiling_enabled()) {
   78.77 +    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), TypeProfileArgsLimit);
   78.78 +  }
   78.79 +  int ret_cell = 0;
   78.80 +  if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
   78.81 +    ret_cell = ReturnTypeEntry::static_cell_count();
   78.82 +  }
   78.83 +  int header_cell = 0;
   78.84 +  if (args_cell + ret_cell > 0) {
   78.85 +    header_cell = header_cell_count();
   78.86 +  }
   78.87 +
   78.88 +  return header_cell + args_cell + ret_cell;
   78.89 +}
   78.90 +
   78.91 +class ArgumentOffsetComputer : public SignatureInfo {
   78.92 +private:
   78.93 +  int _max;
   78.94 +  GrowableArray<int> _offsets;
   78.95 +
   78.96 +  void set(int size, BasicType type) { _size += size; }
   78.97 +  void do_object(int begin, int end) {
   78.98 +    if (_offsets.length() < _max) {
   78.99 +      _offsets.push(_size);
  78.100 +    }
  78.101 +    SignatureInfo::do_object(begin, end);
  78.102 +  }
  78.103 +  void do_array (int begin, int end) {
  78.104 +    if (_offsets.length() < _max) {
  78.105 +      _offsets.push(_size);
  78.106 +    }
  78.107 +    SignatureInfo::do_array(begin, end);
  78.108 +  }
  78.109 +
  78.110 +public:
  78.111 +  ArgumentOffsetComputer(Symbol* signature, int max)
  78.112 +    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
  78.113 +  }
  78.114 +
  78.115 +  int total() { lazy_iterate_parameters(); return _size; }
  78.116 +
  78.117 +  int off_at(int i) const { return _offsets.at(i); }
  78.118 +};
  78.119 +
  78.120 +void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver) {
  78.121 +  ResourceMark rm;
  78.122 +  ArgumentOffsetComputer aos(signature, _number_of_entries);
  78.123 +  aos.total();
  78.124 +  for (int i = 0; i < _number_of_entries; i++) {
  78.125 +    set_stack_slot(i, aos.off_at(i) + (has_receiver ? 1 : 0));
  78.126 +    set_type(i, type_none());
  78.127 +  }
  78.128 +}
  78.129 +
  78.130 +void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  78.131 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  78.132 +  Bytecode_invoke inv(stream->method(), stream->bci());
  78.133 +
  78.134 +  SignatureStream ss(inv.signature());
  78.135 +  if (has_arguments()) {
  78.136 +#ifdef ASSERT
  78.137 +    ResourceMark rm;
  78.138 +    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
  78.139 +    assert(count > 0, "room for args type but none found?");
  78.140 +    check_number_of_arguments(count);
  78.141 +#endif
  78.142 +    _args.post_initialize(inv.signature(), inv.has_receiver());
  78.143 +  }
  78.144 +
  78.145 +  if (has_return()) {
  78.146 +    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
  78.147 +    _ret.post_initialize();
  78.148 +  }
  78.149 +}
  78.150 +
  78.151 +void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  78.152 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  78.153 +  Bytecode_invoke inv(stream->method(), stream->bci());
  78.154 +
  78.155 +  if (has_arguments()) {
  78.156 +#ifdef ASSERT
  78.157 +    ResourceMark rm;
  78.158 +    SignatureStream ss(inv.signature());
  78.159 +    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
  78.160 +    assert(count > 0, "room for args type but none found?");
  78.161 +    check_number_of_arguments(count);
  78.162 +#endif
  78.163 +    _args.post_initialize(inv.signature(), inv.has_receiver());
  78.164 +  }
  78.165 +
  78.166 +  if (has_return()) {
  78.167 +    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
  78.168 +    _ret.post_initialize();
  78.169 +  }
  78.170 +}
  78.171 +
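// Note: despite its name, is_loader_alive() below returns true when the
// entry records a klass whose defining class loader is no longer alive,
// i.e. exactly when the cleanup code that follows must reset the entry.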
  78.172 +bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
  78.173 +  return !is_type_none(p) &&
  78.174 +    !((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
  78.175 +}
  78.176 +
  78.177 +void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  78.178 +  for (int i = 0; i < _number_of_entries; i++) {
  78.179 +    intptr_t p = type(i);
  78.180 +    if (is_loader_alive(is_alive_cl, p)) {
  78.181 +      set_type(i, type_none());
  78.182 +    }
  78.183 +  }
  78.184 +}
  78.185 +
  78.186 +void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  78.187 +  intptr_t p = type();
  78.188 +  if (is_loader_alive(is_alive_cl, p)) {
  78.189 +    set_type(type_none());
  78.190 +  }
  78.191 +}
  78.192 +
  78.193 +bool TypeEntriesAtCall::return_profiling_enabled() {
  78.194 +  return MethodData::profile_return();
  78.195 +}
  78.196 +
  78.197 +bool TypeEntriesAtCall::arguments_profiling_enabled() {
  78.198 +  return MethodData::profile_arguments();
  78.199 +}
  78.200 +
  78.201 +#ifndef PRODUCT
  78.202 +void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  78.203 +  if (is_type_none(k)) {
  78.204 +    st->print("none");
  78.205 +  } else if (is_type_unknown(k)) {
  78.206 +    st->print("unknown");
  78.207 +  } else {
  78.208 +    valid_klass(k)->print_value_on(st);
  78.209 +  }
  78.210 +  if (was_null_seen(k)) {
  78.211 +    st->print(" (null seen)");
  78.212 +  }
  78.213 +}
  78.214 +
  78.215 +void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  78.216 +  for (int i = 0; i < _number_of_entries; i++) {
  78.217 +    _pd->tab(st);
  78.218 +    st->print("%d: stack(%u) ", i, stack_slot(i));
  78.219 +    print_klass(st, type(i));
  78.220 +    st->cr();
  78.221 +  }
  78.222 +}
  78.223 +
  78.224 +void ReturnTypeEntry::print_data_on(outputStream* st) const {
  78.225 +  _pd->tab(st);
  78.226 +  print_klass(st, type());
  78.227 +  st->cr();
  78.228 +}
  78.229 +
  78.230 +void CallTypeData::print_data_on(outputStream* st) const {
  78.231 +  CounterData::print_data_on(st);
  78.232 +  if (has_arguments()) {
  78.233 +    tab(st, true);
  78.234 +    st->print("argument types");
  78.235 +    _args.print_data_on(st);
  78.236 +  }
  78.237 +  if (has_return()) {
  78.238 +    tab(st, true);
  78.239 +    st->print("return type");
  78.240 +    _ret.print_data_on(st);
  78.241 +  }
  78.242 +}
  78.243 +
  78.244 +void VirtualCallTypeData::print_data_on(outputStream* st) const {
  78.245 +  VirtualCallData::print_data_on(st);
  78.246 +  if (has_arguments()) {
  78.247 +    tab(st, true);
  78.248 +    st->print("argument types");
  78.249 +    _args.print_data_on(st);
  78.250 +  }
  78.251 +  if (has_return()) {
  78.252 +    tab(st, true);
  78.253 +    st->print("return type");
  78.254 +    _ret.print_data_on(st);
  78.255 +  }
  78.256 +}
  78.257 +#endif
  78.258 +
  78.259  // ==================================================================
  78.260  // ReceiverTypeData
  78.261  //
  78.262 @@ -169,7 +369,7 @@
  78.263  }
  78.264  
  78.265  #ifndef PRODUCT
  78.266 -void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
  78.267 +void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  78.268    uint row;
  78.269    int entries = 0;
  78.270    for (row = 0; row < row_limit(); row++) {
  78.271 @@ -190,11 +390,11 @@
  78.272      }
  78.273    }
  78.274  }
  78.275 -void ReceiverTypeData::print_data_on(outputStream* st) {
  78.276 +void ReceiverTypeData::print_data_on(outputStream* st) const {
  78.277    print_shared(st, "ReceiverTypeData");
  78.278    print_receiver_data_on(st);
  78.279  }
  78.280 -void VirtualCallData::print_data_on(outputStream* st) {
  78.281 +void VirtualCallData::print_data_on(outputStream* st) const {
  78.282    print_shared(st, "VirtualCallData");
  78.283    print_receiver_data_on(st);
  78.284  }
  78.285 @@ -246,7 +446,7 @@
  78.286  
  78.287  
  78.288  #ifndef PRODUCT
  78.289 -void RetData::print_data_on(outputStream* st) {
  78.290 +void RetData::print_data_on(outputStream* st) const {
  78.291    print_shared(st, "RetData");
  78.292    uint row;
  78.293    int entries = 0;
  78.294 @@ -281,7 +481,7 @@
  78.295  }
  78.296  
  78.297  #ifndef PRODUCT
  78.298 -void BranchData::print_data_on(outputStream* st) {
  78.299 +void BranchData::print_data_on(outputStream* st) const {
  78.300    print_shared(st, "BranchData");
  78.301    st->print_cr("taken(%u) displacement(%d)",
  78.302                 taken(), displacement());
  78.303 @@ -355,7 +555,7 @@
  78.304  }
  78.305  
  78.306  #ifndef PRODUCT
  78.307 -void MultiBranchData::print_data_on(outputStream* st) {
  78.308 +void MultiBranchData::print_data_on(outputStream* st) const {
  78.309    print_shared(st, "MultiBranchData");
  78.310    st->print_cr("default_count(%u) displacement(%d)",
  78.311                 default_count(), default_displacement());
  78.312 @@ -369,7 +569,7 @@
  78.313  #endif
  78.314  
  78.315  #ifndef PRODUCT
  78.316 -void ArgInfoData::print_data_on(outputStream* st) {
  78.317 +void ArgInfoData::print_data_on(outputStream* st) const {
  78.318    print_shared(st, "ArgInfoData");
  78.319    int nargs = number_of_args();
  78.320    for (int i = 0; i < nargs; i++) {
  78.321 @@ -407,7 +607,11 @@
  78.322      }
  78.323    case Bytecodes::_invokespecial:
  78.324    case Bytecodes::_invokestatic:
  78.325 -    return CounterData::static_cell_count();
  78.326 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  78.327 +      return variable_cell_count;
  78.328 +    } else {
  78.329 +      return CounterData::static_cell_count();
  78.330 +    }
  78.331    case Bytecodes::_goto:
  78.332    case Bytecodes::_goto_w:
  78.333    case Bytecodes::_jsr:
  78.334 @@ -415,9 +619,17 @@
  78.335      return JumpData::static_cell_count();
  78.336    case Bytecodes::_invokevirtual:
  78.337    case Bytecodes::_invokeinterface:
  78.338 -    return VirtualCallData::static_cell_count();
  78.339 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  78.340 +      return variable_cell_count;
  78.341 +    } else {
  78.342 +      return VirtualCallData::static_cell_count();
  78.343 +    }
  78.344    case Bytecodes::_invokedynamic:
  78.345 -    return CounterData::static_cell_count();
  78.346 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  78.347 +      return variable_cell_count;
  78.348 +    } else {
  78.349 +      return CounterData::static_cell_count();
  78.350 +    }
  78.351    case Bytecodes::_ret:
  78.352      return RetData::static_cell_count();
  78.353    case Bytecodes::_ifeq:
  78.354 @@ -453,7 +665,36 @@
  78.355      return 0;
  78.356    }
  78.357    if (cell_count == variable_cell_count) {
  78.358 -    cell_count = MultiBranchData::compute_cell_count(stream);
  78.359 +    switch (stream->code()) {
  78.360 +    case Bytecodes::_lookupswitch:
  78.361 +    case Bytecodes::_tableswitch:
  78.362 +      cell_count = MultiBranchData::compute_cell_count(stream);
  78.363 +      break;
  78.364 +    case Bytecodes::_invokespecial:
  78.365 +    case Bytecodes::_invokestatic:
  78.366 +    case Bytecodes::_invokedynamic:
  78.367 +      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
  78.368 +      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  78.369 +          profile_return_for_invoke(stream->method(), stream->bci())) {
  78.370 +        cell_count = CallTypeData::compute_cell_count(stream);
  78.371 +      } else {
  78.372 +        cell_count = CounterData::static_cell_count();
  78.373 +      }
  78.374 +      break;
  78.375 +    case Bytecodes::_invokevirtual:
  78.376 +    case Bytecodes::_invokeinterface: {
  78.377 +      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
  78.378 +      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  78.379 +          profile_return_for_invoke(stream->method(), stream->bci())) {
  78.380 +        cell_count = VirtualCallTypeData::compute_cell_count(stream);
  78.381 +      } else {
  78.382 +        cell_count = VirtualCallData::static_cell_count();
  78.383 +      }
  78.384 +      break;
  78.385 +    }
  78.386 +    default:
  78.387 +      fatal("unexpected bytecode for var length profile data");
  78.388 +    }
  78.389    }
  78.390    // Note:  cell_count might be zero, meaning that there is just
  78.391    //        a DataLayout header, with no extra cells.
  78.392 @@ -499,6 +740,7 @@
  78.393    // Add a cell to record information about modified arguments.
  78.394    int arg_size = method->size_of_parameters();
  78.395    object_size += DataLayout::compute_size_in_bytes(arg_size+1);
  78.396 +
  78.397    return object_size;
  78.398  }
  78.399  
  78.400 @@ -534,10 +776,21 @@
  78.401      }
  78.402      break;
  78.403    case Bytecodes::_invokespecial:
  78.404 -  case Bytecodes::_invokestatic:
  78.405 -    cell_count = CounterData::static_cell_count();
  78.406 -    tag = DataLayout::counter_data_tag;
  78.407 +  case Bytecodes::_invokestatic: {
  78.408 +    int counter_data_cell_count = CounterData::static_cell_count();
  78.409 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  78.410 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  78.411 +      cell_count = CallTypeData::compute_cell_count(stream);
  78.412 +    } else {
  78.413 +      cell_count = counter_data_cell_count;
  78.414 +    }
  78.415 +    if (cell_count > counter_data_cell_count) {
  78.416 +      tag = DataLayout::call_type_data_tag;
  78.417 +    } else {
  78.418 +      tag = DataLayout::counter_data_tag;
  78.419 +    }
  78.420      break;
  78.421 +  }
  78.422    case Bytecodes::_goto:
  78.423    case Bytecodes::_goto_w:
  78.424    case Bytecodes::_jsr:
  78.425 @@ -546,15 +799,37 @@
  78.426      tag = DataLayout::jump_data_tag;
  78.427      break;
  78.428    case Bytecodes::_invokevirtual:
  78.429 -  case Bytecodes::_invokeinterface:
  78.430 -    cell_count = VirtualCallData::static_cell_count();
  78.431 -    tag = DataLayout::virtual_call_data_tag;
  78.432 +  case Bytecodes::_invokeinterface: {
  78.433 +    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
  78.434 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  78.435 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  78.436 +      cell_count = VirtualCallTypeData::compute_cell_count(stream);
  78.437 +    } else {
  78.438 +      cell_count = virtual_call_data_cell_count;
  78.439 +    }
  78.440 +    if (cell_count > virtual_call_data_cell_count) {
  78.441 +      tag = DataLayout::virtual_call_type_data_tag;
  78.442 +    } else {
  78.443 +      tag = DataLayout::virtual_call_data_tag;
  78.444 +    }
  78.445      break;
  78.446 -  case Bytecodes::_invokedynamic:
  78.447 +  }
  78.448 +  case Bytecodes::_invokedynamic: {
  78.449      // %%% should make a type profile for any invokedynamic that takes a ref argument
  78.450 -    cell_count = CounterData::static_cell_count();
  78.451 -    tag = DataLayout::counter_data_tag;
  78.452 +    int counter_data_cell_count = CounterData::static_cell_count();
  78.453 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  78.454 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  78.455 +      cell_count = CallTypeData::compute_cell_count(stream);
  78.456 +    } else {
  78.457 +      cell_count = counter_data_cell_count;
  78.458 +    }
  78.459 +    if (cell_count > counter_data_cell_count) {
  78.460 +      tag = DataLayout::call_type_data_tag;
  78.461 +    } else {
  78.462 +      tag = DataLayout::counter_data_tag;
  78.463 +    }
  78.464      break;
  78.465 +  }
  78.466    case Bytecodes::_ret:
  78.467      cell_count = RetData::static_cell_count();
  78.468      tag = DataLayout::ret_data_tag;
  78.469 @@ -585,6 +860,11 @@
  78.470      break;
  78.471    }
  78.472    assert(tag == DataLayout::multi_branch_data_tag ||
  78.473 +         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
  78.474 +          (tag == DataLayout::call_type_data_tag ||
  78.475 +           tag == DataLayout::counter_data_tag ||
  78.476 +           tag == DataLayout::virtual_call_type_data_tag ||
  78.477 +           tag == DataLayout::virtual_call_data_tag)) ||
  78.478           cell_count == bytecode_cell_count(c), "cell counts must agree");
  78.479    if (cell_count >= 0) {
  78.480      assert(tag != DataLayout::no_tag, "bad tag");
  78.481 @@ -631,6 +911,10 @@
  78.482      return new MultiBranchData(this);
  78.483    case DataLayout::arg_info_data_tag:
  78.484      return new ArgInfoData(this);
  78.485 +  case DataLayout::call_type_data_tag:
  78.486 +    return new CallTypeData(this);
  78.487 +  case DataLayout::virtual_call_type_data_tag:
  78.488 +    return new VirtualCallTypeData(this);
  78.489    };
  78.490  }
  78.491  
  78.492 @@ -898,3 +1182,70 @@
  78.493    NEEDS_CLEANUP;
  78.494    // not yet implemented.
  78.495  }
  78.496 +
  78.497 +bool MethodData::profile_jsr292(methodHandle m, int bci) {
  78.498 +  if (m->is_compiled_lambda_form()) {
  78.499 +    return true;
  78.500 +  }
  78.501 +
   78.502 +  Bytecode_invoke inv(m, bci);
  78.503 +  return inv.is_invokedynamic() || inv.is_invokehandle();
  78.504 +}
  78.505 +
  78.506 +int MethodData::profile_arguments_flag() {
  78.507 +  return TypeProfileLevel % 10;
  78.508 +}
  78.509 +
  78.510 +bool MethodData::profile_arguments() {
  78.511 +  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
  78.512 +}
  78.513 +
  78.514 +bool MethodData::profile_arguments_jsr292_only() {
  78.515 +  return profile_arguments_flag() == type_profile_jsr292;
  78.516 +}
  78.517 +
  78.518 +bool MethodData::profile_all_arguments() {
  78.519 +  return profile_arguments_flag() == type_profile_all;
  78.520 +}
  78.521 +
  78.522 +bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
  78.523 +  if (!profile_arguments()) {
  78.524 +    return false;
  78.525 +  }
  78.526 +
  78.527 +  if (profile_all_arguments()) {
  78.528 +    return true;
  78.529 +  }
  78.530 +
  78.531 +  assert(profile_arguments_jsr292_only(), "inconsistent");
  78.532 +  return profile_jsr292(m, bci);
  78.533 +}
  78.534 +
  78.535 +int MethodData::profile_return_flag() {
  78.536 +  return TypeProfileLevel / 10;
  78.537 +}
  78.538 +
  78.539 +bool MethodData::profile_return() {
  78.540 +  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
  78.541 +}
  78.542 +
  78.543 +bool MethodData::profile_return_jsr292_only() {
  78.544 +  return profile_return_flag() == type_profile_jsr292;
  78.545 +}
  78.546 +
  78.547 +bool MethodData::profile_all_return() {
  78.548 +  return profile_return_flag() == type_profile_all;
  78.549 +}
  78.550 +
  78.551 +bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
  78.552 +  if (!profile_return()) {
  78.553 +    return false;
  78.554 +  }
  78.555 +
  78.556 +  if (profile_all_return()) {
  78.557 +    return true;
  78.558 +  }
  78.559 +
  78.560 +  assert(profile_return_jsr292_only(), "inconsistent");
  78.561 +  return profile_jsr292(m, bci);
  78.562 +}
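
As a minimal sketch of the flag decoding above (assuming the enum values
no_type_profile = 0, type_profile_jsr292 = 1 and type_profile_all = 2 implied
by the comparisons), TypeProfileLevel is read as two independent decimal
digits; the value 12 below is a hypothetical example:

    #include <cstdio>

    int main() {
      int TypeProfileLevel = 12;              // hypothetical setting
      int args_flag = TypeProfileLevel % 10;  // ones digit: argument profiling
      int ret_flag  = TypeProfileLevel / 10;  // tens digit: return profiling
      // 0 = off, 1 = JSR 292 call sites only, 2 = all invokes
      printf("arguments=%d (all), return=%d (jsr292 only)\n",
             args_flag, ret_flag);
      return 0;
    }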
    79.1 --- a/src/share/vm/oops/methodData.hpp	Fri Oct 18 10:37:26 2013 +0000
    79.2 +++ b/src/share/vm/oops/methodData.hpp	Fri Oct 18 19:44:40 2013 -0700
    79.3 @@ -117,7 +117,9 @@
    79.4      ret_data_tag,
    79.5      branch_data_tag,
    79.6      multi_branch_data_tag,
    79.7 -    arg_info_data_tag
    79.8 +    arg_info_data_tag,
    79.9 +    call_type_data_tag,
   79.10 +    virtual_call_type_data_tag
   79.11    };
   79.12  
   79.13    enum {
   79.14 @@ -165,7 +167,7 @@
   79.15    // occurred, and the MDO shows N occurrences of X, we make the
   79.16    // simplifying assumption that all N occurrences can be blamed
   79.17    // on that BCI.
   79.18 -  int trap_state() {
   79.19 +  int trap_state() const {
   79.20      return ((_header._struct._flags >> trap_shift) & trap_mask);
   79.21    }
   79.22  
   79.23 @@ -175,11 +177,11 @@
   79.24      _header._struct._flags = (new_state << trap_shift) | old_flags;
   79.25    }
   79.26  
   79.27 -  u1 flags() {
   79.28 +  u1 flags() const {
   79.29      return _header._struct._flags;
   79.30    }
   79.31  
   79.32 -  u2 bci() {
   79.33 +  u2 bci() const {
   79.34      return _header._struct._bci;
   79.35    }
   79.36  
   79.37 @@ -198,7 +200,7 @@
   79.38    void release_set_cell_at(int index, intptr_t value) {
   79.39      OrderAccess::release_store_ptr(&_cells[index], value);
   79.40    }
   79.41 -  intptr_t cell_at(int index) {
   79.42 +  intptr_t cell_at(int index) const {
   79.43      return _cells[index];
   79.44    }
   79.45  
   79.46 @@ -206,7 +208,7 @@
   79.47      assert(flag_number < flag_limit, "oob");
   79.48      _header._struct._flags |= (0x1 << flag_number);
   79.49    }
   79.50 -  bool flag_at(int flag_number) {
   79.51 +  bool flag_at(int flag_number) const {
   79.52      assert(flag_number < flag_limit, "oob");
   79.53      return (_header._struct._flags & (0x1 << flag_number)) != 0;
   79.54    }
   79.55 @@ -254,19 +256,23 @@
   79.56  class     CounterData;
   79.57  class       ReceiverTypeData;
   79.58  class         VirtualCallData;
   79.59 +class           VirtualCallTypeData;
   79.60  class       RetData;
   79.61 +class       CallTypeData;
   79.62  class   JumpData;
   79.63  class     BranchData;
   79.64  class   ArrayData;
   79.65  class     MultiBranchData;
   79.66  class     ArgInfoData;
   79.67  
   79.68 -
   79.69  // ProfileData
   79.70  //
   79.71  // A ProfileData object is created to refer to a section of profiling
   79.72  // data in a structured way.
   79.73  class ProfileData : public ResourceObj {
   79.74 +  friend class TypeEntries;
   79.75 +  friend class ReturnTypeEntry;
   79.76 +  friend class TypeStackSlotEntries;
   79.77  private:
   79.78  #ifndef PRODUCT
   79.79    enum {
   79.80 @@ -280,6 +286,7 @@
   79.81  
   79.82  protected:
   79.83    DataLayout* data() { return _data; }
   79.84 +  const DataLayout* data() const { return _data; }
   79.85  
   79.86    enum {
   79.87      cell_size = DataLayout::cell_size
   79.88 @@ -287,7 +294,7 @@
   79.89  
   79.90  public:
   79.91    // How many cells are in this?
   79.92 -  virtual int cell_count() {
   79.93 +  virtual int cell_count() const {
   79.94      ShouldNotReachHere();
   79.95      return -1;
   79.96    }
   79.97 @@ -307,7 +314,7 @@
   79.98      assert(0 <= index && index < cell_count(), "oob");
   79.99      data()->release_set_cell_at(index, value);
  79.100    }
  79.101 -  intptr_t intptr_at(int index) {
  79.102 +  intptr_t intptr_at(int index) const {
  79.103      assert(0 <= index && index < cell_count(), "oob");
  79.104      return data()->cell_at(index);
  79.105    }
  79.106 @@ -317,7 +324,7 @@
  79.107    void release_set_uint_at(int index, uint value) {
  79.108      release_set_intptr_at(index, (intptr_t) value);
  79.109    }
  79.110 -  uint uint_at(int index) {
  79.111 +  uint uint_at(int index) const {
  79.112      return (uint)intptr_at(index);
  79.113    }
  79.114    void set_int_at(int index, int value) {
  79.115 @@ -326,23 +333,23 @@
  79.116    void release_set_int_at(int index, int value) {
  79.117      release_set_intptr_at(index, (intptr_t) value);
  79.118    }
  79.119 -  int int_at(int index) {
  79.120 +  int int_at(int index) const {
  79.121      return (int)intptr_at(index);
  79.122    }
  79.123 -  int int_at_unchecked(int index) {
  79.124 +  int int_at_unchecked(int index) const {
  79.125      return (int)data()->cell_at(index);
  79.126    }
  79.127    void set_oop_at(int index, oop value) {
  79.128      set_intptr_at(index, cast_from_oop<intptr_t>(value));
  79.129    }
  79.130 -  oop oop_at(int index) {
  79.131 +  oop oop_at(int index) const {
  79.132      return cast_to_oop(intptr_at(index));
  79.133    }
  79.134  
  79.135    void set_flag_at(int flag_number) {
  79.136      data()->set_flag_at(flag_number);
  79.137    }
  79.138 -  bool flag_at(int flag_number) {
  79.139 +  bool flag_at(int flag_number) const {
  79.140      return data()->flag_at(flag_number);
  79.141    }
  79.142  
  79.143 @@ -362,7 +369,7 @@
  79.144    // Constructor for invalid ProfileData.
  79.145    ProfileData();
  79.146  
  79.147 -  u2 bci() {
  79.148 +  u2 bci() const {
  79.149      return data()->bci();
  79.150    }
  79.151  
  79.152 @@ -370,7 +377,7 @@
  79.153      return (address)_data;
  79.154    }
  79.155  
  79.156 -  int trap_state() {
  79.157 +  int trap_state() const {
  79.158      return data()->trap_state();
  79.159    }
  79.160    void set_trap_state(int new_state) {
  79.161 @@ -378,58 +385,68 @@
  79.162    }
  79.163  
  79.164    // Type checking
  79.165 -  virtual bool is_BitData()         { return false; }
  79.166 -  virtual bool is_CounterData()     { return false; }
  79.167 -  virtual bool is_JumpData()        { return false; }
  79.168 -  virtual bool is_ReceiverTypeData(){ return false; }
  79.169 -  virtual bool is_VirtualCallData() { return false; }
  79.170 -  virtual bool is_RetData()         { return false; }
  79.171 -  virtual bool is_BranchData()      { return false; }
  79.172 -  virtual bool is_ArrayData()       { return false; }
  79.173 -  virtual bool is_MultiBranchData() { return false; }
  79.174 -  virtual bool is_ArgInfoData()     { return false; }
  79.175 +  virtual bool is_BitData()         const { return false; }
  79.176 +  virtual bool is_CounterData()     const { return false; }
  79.177 +  virtual bool is_JumpData()        const { return false; }
  79.178 +  virtual bool is_ReceiverTypeData()const { return false; }
  79.179 +  virtual bool is_VirtualCallData() const { return false; }
  79.180 +  virtual bool is_RetData()         const { return false; }
  79.181 +  virtual bool is_BranchData()      const { return false; }
  79.182 +  virtual bool is_ArrayData()       const { return false; }
  79.183 +  virtual bool is_MultiBranchData() const { return false; }
  79.184 +  virtual bool is_ArgInfoData()     const { return false; }
  79.185 +  virtual bool is_CallTypeData()    const { return false; }
  79.186 +  virtual bool is_VirtualCallTypeData()const { return false; }
  79.187  
  79.188  
  79.189 -  BitData* as_BitData() {
  79.190 +  BitData* as_BitData() const {
  79.191      assert(is_BitData(), "wrong type");
  79.192      return is_BitData()         ? (BitData*)        this : NULL;
  79.193    }
  79.194 -  CounterData* as_CounterData() {
  79.195 +  CounterData* as_CounterData() const {
  79.196      assert(is_CounterData(), "wrong type");
  79.197      return is_CounterData()     ? (CounterData*)    this : NULL;
  79.198    }
  79.199 -  JumpData* as_JumpData() {
  79.200 +  JumpData* as_JumpData() const {
  79.201      assert(is_JumpData(), "wrong type");
  79.202      return is_JumpData()        ? (JumpData*)       this : NULL;
  79.203    }
  79.204 -  ReceiverTypeData* as_ReceiverTypeData() {
  79.205 +  ReceiverTypeData* as_ReceiverTypeData() const {
  79.206      assert(is_ReceiverTypeData(), "wrong type");
  79.207      return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
  79.208    }
  79.209 -  VirtualCallData* as_VirtualCallData() {
  79.210 +  VirtualCallData* as_VirtualCallData() const {
  79.211      assert(is_VirtualCallData(), "wrong type");
  79.212      return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
  79.213    }
  79.214 -  RetData* as_RetData() {
  79.215 +  RetData* as_RetData() const {
  79.216      assert(is_RetData(), "wrong type");
  79.217      return is_RetData()         ? (RetData*)        this : NULL;
  79.218    }
  79.219 -  BranchData* as_BranchData() {
  79.220 +  BranchData* as_BranchData() const {
  79.221      assert(is_BranchData(), "wrong type");
  79.222      return is_BranchData()      ? (BranchData*)     this : NULL;
  79.223    }
  79.224 -  ArrayData* as_ArrayData() {
  79.225 +  ArrayData* as_ArrayData() const {
  79.226      assert(is_ArrayData(), "wrong type");
  79.227      return is_ArrayData()       ? (ArrayData*)      this : NULL;
  79.228    }
  79.229 -  MultiBranchData* as_MultiBranchData() {
  79.230 +  MultiBranchData* as_MultiBranchData() const {
  79.231      assert(is_MultiBranchData(), "wrong type");
  79.232      return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
  79.233    }
  79.234 -  ArgInfoData* as_ArgInfoData() {
  79.235 +  ArgInfoData* as_ArgInfoData() const {
  79.236      assert(is_ArgInfoData(), "wrong type");
  79.237      return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
  79.238    }
  79.239 +  CallTypeData* as_CallTypeData() const {
  79.240 +    assert(is_CallTypeData(), "wrong type");
  79.241 +    return is_CallTypeData() ? (CallTypeData*)this : NULL;
  79.242 +  }
  79.243 +  VirtualCallTypeData* as_VirtualCallTypeData() const {
  79.244 +    assert(is_VirtualCallTypeData(), "wrong type");
  79.245 +    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
  79.246 +  }
  79.247  
  79.248  
  79.249    // Subclass specific initialization
  79.250 @@ -443,15 +460,15 @@
  79.251    // an oop in a ProfileData to the ci equivalent. Generally speaking,
  79.252    // most ProfileData don't require any translation, so we provide the null
  79.253    // translation here, and the required translators are in the ci subclasses.
  79.254 -  virtual void translate_from(ProfileData* data) {}
  79.255 +  virtual void translate_from(const ProfileData* data) {}
  79.256  
  79.257 -  virtual void print_data_on(outputStream* st) {
  79.258 +  virtual void print_data_on(outputStream* st) const {
  79.259      ShouldNotReachHere();
  79.260    }
  79.261  
  79.262  #ifndef PRODUCT
  79.263 -  void print_shared(outputStream* st, const char* name);
  79.264 -  void tab(outputStream* st);
  79.265 +  void print_shared(outputStream* st, const char* name) const;
  79.266 +  void tab(outputStream* st, bool first = false) const;
  79.267  #endif
  79.268  };
  79.269  
  79.270 @@ -470,13 +487,13 @@
  79.271    BitData(DataLayout* layout) : ProfileData(layout) {
  79.272    }
  79.273  
  79.274 -  virtual bool is_BitData() { return true; }
  79.275 +  virtual bool is_BitData() const { return true; }
  79.276  
  79.277    static int static_cell_count() {
  79.278      return bit_cell_count;
  79.279    }
  79.280  
  79.281 -  virtual int cell_count() {
  79.282 +  virtual int cell_count() const {
  79.283      return static_cell_count();
  79.284    }
  79.285  
  79.286 @@ -498,7 +515,7 @@
  79.287    }
  79.288  
  79.289  #ifndef PRODUCT
  79.290 -  void print_data_on(outputStream* st);
  79.291 +  void print_data_on(outputStream* st) const;
  79.292  #endif
  79.293  };
  79.294  
  79.295 @@ -514,18 +531,18 @@
  79.296  public:
  79.297    CounterData(DataLayout* layout) : BitData(layout) {}
  79.298  
  79.299 -  virtual bool is_CounterData() { return true; }
  79.300 +  virtual bool is_CounterData() const { return true; }
  79.301  
  79.302    static int static_cell_count() {
  79.303      return counter_cell_count;
  79.304    }
  79.305  
  79.306 -  virtual int cell_count() {
  79.307 +  virtual int cell_count() const {
  79.308      return static_cell_count();
  79.309    }
  79.310  
  79.311    // Direct accessor
  79.312 -  uint count() {
  79.313 +  uint count() const {
  79.314      return uint_at(count_off);
  79.315    }
  79.316  
  79.317 @@ -542,7 +559,7 @@
  79.318    }
  79.319  
  79.320  #ifndef PRODUCT
  79.321 -  void print_data_on(outputStream* st);
  79.322 +  void print_data_on(outputStream* st) const;
  79.323  #endif
  79.324  };
  79.325  
  79.326 @@ -570,18 +587,18 @@
  79.327        layout->tag() == DataLayout::branch_data_tag, "wrong type");
  79.328    }
  79.329  
  79.330 -  virtual bool is_JumpData() { return true; }
  79.331 +  virtual bool is_JumpData() const { return true; }
  79.332  
  79.333    static int static_cell_count() {
  79.334      return jump_cell_count;
  79.335    }
  79.336  
  79.337 -  virtual int cell_count() {
  79.338 +  virtual int cell_count() const {
  79.339      return static_cell_count();
  79.340    }
  79.341  
  79.342    // Direct accessor
  79.343 -  uint taken() {
  79.344 +  uint taken() const {
  79.345      return uint_at(taken_off_set);
  79.346    }
  79.347  
  79.348 @@ -598,7 +615,7 @@
  79.349      return cnt;
  79.350    }
  79.351  
  79.352 -  int displacement() {
  79.353 +  int displacement() const {
  79.354      return int_at(displacement_off_set);
  79.355    }
  79.356  
  79.357 @@ -615,7 +632,418 @@
  79.358    void post_initialize(BytecodeStream* stream, MethodData* mdo);
  79.359  
  79.360  #ifndef PRODUCT
  79.361 -  void print_data_on(outputStream* st);
  79.362 +  void print_data_on(outputStream* st) const;
  79.363 +#endif
  79.364 +};
  79.365 +
  79.366 +// Entries in a ProfileData object to record types: it can either be
  79.367 +// none (no profile), unknown (conflicting profile data) or a klass if
  79.368 +// a single one is seen. Whether a null reference was seen is also
  79.369 +// recorded. No counter is associated with the type and a single type
  79.370 +// is tracked (unlike VirtualCallData).
  79.371 +class TypeEntries {
  79.372 +
  79.373 +public:
  79.374 +
  79.375 +  // A single cell is used to record information for a type:
  79.376 +  // - the cell is initialized to 0
  79.377 +  // - when a type is discovered it is stored in the cell
   79.378 +  // - bit 0 of the cell records whether a null reference was
   79.379 +  // encountered
   79.380 +  // - bit 1 is set to record a conflict in the type information
  79.381 +
  79.382 +  enum {
  79.383 +    null_seen = 1,
  79.384 +    type_mask = ~null_seen,
  79.385 +    type_unknown = 2,
  79.386 +    status_bits = null_seen | type_unknown,
  79.387 +    type_klass_mask = ~status_bits
  79.388 +  };
  79.389 +
  79.390 +  // what to initialize a cell to
  79.391 +  static intptr_t type_none() {
  79.392 +    return 0;
  79.393 +  }
  79.394 +
  79.395 +  // null seen = bit 0 set?
  79.396 +  static bool was_null_seen(intptr_t v) {
  79.397 +    return (v & null_seen) != 0;
  79.398 +  }
  79.399 +
  79.400 +  // conflicting type information = bit 1 set?
  79.401 +  static bool is_type_unknown(intptr_t v) {
  79.402 +    return (v & type_unknown) != 0;
  79.403 +  }
  79.404 +
   79.405 +  // no type information yet = all bits cleared, ignoring bit 0?
  79.406 +  static bool is_type_none(intptr_t v) {
  79.407 +    return (v & type_mask) == 0;
  79.408 +  }
  79.409 +
   79.410 +  // recorded type: cell without bits 0 and 1
  79.411 +  static intptr_t klass_part(intptr_t v) {
  79.412 +    intptr_t r = v & type_klass_mask;
  79.413 +    assert (r != 0, "invalid");
  79.414 +    return r;
  79.415 +  }
  79.416 +
  79.417 +  // type recorded
  79.418 +  static Klass* valid_klass(intptr_t k) {
  79.419 +    if (!is_type_none(k) &&
  79.420 +        !is_type_unknown(k)) {
  79.421 +      return (Klass*)klass_part(k);
  79.422 +    } else {
  79.423 +      return NULL;
  79.424 +    }
  79.425 +  }
  79.426 +
  79.427 +  static intptr_t with_status(intptr_t k, intptr_t in) {
  79.428 +    return k | (in & status_bits);
  79.429 +  }
  79.430 +
  79.431 +  static intptr_t with_status(Klass* k, intptr_t in) {
  79.432 +    return with_status((intptr_t)k, in);
  79.433 +  }
  79.434 +
  79.435 +#ifndef PRODUCT
  79.436 +  static void print_klass(outputStream* st, intptr_t k);
  79.437 +#endif
  79.438 +
  79.439 +  // GC support
  79.440 +  static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
  79.441 +
  79.442 +protected:
  79.443 +  // ProfileData object these entries are part of
  79.444 +  ProfileData* _pd;
  79.445 +  // offset within the ProfileData object where the entries start
  79.446 +  const int _base_off;
  79.447 +
  79.448 +  TypeEntries(int base_off)
   79.449 +    : _pd(NULL), _base_off(base_off) {}
  79.450 +
  79.451 +  void set_intptr_at(int index, intptr_t value) {
  79.452 +    _pd->set_intptr_at(index, value);
  79.453 +  }
  79.454 +
  79.455 +  intptr_t intptr_at(int index) const {
  79.456 +    return _pd->intptr_at(index);
  79.457 +  }
  79.458 +
  79.459 +public:
  79.460 +  void set_profile_data(ProfileData* pd) {
  79.461 +    _pd = pd;
  79.462 +  }
  79.463 +};
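
The cell encoding above can be modeled standalone; this sketch is illustrative
only (dummy_klass is a stand-in for a Klass*, relying on pointer alignment
leaving bits 0 and 1 free):

    #include <cassert>
    #include <stdint.h>

    enum { null_seen = 1, type_unknown = 2,
           status_bits = null_seen | type_unknown };

    static intptr_t with_status(void* klass, intptr_t in) {
      return (intptr_t)klass | (in & status_bits);  // keep sticky status bits
    }

    int main() {
      static int dummy_klass;     // stand-in for a Klass*, 4-byte aligned
      intptr_t cell = 0;          // type_none(): freshly initialized cell
      cell |= null_seen;          // a null reference was observed
      cell = with_status(&dummy_klass, cell);  // record the type, keep bit 0
      assert((cell & ~(intptr_t)status_bits) == (intptr_t)&dummy_klass);
      assert((cell & null_seen) != 0);         // the null bit survived
      return 0;
    }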
  79.464 +
  79.465 +// Type entries used for arguments passed at a call and parameters on
  79.466 +// method entry. 2 cells per entry: one for the type encoded as in
  79.467 +// TypeEntries and one initialized with the stack slot where the
  79.468 +// profiled object is to be found so that the interpreter can locate
  79.469 +// it quickly.
  79.470 +class TypeStackSlotEntries : public TypeEntries {
  79.471 +
  79.472 +private:
  79.473 +  enum {
  79.474 +    stack_slot_entry,
  79.475 +    type_entry,
  79.476 +    per_arg_cell_count
  79.477 +  };
  79.478 +
  79.479 +  // offset of cell for stack slot for entry i within ProfileData object
  79.480 +  int stack_slot_offset(int i) const {
  79.481 +    return _base_off + stack_slot_local_offset(i);
  79.482 +  }
  79.483 +
  79.484 +protected:
  79.485 +  const int _number_of_entries;
  79.486 +
  79.487 +  // offset of cell for type for entry i within ProfileData object
  79.488 +  int type_offset(int i) const {
  79.489 +    return _base_off + type_local_offset(i);
  79.490 +  }
  79.491 +
  79.492 +public:
  79.493 +
  79.494 +  TypeStackSlotEntries(int base_off, int nb_entries)
  79.495 +    : TypeEntries(base_off), _number_of_entries(nb_entries) {}
  79.496 +
  79.497 +  static int compute_cell_count(Symbol* signature, int max);
  79.498 +
  79.499 +  void post_initialize(Symbol* signature, bool has_receiver);
  79.500 +
  79.501 +  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
  79.502 +  static int stack_slot_local_offset(int i) {
  79.503 +    return i * per_arg_cell_count + stack_slot_entry;
  79.504 +  }
  79.505 +
  79.506 +  // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
  79.507 +  static int type_local_offset(int i) {
  79.508 +    return i * per_arg_cell_count + type_entry;
  79.509 +  }
  79.510 +
  79.511 +  // stack slot for entry i
  79.512 +  uint stack_slot(int i) const {
  79.513 +    assert(i >= 0 && i < _number_of_entries, "oob");
  79.514 +    return _pd->uint_at(stack_slot_offset(i));
  79.515 +  }
  79.516 +
  79.517 +  // set stack slot for entry i
  79.518 +  void set_stack_slot(int i, uint num) {
  79.519 +    assert(i >= 0 && i < _number_of_entries, "oob");
  79.520 +    _pd->set_uint_at(stack_slot_offset(i), num);
  79.521 +  }
  79.522 +
  79.523 +  // type for entry i
  79.524 +  intptr_t type(int i) const {
  79.525 +    assert(i >= 0 && i < _number_of_entries, "oob");
  79.526 +    return _pd->intptr_at(type_offset(i));
  79.527 +  }
  79.528 +
  79.529 +  // set type for entry i
  79.530 +  void set_type(int i, intptr_t k) {
  79.531 +    assert(i >= 0 && i < _number_of_entries, "oob");
  79.532 +    _pd->set_intptr_at(type_offset(i), k);
  79.533 +  }
  79.534 +
  79.535 +  static ByteSize per_arg_size() {
  79.536 +    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
  79.537 +  }
  79.538 +
  79.539 +  static int per_arg_count() {
   79.540 +    return per_arg_cell_count;
  79.541 +  }
  79.542 +
  79.543 +  // GC support
  79.544 +  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  79.545 +
  79.546 +#ifndef PRODUCT
  79.547 +  void print_data_on(outputStream* st) const;
  79.548 +#endif
  79.549 +};
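
A small sketch of the two-cells-per-entry offset arithmetic above (mirroring
stack_slot_local_offset() and type_local_offset(); illustrative only):

    #include <cstdio>

    enum { stack_slot_entry = 0, type_entry = 1, per_arg_cell_count = 2 };

    static int stack_slot_local_offset(int i) {
      return i * per_arg_cell_count + stack_slot_entry;
    }
    static int type_local_offset(int i) {
      return i * per_arg_cell_count + type_entry;
    }

    int main() {
      for (int i = 0; i < 3; i++) {  // entry 0 -> cells 0/1, 1 -> 2/3, 2 -> 4/5
        printf("entry %d: slot cell %d, type cell %d\n",
               i, stack_slot_local_offset(i), type_local_offset(i));
      }
      return 0;
    }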
  79.550 +
  79.551 +// Type entry used for return from a call. A single cell to record the
  79.552 +// type.
  79.553 +class ReturnTypeEntry : public TypeEntries {
  79.554 +
  79.555 +private:
  79.556 +  enum {
  79.557 +    cell_count = 1
  79.558 +  };
  79.559 +
  79.560 +public:
  79.561 +  ReturnTypeEntry(int base_off)
  79.562 +    : TypeEntries(base_off) {}
  79.563 +
  79.564 +  void post_initialize() {
  79.565 +    set_type(type_none());
  79.566 +  }
  79.567 +
  79.568 +  intptr_t type() const {
  79.569 +    return _pd->intptr_at(_base_off);
  79.570 +  }
  79.571 +
  79.572 +  void set_type(intptr_t k) {
  79.573 +    _pd->set_intptr_at(_base_off, k);
  79.574 +  }
  79.575 +
  79.576 +  static int static_cell_count() {
  79.577 +    return cell_count;
  79.578 +  }
  79.579 +
  79.580 +  static ByteSize size() {
  79.581 +    return in_ByteSize(cell_count * DataLayout::cell_size);
  79.582 +  }
  79.583 +
  79.584 +  ByteSize type_offset() {
  79.585 +    return DataLayout::cell_offset(_base_off);
  79.586 +  }
  79.587 +
  79.588 +  // GC support
  79.589 +  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  79.590 +
  79.591 +#ifndef PRODUCT
  79.592 +  void print_data_on(outputStream* st) const;
  79.593 +#endif
  79.594 +};
  79.595 +
  79.596 +// Entries to collect type information at a call: contains arguments
  79.597 +// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
  79.598 +// number of cells. Because the number of cells for the return type is
   79.599 +// smaller than the number of cells for the type of an argument, the
  79.600 +// number of cells is used to tell how many arguments are profiled and
  79.601 +// whether a return value is profiled. See has_arguments() and
  79.602 +// has_return().
  79.603 +class TypeEntriesAtCall {
  79.604 +private:
  79.605 +  static int stack_slot_local_offset(int i) {
  79.606 +    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
  79.607 +  }
  79.608 +
  79.609 +  static int argument_type_local_offset(int i) {
   79.610 +    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
  79.611 +  }
  79.612 +
  79.613 +public:
  79.614 +
  79.615 +  static int header_cell_count() {
  79.616 +    return 1;
  79.617 +  }
  79.618 +
  79.619 +  static int cell_count_local_offset() {
  79.620 +    return 0;
  79.621 +  }
  79.622 +
  79.623 +  static int compute_cell_count(BytecodeStream* stream);
  79.624 +
  79.625 +  static void initialize(DataLayout* dl, int base, int cell_count) {
  79.626 +    int off = base + cell_count_local_offset();
  79.627 +    dl->set_cell_at(off, cell_count - base - header_cell_count());
  79.628 +  }
  79.629 +
  79.630 +  static bool arguments_profiling_enabled();
  79.631 +  static bool return_profiling_enabled();
  79.632 +
  79.633 +  // Code generation support
  79.634 +  static ByteSize cell_count_offset() {
  79.635 +    return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
  79.636 +  }
  79.637 +
  79.638 +  static ByteSize args_data_offset() {
  79.639 +    return in_ByteSize(header_cell_count() * DataLayout::cell_size);
  79.640 +  }
  79.641 +
  79.642 +  static ByteSize stack_slot_offset(int i) {
  79.643 +    return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
  79.644 +  }
  79.645 +
  79.646 +  static ByteSize argument_type_offset(int i) {
  79.647 +    return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
  79.648 +  }
  79.649 +};
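
Since has_arguments(), has_return() and number_of_arguments() in the two users
of TypeEntriesAtCall below are derived purely from this cell count, a worked
example may help. The layout is inferred from the offsets above and the values
are hypothetical; illustrative sketch only:

    #include <cstdio>

    int main() {
      // Cells for a call with two profiled arguments and a profiled return
      // value, relative to the first TypeEntriesAtCall cell:
      //   cell 0: header (number of cells that follow)
      //   cell 1: slot of arg 0    cell 2: type of arg 0
      //   cell 3: slot of arg 1    cell 4: type of arg 1
      //   cell 5: type of the return value
      const int per_arg = 2;                 // TypeStackSlotEntries::per_arg_count()
      int cells_no_header = 2 * per_arg + 1; // = 5
      int has_arguments = cells_no_header >= per_arg;        // 5 >= 2 -> 1
      int has_return    = (cells_no_header % per_arg) != 0;  // 5 % 2 -> 1
      int nb_arguments  = cells_no_header / per_arg;         // 5 / 2 -> 2
      printf("args=%d nb=%d ret=%d\n", has_arguments, nb_arguments, has_return);
      return 0;
    }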
  79.650 +
  79.651 +// CallTypeData
  79.652 +//
   79.653 +// A CallTypeData is used to access profiling information about a
   79.654 +// non-virtual call for which we collect type information about
   79.655 +// arguments and return value.
  79.656 +class CallTypeData : public CounterData {
  79.657 +private:
  79.658 +  // entries for arguments if any
  79.659 +  TypeStackSlotEntries _args;
  79.660 +  // entry for return type if any
  79.661 +  ReturnTypeEntry _ret;
  79.662 +
  79.663 +  int cell_count_global_offset() const {
  79.664 +    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  79.665 +  }
  79.666 +
  79.667 +  // number of cells not counting the header
  79.668 +  int cell_count_no_header() const {
  79.669 +    return uint_at(cell_count_global_offset());
  79.670 +  }
  79.671 +
  79.672 +  void check_number_of_arguments(int total) {
  79.673 +    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  79.674 +  }
  79.675 +
  79.676 +protected:
  79.677 +  // An entry for a return value takes less space than an entry for an
   79.678 +  // argument, so if the number of cells is at least the number of cells
  79.679 +  // needed for an argument, this object contains type information for
  79.680 +  // at least one argument.
  79.681 +  bool has_arguments() const {
  79.682 +    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
  79.683 +    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
  79.684 +    return res;
  79.685 +  }
  79.686 +
  79.687 +public:
  79.688 +  CallTypeData(DataLayout* layout) :
  79.689 +    CounterData(layout),
  79.690 +    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
  79.691 +    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  79.692 +  {
  79.693 +    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
   79.694 +    // Some compilers (VC++) complain if 'this' is passed in the member initializer list
  79.695 +    _args.set_profile_data(this);
  79.696 +    _ret.set_profile_data(this);
  79.697 +  }
  79.698 +
  79.699 +  const TypeStackSlotEntries* args() const {
  79.700 +    assert(has_arguments(), "no profiling of arguments");
  79.701 +    return &_args;
  79.702 +  }
  79.703 +
  79.704 +  const ReturnTypeEntry* ret() const {
  79.705 +    assert(has_return(), "no profiling of return value");
  79.706 +    return &_ret;
  79.707 +  }
  79.708 +
  79.709 +  virtual bool is_CallTypeData() const { return true; }
  79.710 +
  79.711 +  static int static_cell_count() {
  79.712 +    return -1;
  79.713 +  }
  79.714 +
  79.715 +  static int compute_cell_count(BytecodeStream* stream) {
  79.716 +    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  79.717 +  }
  79.718 +
  79.719 +  static void initialize(DataLayout* dl, int cell_count) {
  79.720 +    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
  79.721 +  }
  79.722 +
  79.723 +  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
  79.724 +
  79.725 +  virtual int cell_count() const {
  79.726 +    return CounterData::static_cell_count() +
  79.727 +      TypeEntriesAtCall::header_cell_count() +
  79.728 +      int_at_unchecked(cell_count_global_offset());
  79.729 +  }
  79.730 +
  79.731 +  int number_of_arguments() const {
  79.732 +    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  79.733 +  }
  79.734 +
  79.735 +  void set_argument_type(int i, Klass* k) {
  79.736 +    assert(has_arguments(), "no arguments!");
  79.737 +    intptr_t current = _args.type(i);
  79.738 +    _args.set_type(i, TypeEntries::with_status(k, current));
  79.739 +  }
  79.740 +
  79.741 +  void set_return_type(Klass* k) {
  79.742 +    assert(has_return(), "no return!");
  79.743 +    intptr_t current = _ret.type();
  79.744 +    _ret.set_type(TypeEntries::with_status(k, current));
  79.745 +  }
  79.746 +
  79.747 +  // An entry for a return value takes less space than an entry for an
  79.748 +  // argument, so if the remainder of the number of cells divided by
   79.749 +  // the number of cells for an argument is non-zero, a return value
  79.750 +  // is profiled in this object.
  79.751 +  bool has_return() const {
  79.752 +    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
  79.753 +    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
  79.754 +    return res;
  79.755 +  }
  79.756 +
  79.757 +  // Code generation support
  79.758 +  static ByteSize args_data_offset() {
  79.759 +    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  79.760 +  }
  79.761 +
  79.762 +  // GC support
  79.763 +  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
  79.764 +    if (has_arguments()) {
  79.765 +      _args.clean_weak_klass_links(is_alive_closure);
  79.766 +    }
  79.767 +    if (has_return()) {
  79.768 +      _ret.clean_weak_klass_links(is_alive_closure);
  79.769 +    }
  79.770 +  }
  79.771 +
  79.772 +#ifndef PRODUCT
  79.773 +  virtual void print_data_on(outputStream* st) const;
  79.774  #endif
  79.775  };
  79.776  
  79.777 @@ -636,16 +1064,17 @@
  79.778  public:
  79.779    ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
  79.780      assert(layout->tag() == DataLayout::receiver_type_data_tag ||
  79.781 -           layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  79.782 +           layout->tag() == DataLayout::virtual_call_data_tag ||
  79.783 +           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  79.784    }
  79.785  
  79.786 -  virtual bool is_ReceiverTypeData() { return true; }
  79.787 +  virtual bool is_ReceiverTypeData() const { return true; }
  79.788  
  79.789    static int static_cell_count() {
  79.790      return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
  79.791    }
  79.792  
  79.793 -  virtual int cell_count() {
  79.794 +  virtual int cell_count() const {
  79.795      return static_cell_count();
  79.796    }
  79.797  
  79.798 @@ -660,7 +1089,7 @@
  79.799      return count0_offset + row * receiver_type_row_cell_count;
  79.800    }
  79.801  
  79.802 -  Klass* receiver(uint row) {
  79.803 +  Klass* receiver(uint row) const {
  79.804      assert(row < row_limit(), "oob");
  79.805  
  79.806      Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
  79.807 @@ -673,7 +1102,7 @@
  79.808      set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
  79.809    }
  79.810  
  79.811 -  uint receiver_count(uint row) {
  79.812 +  uint receiver_count(uint row) const {
  79.813      assert(row < row_limit(), "oob");
  79.814      return uint_at(receiver_count_cell_index(row));
  79.815    }
  79.816 @@ -721,8 +1150,8 @@
  79.817    virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  79.818  
  79.819  #ifndef PRODUCT
  79.820 -  void print_receiver_data_on(outputStream* st);
  79.821 -  void print_data_on(outputStream* st);
  79.822 +  void print_receiver_data_on(outputStream* st) const;
  79.823 +  void print_data_on(outputStream* st) const;
  79.824  #endif
  79.825  };
  79.826  
  79.827 @@ -733,10 +1162,11 @@
  79.828  class VirtualCallData : public ReceiverTypeData {
  79.829  public:
  79.830    VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
  79.831 -    assert(layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  79.832 +    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
  79.833 +           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  79.834    }
  79.835  
  79.836 -  virtual bool is_VirtualCallData() { return true; }
  79.837 +  virtual bool is_VirtualCallData() const { return true; }
  79.838  
  79.839    static int static_cell_count() {
  79.840      // At this point we could add more profile state, e.g., for arguments.
  79.841 @@ -744,7 +1174,7 @@
  79.842      return ReceiverTypeData::static_cell_count();
  79.843    }
  79.844  
  79.845 -  virtual int cell_count() {
  79.846 +  virtual int cell_count() const {
  79.847      return static_cell_count();
  79.848    }
  79.849  
  79.850 @@ -754,7 +1184,134 @@
  79.851    }
  79.852  
  79.853  #ifndef PRODUCT
  79.854 -  void print_data_on(outputStream* st);
  79.855 +  void print_data_on(outputStream* st) const;
  79.856 +#endif
  79.857 +};
  79.858 +
  79.859 +// VirtualCallTypeData
  79.860 +//
  79.861 +// A VirtualCallTypeData is used to access profiling information about
  79.862 +// a virtual call for which we collect type information about
  79.863 +// arguments and return value.
  79.864 +class VirtualCallTypeData : public VirtualCallData {
  79.865 +private:
  79.866 +  // entries for arguments if any
  79.867 +  TypeStackSlotEntries _args;
  79.868 +  // entry for return type if any
  79.869 +  ReturnTypeEntry _ret;
  79.870 +
  79.871 +  int cell_count_global_offset() const {
  79.872 +    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  79.873 +  }
  79.874 +
  79.875 +  // number of cells not counting the header
  79.876 +  int cell_count_no_header() const {
  79.877 +    return uint_at(cell_count_global_offset());
  79.878 +  }
  79.879 +
  79.880 +  void check_number_of_arguments(int total) {
  79.881 +    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  79.882 +  }
  79.883 +
  79.884 +protected:
  79.885 +  // An entry for a return value takes less space than an entry for an
   79.886 +  // argument, so if the number of cells is at least the number of cells
  79.887 +  // needed for an argument, this object contains type information for
  79.888 +  // at least one argument.
  79.889 +  bool has_arguments() const {
  79.890 +    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
  79.891 +    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
  79.892 +    return res;
  79.893 +  }
  79.894 +
  79.895 +public:
  79.896 +  VirtualCallTypeData(DataLayout* layout) :
  79.897 +    VirtualCallData(layout),
  79.898 +    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
  79.899 +    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  79.900 +  {
  79.901 +    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
   79.902 +    // Some compilers (VC++) complain if 'this' is passed in the member initializer list
  79.903 +    _args.set_profile_data(this);
  79.904 +    _ret.set_profile_data(this);
  79.905 +  }
  79.906 +
  79.907 +  const TypeStackSlotEntries* args() const {
  79.908 +    assert(has_arguments(), "no profiling of arguments");
  79.909 +    return &_args;
  79.910 +  }
  79.911 +
  79.912 +  const ReturnTypeEntry* ret() const {
  79.913 +    assert(has_return(), "no profiling of return value");
  79.914 +    return &_ret;
  79.915 +  }
  79.916 +
  79.917 +  virtual bool is_VirtualCallTypeData() const { return true; }
  79.918 +
  79.919 +  static int static_cell_count() {
  79.920 +    return -1;
  79.921 +  }
  79.922 +
  79.923 +  static int compute_cell_count(BytecodeStream* stream) {
  79.924 +    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  79.925 +  }
  79.926 +
  79.927 +  static void initialize(DataLayout* dl, int cell_count) {
  79.928 +    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
  79.929 +  }
  79.930 +
  79.931 +  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
  79.932 +
  79.933 +  virtual int cell_count() const {
  79.934 +    return VirtualCallData::static_cell_count() +
  79.935 +      TypeEntriesAtCall::header_cell_count() +
  79.936 +      int_at_unchecked(cell_count_global_offset());
  79.937 +  }
  79.938 +
  79.939 +  int number_of_arguments() const {
  79.940 +    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  79.941 +  }
  79.942 +
  79.943 +  void set_argument_type(int i, Klass* k) {
  79.944 +    assert(has_arguments(), "no arguments!");
  79.945 +    intptr_t current = _args.type(i);
  79.946 +    _args.set_type(i, TypeEntries::with_status(k, current));
  79.947 +  }
  79.948 +
  79.949 +  void set_return_type(Klass* k) {
  79.950 +    assert(has_return(), "no return!");
  79.951 +    intptr_t current = _ret.type();
  79.952 +    _ret.set_type(TypeEntries::with_status(k, current));
  79.953 +  }
  79.954 +
  79.955 +  // An entry for a return value takes less space than an entry for an
  79.956 +  // argument, so if the remainder of the number of cells divided by
   79.957 +  // the number of cells for an argument is non-zero, a return value
  79.958 +  // is profiled in this object.
  79.959 +  bool has_return() const {
  79.960 +    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
  79.961 +    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
  79.962 +    return res;
  79.963 +  }
  79.964 +
  79.965 +  // Code generation support
  79.966 +  static ByteSize args_data_offset() {
  79.967 +    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  79.968 +  }
  79.969 +
  79.970 +  // GC support
  79.971 +  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
  79.972 +    ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
  79.973 +    if (has_arguments()) {
  79.974 +      _args.clean_weak_klass_links(is_alive_closure);
  79.975 +    }
  79.976 +    if (has_return()) {
  79.977 +      _ret.clean_weak_klass_links(is_alive_closure);
  79.978 +    }
  79.979 +  }
  79.980 +
  79.981 +#ifndef PRODUCT
  79.982 +  virtual void print_data_on(outputStream* st) const;
  79.983  #endif
  79.984  };
  79.985  
  79.986 @@ -797,7 +1354,7 @@
  79.987      assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
  79.988    }
  79.989  
  79.990 -  virtual bool is_RetData() { return true; }
  79.991 +  virtual bool is_RetData() const { return true; }
  79.992  
  79.993    enum {
  79.994      no_bci = -1 // value of bci when bci1/2 are not in use.
  79.995 @@ -807,7 +1364,7 @@
  79.996      return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
  79.997    }
  79.998  
  79.999 -  virtual int cell_count() {
 79.1000 +  virtual int cell_count() const {
 79.1001      return static_cell_count();
 79.1002    }
 79.1003  
 79.1004 @@ -825,13 +1382,13 @@
 79.1005    }
 79.1006  
 79.1007    // Direct accessors
 79.1008 -  int bci(uint row) {
 79.1009 +  int bci(uint row) const {
 79.1010      return int_at(bci_cell_index(row));
 79.1011    }
 79.1012 -  uint bci_count(uint row) {
 79.1013 +  uint bci_count(uint row) const {
 79.1014      return uint_at(bci_count_cell_index(row));
 79.1015    }
 79.1016 -  int bci_displacement(uint row) {
 79.1017 +  int bci_displacement(uint row) const {
 79.1018      return int_at(bci_displacement_cell_index(row));
 79.1019    }
 79.1020  
 79.1021 @@ -853,7 +1410,7 @@
 79.1022    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 79.1023  
 79.1024  #ifndef PRODUCT
 79.1025 -  void print_data_on(outputStream* st);
 79.1026 +  void print_data_on(outputStream* st) const;
 79.1027  #endif
 79.1028  };
 79.1029  
 79.1030 @@ -878,18 +1435,18 @@
 79.1031      assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
 79.1032    }
 79.1033  
 79.1034 -  virtual bool is_BranchData() { return true; }
 79.1035 +  virtual bool is_BranchData() const { return true; }
 79.1036  
 79.1037    static int static_cell_count() {
 79.1038      return branch_cell_count;
 79.1039    }
 79.1040  
 79.1041 -  virtual int cell_count() {
 79.1042 +  virtual int cell_count() const {
 79.1043      return static_cell_count();
 79.1044    }
 79.1045  
 79.1046    // Direct accessor
 79.1047 -  uint not_taken() {
 79.1048 +  uint not_taken() const {
 79.1049      return uint_at(not_taken_off_set);
 79.1050    }
 79.1051  
 79.1052 @@ -917,7 +1474,7 @@
 79.1053    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 79.1054  
 79.1055  #ifndef PRODUCT
 79.1056 -  void print_data_on(outputStream* st);
 79.1057 +  void print_data_on(outputStream* st) const;
 79.1058  #endif
 79.1059  };
 79.1060  
 79.1061 @@ -935,15 +1492,15 @@
 79.1062      array_start_off_set
 79.1063    };
 79.1064  
 79.1065 -  uint array_uint_at(int index) {
 79.1066 +  uint array_uint_at(int index) const {
 79.1067      int aindex = index + array_start_off_set;
 79.1068      return uint_at(aindex);
 79.1069    }
 79.1070 -  int array_int_at(int index) {
 79.1071 +  int array_int_at(int index) const {
 79.1072      int aindex = index + array_start_off_set;
 79.1073      return int_at(aindex);
 79.1074    }
 79.1075 -  oop array_oop_at(int index) {
 79.1076 +  oop array_oop_at(int index) const {
 79.1077      int aindex = index + array_start_off_set;
 79.1078      return oop_at(aindex);
 79.1079    }
 79.1080 @@ -960,17 +1517,17 @@
 79.1081  public:
 79.1082    ArrayData(DataLayout* layout) : ProfileData(layout) {}
 79.1083  
 79.1084 -  virtual bool is_ArrayData() { return true; }
 79.1085 +  virtual bool is_ArrayData() const { return true; }
 79.1086  
 79.1087    static int static_cell_count() {
 79.1088      return -1;
 79.1089    }
 79.1090  
 79.1091 -  int array_len() {
 79.1092 +  int array_len() const {
 79.1093      return int_at_unchecked(array_len_off_set);
 79.1094    }
 79.1095  
 79.1096 -  virtual int cell_count() {
 79.1097 +  virtual int cell_count() const {
 79.1098      return array_len() + 1;
 79.1099    }
 79.1100  
 79.1101 @@ -1017,29 +1574,29 @@
 79.1102      assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
 79.1103    }
 79.1104  
 79.1105 -  virtual bool is_MultiBranchData() { return true; }
 79.1106 +  virtual bool is_MultiBranchData() const { return true; }
 79.1107  
 79.1108    static int compute_cell_count(BytecodeStream* stream);
 79.1109  
 79.1110 -  int number_of_cases() {
 79.1111 +  int number_of_cases() const {
 79.1112      int alen = array_len() - 2; // get rid of default case here.
 79.1113      assert(alen % per_case_cell_count == 0, "must be even");
 79.1114      return (alen / per_case_cell_count);
 79.1115    }
 79.1116  
 79.1117 -  uint default_count() {
 79.1118 +  uint default_count() const {
 79.1119      return array_uint_at(default_count_off_set);
 79.1120    }
 79.1121 -  int default_displacement() {
 79.1122 +  int default_displacement() const {
 79.1123      return array_int_at(default_disaplacement_off_set);
 79.1124    }
 79.1125  
 79.1126 -  uint count_at(int index) {
 79.1127 +  uint count_at(int index) const {
 79.1128      return array_uint_at(case_array_start +
 79.1129                           index * per_case_cell_count +
 79.1130                           relative_count_off_set);
 79.1131    }
 79.1132 -  int displacement_at(int index) {
 79.1133 +  int displacement_at(int index) const {
 79.1134      return array_int_at(case_array_start +
 79.1135                          index * per_case_cell_count +
 79.1136                          relative_displacement_off_set);
 79.1137 @@ -1074,7 +1631,7 @@
 79.1138    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 79.1139  
 79.1140  #ifndef PRODUCT
 79.1141 -  void print_data_on(outputStream* st);
 79.1142 +  void print_data_on(outputStream* st) const;
 79.1143  #endif
 79.1144  };
 79.1145  
 79.1146 @@ -1085,14 +1642,14 @@
 79.1147      assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
 79.1148    }
 79.1149  
 79.1150 -  virtual bool is_ArgInfoData() { return true; }
 79.1151 +  virtual bool is_ArgInfoData() const { return true; }
 79.1152  
 79.1153  
 79.1154 -  int number_of_args() {
 79.1155 +  int number_of_args() const {
 79.1156      return array_len();
 79.1157    }
 79.1158  
 79.1159 -  uint arg_modified(int arg) {
 79.1160 +  uint arg_modified(int arg) const {
 79.1161      return array_uint_at(arg);
 79.1162    }
 79.1163  
 79.1164 @@ -1101,7 +1658,7 @@
 79.1165    }
 79.1166  
 79.1167  #ifndef PRODUCT
 79.1168 -  void print_data_on(outputStream* st);
 79.1169 +  void print_data_on(outputStream* st) const;
 79.1170  #endif
 79.1171  };
 79.1172  
 79.1173 @@ -1271,6 +1828,21 @@
 79.1174    // return the argument info cell
 79.1175    ArgInfoData *arg_info();
 79.1176  
 79.1177 +  enum {
 79.1178 +    no_type_profile = 0,
 79.1179 +    type_profile_jsr292 = 1,
 79.1180 +    type_profile_all = 2
 79.1181 +  };
 79.1182 +
 79.1183 +  static bool profile_jsr292(methodHandle m, int bci);
 79.1184 +  static int profile_arguments_flag();
 79.1185 +  static bool profile_arguments_jsr292_only();
 79.1186 +  static bool profile_all_arguments();
 79.1187 +  static bool profile_arguments_for_invoke(methodHandle m, int bci);
 79.1188 +  static int profile_return_flag();
 79.1189 +  static bool profile_all_return();
 79.1190 +  static bool profile_return_for_invoke(methodHandle m, int bci);
 79.1191 +
 79.1192  public:
 79.1193    static int header_size() {
 79.1194      return sizeof(MethodData)/wordSize;
 79.1195 @@ -1510,6 +2082,10 @@
 79.1196    // verification
 79.1197    void verify_on(outputStream* st);
 79.1198    void verify_data_on(outputStream* st);
 79.1199 +
 79.1200 +  static bool profile_arguments();
 79.1201 +  static bool profile_return();
 79.1202 +  static bool profile_return_jsr292_only();
 79.1203  };
 79.1204  
 79.1205  #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
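
The new profile_* declarations above decode the two-digit TypeProfileLevel flag (=XY: the Y digit
controls argument profiling, the X digit return-value profiling; 0 is off, 1 is jsr292 only, 2 is
all methods). A minimal sketch of a decoder consistent with that documentation; the digit split
and helper bodies are assumptions, since this header only declares them:

    // Hypothetical decoding of TypeProfileLevel = XY (the real bodies live
    // in the corresponding .cpp file; the modulo/division split is an assumption).
    static int profile_arguments_flag() { return TypeProfileLevel % 10; } // Y digit
    static int profile_return_flag()    { return TypeProfileLevel / 10; } // X digit
    static bool profile_arguments_jsr292_only() {
      return profile_arguments_flag() == type_profile_jsr292;
    }
    static bool profile_all_arguments() {
      return profile_arguments_flag() == type_profile_all;
    }
    static bool profile_all_return() {
      return profile_return_flag() == type_profile_all;
    }
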
    80.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Fri Oct 18 10:37:26 2013 +0000
    80.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Oct 18 19:44:40 2013 -0700
    80.3 @@ -197,6 +197,7 @@
    80.4  // negative filter: should callee NOT be inlined?
    80.5  bool InlineTree::should_not_inline(ciMethod *callee_method,
    80.6                                     ciMethod* caller_method,
    80.7 +                                   JVMState* jvms,
    80.8                                     WarmCallInfo* wci_result) {
    80.9  
   80.10    const char* fail_msg = NULL;
   80.11 @@ -226,7 +227,7 @@
   80.12      // don't inline exception code unless the top method belongs to an
   80.13      // exception class
   80.14      if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   80.15 -      ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method();
   80.16 +      ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
   80.17        if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   80.18          wci_result->set_profit(wci_result->profit() * 0.1);
   80.19        }
   80.20 @@ -328,7 +329,7 @@
   80.21  // return true if ok
   80.22  // Relocated from "InliningClosure::try_to_inline"
   80.23  bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
   80.24 -                               int caller_bci, ciCallProfile& profile,
   80.25 +                               int caller_bci, JVMState* jvms, ciCallProfile& profile,
   80.26                                 WarmCallInfo* wci_result, bool& should_delay) {
   80.27  
   80.28     // Old algorithm had funny accumulating BC-size counters
   80.29 @@ -346,7 +347,7 @@
   80.30                       wci_result)) {
   80.31      return false;
   80.32    }
   80.33 -  if (should_not_inline(callee_method, caller_method, wci_result)) {
   80.34 +  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
   80.35      return false;
   80.36    }
   80.37  
   80.38 @@ -397,24 +398,35 @@
   80.39    }
   80.40  
   80.41    // detect direct and indirect recursive inlining
   80.42 -  if (!callee_method->is_compiled_lambda_form()) {
   80.43 +  {
   80.44      // count the current method and the callee
   80.45 -    int inline_level = (method() == callee_method) ? 1 : 0;
   80.46 -    if (inline_level > MaxRecursiveInlineLevel) {
   80.47 -      set_msg("recursively inlining too deep");
   80.48 -      return false;
   80.49 +    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
   80.50 +    int inline_level = 0;
   80.51 +    if (!is_compiled_lambda_form) {
   80.52 +      if (method() == callee_method) {
   80.53 +        inline_level++;
   80.54 +      }
   80.55      }
   80.56      // count callers of current method and callee
   80.57 -    JVMState* jvms = caller_jvms();
   80.58 -    while (jvms != NULL && jvms->has_method()) {
   80.59 -      if (jvms->method() == callee_method) {
   80.60 -        inline_level++;
   80.61 -        if (inline_level > MaxRecursiveInlineLevel) {
   80.62 -          set_msg("recursively inlining too deep");
   80.63 -          return false;
   80.64 +    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
   80.65 +    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
   80.66 +      if (j->method() == callee_method) {
   80.67 +        if (is_compiled_lambda_form) {
   80.68 +          // Since compiled lambda forms are heavily reused, we allow recursive inlining.  If it is
   80.69 +          // truly a recursion (using the same "receiver"), we limit inlining; otherwise we can
   80.70 +          // easily blow the compiler stack.
   80.71 +          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
   80.72 +          if (caller_argument0 == callee_argument0) {
   80.73 +            inline_level++;
   80.74 +          }
   80.75 +        } else {
   80.76 +          inline_level++;
   80.77          }
   80.78        }
   80.79 -      jvms = jvms->caller();
   80.80 +    }
   80.81 +    if (inline_level > MaxRecursiveInlineLevel) {
   80.82 +      set_msg("recursive inlining is too deep");
   80.83 +      return false;
   80.84      }
   80.85    }
   80.86  
   80.87 @@ -536,7 +548,7 @@
   80.88    // Check if inlining policy says no.
   80.89    WarmCallInfo wci = *(initial_wci);
   80.90    bool success = try_to_inline(callee_method, caller_method, caller_bci,
   80.91 -                               profile, &wci, should_delay);
   80.92 +                               jvms, profile, &wci, should_delay);
   80.93  
   80.94  #ifndef PRODUCT
   80.95    if (UseOldInlining && InlineWarmCalls
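
The reworked recursion check counts how many frames on the caller chain are the callee itself,
and for compiled lambda forms it only counts a frame when the same receiver (argument 0) flows
through again, since lambda forms are heavily shared. A simplified standalone sketch of that
walk; Frame and the plain pointers are stand-ins for JVMState, ciMethod and Node, and all names
here are illustrative:

    #include <cstddef>

    // Illustrative reduction of the recursion-depth check (not HotSpot code).
    struct Frame {
      const void* method;    // stands in for ciMethod*
      const void* receiver;  // stands in for map()->argument(jvms, 0)->uncast()
      Frame*      caller;
    };

    static bool inlining_too_deep(const Frame* jvms, const void* callee,
                                  bool is_lambda_form, int max_level) {
      int level = 0;
      if (!is_lambda_form && jvms->method == callee) {
        level++;  // count the current frame for direct self-recursion
      }
      for (const Frame* j = jvms->caller; j != NULL; j = j->caller) {
        if (j->method != callee) continue;
        // Lambda forms are reused across call sites; only treat a frame as
        // true recursion when the same receiver appears again.
        if (!is_lambda_form || j->receiver == jvms->receiver) level++;
      }
      return level > max_level;
    }
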
    81.1 --- a/src/share/vm/opto/c2compiler.cpp	Fri Oct 18 10:37:26 2013 +0000
    81.2 +++ b/src/share/vm/opto/c2compiler.cpp	Fri Oct 18 19:44:40 2013 -0700
    81.3 @@ -44,9 +44,6 @@
    81.4  # include "adfiles/ad_ppc.hpp"
    81.5  #endif
    81.6  
    81.7 -
    81.8 -volatile int C2Compiler::_runtimes = uninitialized;
    81.9 -
   81.10  // register information defined by ADLC
   81.11  extern const char register_save_policy[];
   81.12  extern const int  register_save_type[];
   81.13 @@ -57,7 +54,7 @@
   81.14  const char* C2Compiler::retry_no_escape_analysis() {
   81.15    return "retry without escape analysis";
   81.16  }
   81.17 -void C2Compiler::initialize_runtime() {
   81.18 +bool C2Compiler::init_c2_runtime() {
   81.19  
   81.20    // Check assumptions used while running ADLC
   81.21    Compile::adlc_verification();
   81.22 @@ -90,41 +87,31 @@
   81.23  
   81.24    CompilerThread* thread = CompilerThread::current();
   81.25  
   81.26 -  HandleMark  handle_mark(thread);
   81.27 -
   81.28 -  OptoRuntime::generate(thread->env());
   81.29 -
   81.30 +  HandleMark handle_mark(thread);
   81.31 +  return OptoRuntime::generate(thread->env());
   81.32  }
   81.33  
   81.34  
   81.35  void C2Compiler::initialize() {
   81.36 -
   81.37 -  // This method can only be called once per C2Compiler object
   81.38    // The first compiler thread that gets here will initialize the
   81.39 -  // small amount of global state (and runtime stubs) that c2 needs.
   81.40 +  // small amount of global state (and runtime stubs) that C2 needs.
   81.41  
   81.42    // A race is possible once at startup, and then we're fine.
   81.43  
   81.44    // Note that this is being called from a compiler thread not the
   81.45    // main startup thread.
   81.46 -
   81.47 -  if (_runtimes != initialized) {
   81.48 -    initialize_runtimes( initialize_runtime, &_runtimes);
   81.49 +  if (should_perform_init()) {
   81.50 +    bool successful = C2Compiler::init_c2_runtime();
   81.51 +    int new_state = (successful) ? initialized : failed;
   81.52 +    set_state(new_state);
   81.53    }
   81.54 -
   81.55 -  // Mark this compiler object as ready to roll
   81.56 -  mark_initialized();
   81.57  }
   81.58  
   81.59 -void C2Compiler::compile_method(ciEnv* env,
   81.60 -                                ciMethod* target,
   81.61 -                                int entry_bci) {
   81.62 -  if (!is_initialized()) {
   81.63 -    initialize();
   81.64 -  }
   81.65 +void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
   81.66 +  assert(is_initialized(), "Compiler thread must be initialized");
   81.67 +
   81.68    bool subsume_loads = SubsumeLoads;
   81.69 -  bool do_escape_analysis = DoEscapeAnalysis &&
   81.70 -    !env->jvmti_can_access_local_variables();
   81.71 +  bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
   81.72    bool eliminate_boxing = EliminateAutoBox;
   81.73    while (!env->failing()) {
   81.74      // Attempt to compile while subsuming loads into machine instructions.
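
C2 initialization now goes through should_perform_init()/set_state() so that exactly one
compiler thread generates the runtime stubs, and a stub-generation failure is recorded as a
state instead of leaving the compiler half-initialized. A minimal sketch of the
first-thread-wins gate this implies; the CAS and the state names are assumptions, not
AbstractCompiler's actual implementation:

    #include <atomic>

    // Illustrative one-time initialization gate (not the real AbstractCompiler).
    enum { uninitialized, initializing, initialized, failed };
    static std::atomic<int> _state(uninitialized);

    static bool should_perform_init() {
      int expected = uninitialized;
      // Exactly one thread wins the uninitialized -> initializing transition
      // and runs the expensive stub generation; all other threads skip it.
      return _state.compare_exchange_strong(expected, initializing);
    }

    static void set_state(int new_state) { _state.store(new_state); }
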
    82.1 --- a/src/share/vm/opto/c2compiler.hpp	Fri Oct 18 10:37:26 2013 +0000
    82.2 +++ b/src/share/vm/opto/c2compiler.hpp	Fri Oct 18 19:44:40 2013 -0700
    82.3 @@ -28,24 +28,17 @@
    82.4  #include "compiler/abstractCompiler.hpp"
    82.5  
    82.6  class C2Compiler : public AbstractCompiler {
    82.7 -private:
    82.8 -
    82.9 -  static void initialize_runtime();
   82.10 + private:
   82.11 +  static bool init_c2_runtime();
   82.12  
   82.13  public:
   82.14    // Name
   82.15    const char *name() { return "C2"; }
   82.16  
   82.17 -  static volatile int _runtimes;
   82.18 -
   82.19  #ifdef TIERED
   82.20    virtual bool is_c2() { return true; };
   82.21  #endif // TIERED
   82.22  
   82.23 -  // Customization
   82.24 -  bool needs_adapters         () { return true; }
   82.25 -  bool needs_stubs            () { return true; }
   82.26 -
   82.27    void initialize();
   82.28  
   82.29    // Compilation entry point for methods
    83.1 --- a/src/share/vm/opto/chaitin.hpp	Fri Oct 18 10:37:26 2013 +0000
    83.2 +++ b/src/share/vm/opto/chaitin.hpp	Fri Oct 18 19:44:40 2013 -0700
    83.3 @@ -52,6 +52,7 @@
    83.4  class LRG : public ResourceObj {
    83.5    friend class VMStructs;
    83.6  public:
    83.7 +  static const uint AllStack_size = 0xFFFFF; // This mask size indicates that the mask of this LRG supports stack positions
    83.8    enum { SPILL_REG=29999 };     // Register number of a spilled LRG
    83.9  
   83.10    double _cost;                 // 2 for loads/1 for stores times block freq
   83.11 @@ -80,14 +81,21 @@
   83.12  private:
   83.13    uint _eff_degree;             // Effective degree: Sum of neighbors _num_regs
   83.14  public:
   83.15 -  int degree() const { assert( _degree_valid, "" ); return _eff_degree; }
   83.16 +  int degree() const { assert( _degree_valid , "" ); return _eff_degree; }
   83.17    // Degree starts not valid and any change to the IFG neighbor
   83.18    // set makes it not valid.
   83.19 -  void set_degree( uint degree ) { _eff_degree = degree; debug_only(_degree_valid = 1;) }
   83.20 +  void set_degree( uint degree ) {
   83.21 +    _eff_degree = degree;
   83.22 +    debug_only(_degree_valid = 1;)
   83.23 +    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
   83.24 +  }
   83.25    // Made a change that hammered degree
   83.26    void invalid_degree() { debug_only(_degree_valid=0;) }
   83.27    // Incrementally modify degree.  If it was correct, it should remain correct
   83.28 -  void inc_degree( uint mod ) { _eff_degree += mod; }
   83.29 +  void inc_degree( uint mod ) {
   83.30 +    _eff_degree += mod;
   83.31 +    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
   83.32 +  }
   83.33    // Compute the degree between 2 live ranges
   83.34    int compute_degree( LRG &l ) const;
   83.35  
   83.36 @@ -95,9 +103,9 @@
   83.37    RegMask _mask;                // Allowed registers for this LRG
   83.38    uint _mask_size;              // cache of _mask.Size();
   83.39  public:
   83.40 -  int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); }
   83.41 +  int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); }
   83.42    void set_mask_size( int size ) {
   83.43 -    assert((size == 65535) || (size == (int)_mask.Size()), "");
   83.44 +    assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), "");
   83.45      _mask_size = size;
   83.46  #ifdef ASSERT
   83.47      _msize_valid=1;
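
The magic number 65535 is replaced by the named AllStack_size constant, and the degree mutators
now assert that a live range whose mask can fall back to stack slots stays low-degree. A
stand-alone reduction of that invariant-checked mutator shape; the lo_degree() threshold shown
is an assumption for illustration:

    #include <cassert>

    // Toy model of the degree bookkeeping with the new invariant checks.
    class DegreeTracker {
      static const unsigned AllStack_size = 0xFFFFF;
      unsigned _eff_degree;
      unsigned _num_regs;
      bool     _all_stack;  // does the register mask include stack slots?

      bool lo_degree() const { return _eff_degree <= AllStack_size - _num_regs; }
      void check() const {
        // An all-stack live range must never look high-degree, or the
        // coloring heuristics can run out of stack positions.
        assert(!_all_stack || lo_degree());
      }
     public:
      DegreeTracker(unsigned num_regs, bool all_stack)
        : _eff_degree(0), _num_regs(num_regs), _all_stack(all_stack) {}
      void set_degree(unsigned d)   { _eff_degree = d; check(); }
      void inc_degree(unsigned mod) { _eff_degree += mod; check(); }
    };
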
    84.1 --- a/src/share/vm/opto/compile.cpp	Fri Oct 18 10:37:26 2013 +0000
    84.2 +++ b/src/share/vm/opto/compile.cpp	Fri Oct 18 19:44:40 2013 -0700
    84.3 @@ -47,6 +47,7 @@
    84.4  #include "opto/machnode.hpp"
    84.5  #include "opto/macro.hpp"
    84.6  #include "opto/matcher.hpp"
    84.7 +#include "opto/mathexactnode.hpp"
    84.8  #include "opto/memnode.hpp"
    84.9  #include "opto/mulnode.hpp"
   84.10  #include "opto/node.hpp"
   84.11 @@ -2986,6 +2987,32 @@
   84.12        n->set_req(MemBarNode::Precedent, top());
   84.13      }
   84.14      break;
   84.15 +    // Must set a control edge on all nodes that produce a FlagsProj
   84.16 +    // so they can't escape the block that consumes the flags.
   84.17 +    // Must also set the non-throwing branch as the control
   84.18 +    // for all nodes that depend on the result, unless the node
   84.19 +    // already has a control that isn't the control of the
   84.20 +    // flag producer.
   84.21 +  case Op_FlagsProj:
   84.22 +    {
   84.23 +      MathExactNode* math = (MathExactNode*)  n->in(0);
   84.24 +      Node* ctrl = math->control_node();
   84.25 +      Node* non_throwing = math->non_throwing_branch();
   84.26 +      math->set_req(0, ctrl);
   84.27 +
   84.28 +      Node* result = math->result_node();
   84.29 +      if (result != NULL) {
   84.30 +        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
   84.31 +          Node* out = result->fast_out(j);
   84.32 +          if (out->in(0) == NULL) {
   84.33 +            out->set_req(0, non_throwing);
   84.34 +          } else if (out->in(0) == ctrl) {
   84.35 +            out->set_req(0, non_throwing);
   84.36 +          }
   84.37 +        }
   84.38 +      }
   84.39 +    }
   84.40 +    break;
   84.41    default:
   84.42      assert( !n->is_Call(), "" );
   84.43      assert( !n->is_Mem(), "" );
    85.1 --- a/src/share/vm/opto/escape.cpp	Fri Oct 18 10:37:26 2013 +0000
    85.2 +++ b/src/share/vm/opto/escape.cpp	Fri Oct 18 19:44:40 2013 -0700
    85.3 @@ -780,6 +780,7 @@
    85.4        }
    85.5      } else {  // Allocate instance
    85.6        if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
    85.7 +          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
    85.8           !cik->is_instance_klass() || // StressReflectiveCode
    85.9            cik->as_instance_klass()->has_finalizer()) {
   85.10          es = PointsToNode::GlobalEscape;
    86.1 --- a/src/share/vm/opto/graphKit.cpp	Fri Oct 18 10:37:26 2013 +0000
    86.2 +++ b/src/share/vm/opto/graphKit.cpp	Fri Oct 18 19:44:40 2013 -0700
    86.3 @@ -2122,7 +2122,7 @@
    86.4  // Null check oop.  Set null-path control into Region in slot 3.
    86.5  // Make a cast-not-nullness use the other not-null control.  Return cast.
    86.6  Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
    86.7 -                               bool never_see_null) {
    86.8 +                               bool never_see_null, bool safe_for_replace) {
    86.9    // Initial NULL check taken path
   86.10    (*null_control) = top();
   86.11    Node* cast = null_check_common(value, T_OBJECT, false, null_control);
   86.12 @@ -2140,6 +2140,9 @@
   86.13                    Deoptimization::Action_make_not_entrant);
   86.14      (*null_control) = top();    // NULL path is dead
   86.15    }
   86.16 +  if ((*null_control) == top() && safe_for_replace) {
   86.17 +    replace_in_map(value, cast);
   86.18 +  }
   86.19  
   86.20    // Cast away null-ness on the result
   86.21    return cast;
   86.22 @@ -2634,15 +2637,17 @@
   86.23    C->set_has_split_ifs(true); // Has chance for split-if optimization
   86.24  
   86.25    ciProfileData* data = NULL;
   86.26 +  bool safe_for_replace = false;
   86.27    if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
   86.28      data = method()->method_data()->bci_to_data(bci());
   86.29 +    safe_for_replace = true;
   86.30    }
   86.31    bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
   86.32                           && seems_never_null(obj, data));
   86.33  
   86.34    // Null check; get casted pointer; set region slot 3
   86.35    Node* null_ctl = top();
   86.36 -  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
   86.37 +  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
   86.38  
   86.39    // If not_null_obj is dead, only null-path is taken
   86.40    if (stopped()) {              // Doing instance-of on a NULL?
   86.41 @@ -2723,11 +2728,13 @@
   86.42    }
   86.43  
   86.44    ciProfileData* data = NULL;
   86.45 +  bool safe_for_replace = false;
   86.46    if (failure_control == NULL) {        // use MDO in regular case only
   86.47      assert(java_bc() == Bytecodes::_aastore ||
   86.48             java_bc() == Bytecodes::_checkcast,
   86.49             "interpreter profiles type checks only for these BCs");
   86.50      data = method()->method_data()->bci_to_data(bci());
   86.51 +    safe_for_replace = true;
   86.52    }
   86.53  
   86.54    // Make the merge point
   86.55 @@ -2742,7 +2749,7 @@
   86.56  
   86.57    // Null check; get casted pointer; set region slot 3
   86.58    Node* null_ctl = top();
   86.59 -  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
   86.60 +  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
   86.61  
   86.62    // If not_null_obj is dead, only null-path is taken
   86.63    if (stopped()) {              // Doing instance-of on a NULL?
   86.64 @@ -3608,7 +3615,7 @@
   86.65    Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
   86.66  
   86.67    // if (!marking)
   86.68 -  __ if_then(marking, BoolTest::ne, zero); {
   86.69 +  __ if_then(marking, BoolTest::ne, zero, unlikely); {
   86.70      BasicType index_bt = TypeX_X->basic_type();
   86.71      assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
   86.72      Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
    87.1 --- a/src/share/vm/opto/graphKit.hpp	Fri Oct 18 10:37:26 2013 +0000
    87.2 +++ b/src/share/vm/opto/graphKit.hpp	Fri Oct 18 19:44:40 2013 -0700
    87.3 @@ -378,8 +378,10 @@
    87.4    // Return a cast-not-null node which depends on the not-null control.
    87.5    // If never_see_null, use an uncommon trap (*null_control sees a top).
    87.6    // The cast is not valid along the null path; keep a copy of the original.
    87.7 +  // If safe_for_replace, then we can replace the value with the cast
    87.8 +  // in the parsing map (the cast is guaranteed to dominate the map)
    87.9    Node* null_check_oop(Node* value, Node* *null_control,
   87.10 -                       bool never_see_null = false);
   87.11 +                       bool never_see_null = false, bool safe_for_replace = false);
   87.12  
   87.13    // Check the null_seen bit.
   87.14    bool seems_never_null(Node* obj, ciProfileData* data);
    88.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Fri Oct 18 10:37:26 2013 +0000
    88.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Fri Oct 18 19:44:40 2013 -0700
    88.3 @@ -616,7 +616,11 @@
    88.4        buffer[0] = 0;
    88.5        _chaitin->dump_register(node, buffer);
    88.6        print_prop("reg", buffer);
    88.7 -      print_prop("lrg", _chaitin->_lrg_map.live_range_id(node));
    88.8 +      uint lrg_id = 0;
    88.9 +      if (node->_idx < _chaitin->_lrg_map.size()) {
   88.10 +        lrg_id = _chaitin->_lrg_map.live_range_id(node);
   88.11 +      }
   88.12 +      print_prop("lrg", lrg_id);
   88.13      }
   88.14  
   88.15      node->_in_dump_cnt--;
    89.1 --- a/src/share/vm/opto/ifg.cpp	Fri Oct 18 10:37:26 2013 +0000
    89.2 +++ b/src/share/vm/opto/ifg.cpp	Fri Oct 18 19:44:40 2013 -0700
    89.3 @@ -677,7 +677,7 @@
    89.4              } else {            // Common case: size 1 bound removal
    89.5                if( lrg.mask().Member(r_reg) ) {
    89.6                  lrg.Remove(r_reg);
    89.7 -                lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1);
    89.8 +                lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
    89.9                }
   89.10              }
   89.11              // If 'l' goes completely dry, it must spill.
    90.1 --- a/src/share/vm/opto/ifnode.cpp	Fri Oct 18 10:37:26 2013 +0000
    90.2 +++ b/src/share/vm/opto/ifnode.cpp	Fri Oct 18 19:44:40 2013 -0700
    90.3 @@ -689,6 +689,7 @@
    90.4          ctrl->in(0)->in(1)->is_Bool() &&
    90.5          ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI &&
    90.6          ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() &&
    90.7 +        ctrl->in(0)->in(1)->in(1)->in(2) != phase->C->top() &&
    90.8          ctrl->in(0)->in(1)->in(1)->in(1) == n) {
    90.9        IfNode* dom_iff = ctrl->in(0)->as_If();
   90.10        Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con);
    91.1 --- a/src/share/vm/opto/mathexactnode.cpp	Fri Oct 18 10:37:26 2013 +0000
    91.2 +++ b/src/share/vm/opto/mathexactnode.cpp	Fri Oct 18 19:44:40 2013 -0700
    91.3 @@ -25,9 +25,10 @@
    91.4  #include "precompiled.hpp"
    91.5  #include "memory/allocation.inline.hpp"
    91.6  #include "opto/addnode.hpp"
    91.7 +#include "opto/cfgnode.hpp"
    91.8  #include "opto/machnode.hpp"
    91.9 +#include "opto/matcher.hpp"
   91.10  #include "opto/mathexactnode.hpp"
   91.11 -#include "opto/matcher.hpp"
   91.12  #include "opto/subnode.hpp"
   91.13  
   91.14  MathExactNode::MathExactNode(Node* ctrl, Node* n1, Node* n2) : MultiNode(3) {
   91.15 @@ -36,6 +37,33 @@
   91.16    init_req(2, n2);
   91.17  }
   91.18  
   91.19 +BoolNode* MathExactNode::bool_node() const {
   91.20 +  Node* flags = flags_node();
   91.21 +  BoolNode* boolnode = flags->unique_out()->as_Bool();
   91.22 +  assert(boolnode != NULL, "must have BoolNode");
   91.23 +  return boolnode;
   91.24 +}
   91.25 +
   91.26 +IfNode* MathExactNode::if_node() const {
   91.27 +  BoolNode* boolnode = bool_node();
   91.28 +  IfNode* ifnode = boolnode->unique_out()->as_If();
   91.29 +  assert(ifnode != NULL, "must have IfNode");
   91.30 +  return ifnode;
   91.31 +}
   91.32 +
   91.33 +Node* MathExactNode::control_node() const {
   91.34 +  IfNode* ifnode = if_node();
   91.35 +  return ifnode->in(0);
   91.36 +}
   91.37 +
   91.38 +Node* MathExactNode::non_throwing_branch() const {
   91.39 +  IfNode* ifnode = if_node();
   91.40 +  if (bool_node()->_test._test == BoolTest::overflow) {
   91.41 +    return ifnode->proj_out(0);
   91.42 +  }
   91.43 +  return ifnode->proj_out(1);
   91.44 +}
   91.45 +
   91.46  Node* AddExactINode::match(const ProjNode* proj, const Matcher* m) {
   91.47    uint ideal_reg = proj->ideal_reg();
   91.48    RegMask rm;
   91.49 @@ -62,15 +90,15 @@
   91.50      }
   91.51  
   91.52      if (flags != NULL) {
   91.53 -      BoolNode* bolnode = (BoolNode *) flags->unique_out();
   91.54 -      switch (bolnode->_test._test) {
   91.55 +      BoolNode* boolnode = bool_node();
   91.56 +      switch (boolnode->_test._test) {
   91.57          case BoolTest::overflow:
   91.58            // if the check is for overflow - never taken
   91.59 -          igvn->replace_node(bolnode, phase->intcon(0));
   91.60 +          igvn->replace_node(boolnode, phase->intcon(0));
   91.61            break;
   91.62          case BoolTest::no_overflow:
   91.63            // if the check is for no overflow - always taken
   91.64 -          igvn->replace_node(bolnode, phase->intcon(1));
   91.65 +          igvn->replace_node(boolnode, phase->intcon(1));
   91.66            break;
   91.67          default:
   91.68            fatal("Unexpected value of BoolTest");
    92.1 --- a/src/share/vm/opto/mathexactnode.hpp	Fri Oct 18 10:37:26 2013 +0000
    92.2 +++ b/src/share/vm/opto/mathexactnode.hpp	Fri Oct 18 19:44:40 2013 -0700
    92.3 @@ -27,8 +27,11 @@
    92.4  
    92.5  #include "opto/multnode.hpp"
    92.6  #include "opto/node.hpp"
    92.7 +#include "opto/subnode.hpp"
    92.8  #include "opto/type.hpp"
    92.9  
   92.10 +class BoolNode;
   92.11 +class IfNode;
   92.12  class Node;
   92.13  
   92.14  class PhaseGVN;
   92.15 @@ -49,9 +52,13 @@
   92.16    virtual bool is_CFG() const { return false; }
   92.17    virtual uint ideal_reg() const { return NotAMachineReg; }
   92.18  
   92.19 -  ProjNode* result_node() { return proj_out(result_proj_node); }
   92.20 -  ProjNode* flags_node() { return proj_out(flags_proj_node); }
   92.21 +  ProjNode* result_node() const { return proj_out(result_proj_node); }
   92.22 +  ProjNode* flags_node() const { return proj_out(flags_proj_node); }
   92.23 +  Node* control_node() const;
   92.24 +  Node* non_throwing_branch() const;
   92.25  protected:
   92.26 +  IfNode* if_node() const;
   92.27 +  BoolNode* bool_node() const;
   92.28    Node* no_overflow(PhaseGVN *phase, Node* new_result);
   92.29  };
   92.30  
    93.1 --- a/src/share/vm/opto/parse.hpp	Fri Oct 18 10:37:26 2013 +0000
    93.2 +++ b/src/share/vm/opto/parse.hpp	Fri Oct 18 19:44:40 2013 -0700
    93.3 @@ -73,6 +73,7 @@
    93.4    bool        try_to_inline(ciMethod* callee_method,
    93.5                              ciMethod* caller_method,
    93.6                              int caller_bci,
    93.7 +                            JVMState* jvms,
    93.8                              ciCallProfile& profile,
    93.9                              WarmCallInfo* wci_result,
   93.10                              bool& should_delay);
   93.11 @@ -83,6 +84,7 @@
   93.12                              WarmCallInfo* wci_result);
   93.13    bool        should_not_inline(ciMethod* callee_method,
   93.14                                  ciMethod* caller_method,
   93.15 +                                JVMState* jvms,
   93.16                                  WarmCallInfo* wci_result);
   93.17    void        print_inlining(ciMethod* callee_method, int caller_bci,
   93.18                               bool success) const;
    94.1 --- a/src/share/vm/opto/parse2.cpp	Fri Oct 18 10:37:26 2013 +0000
    94.2 +++ b/src/share/vm/opto/parse2.cpp	Fri Oct 18 19:44:40 2013 -0700
    94.3 @@ -268,7 +268,7 @@
    94.4      return adjoinRange(value, value, dest, table_index);
    94.5    }
    94.6  
    94.7 -  void print(ciEnv* env) {
    94.8 +  void print() {
    94.9      if (is_singleton())
   94.10        tty->print(" {%d}=>%d", lo(), dest());
   94.11      else if (lo() == min_jint)
   94.12 @@ -471,8 +471,8 @@
   94.13    // These are the switch destinations hanging off the jumpnode
   94.14    int i = 0;
   94.15    for (SwitchRange* r = lo; r <= hi; r++) {
   94.16 -    for (int j = r->lo(); j <= r->hi(); j++, i++) {
   94.17 -      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), j - lowval));
   94.18 +    for (int64 j = r->lo(); j <= r->hi(); j++, i++) {
   94.19 +      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
   94.20        {
   94.21          PreserveJVMState pjvms(this);
   94.22          set_control(input);
   94.23 @@ -632,7 +632,7 @@
   94.24      }
   94.25      tty->print("   ");
   94.26      for( r = lo; r <= hi; r++ ) {
   94.27 -      r->print(env());
   94.28 +      r->print();
   94.29      }
   94.30      tty->print_cr("");
   94.31    }
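
Widening the induction variable matters when a switch range ends at max_jint: with a 32-bit j,
the increment past INT_MAX is signed overflow (undefined behavior) and j <= r->hi() can never
become false. A minimal demonstration of the fix outside HotSpot:

    #include <climits>
    #include <cstdint>

    // Counts the values in [lo, hi]. With 'int j' this never terminates
    // (formally, it is UB) when hi == INT_MAX; int64_t can represent
    // INT_MAX + 1, so the loop exits normally.
    static int64_t count_range(int lo, int hi) {
      int64_t n = 0;
      for (int64_t j = lo; j <= hi; j++) {
        n++;  // the parser emits one JumpProjNode per value here
      }
      return n;
    }
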
    95.1 --- a/src/share/vm/opto/parseHelper.cpp	Fri Oct 18 10:37:26 2013 +0000
    95.2 +++ b/src/share/vm/opto/parseHelper.cpp	Fri Oct 18 19:44:40 2013 -0700
    95.3 @@ -343,10 +343,14 @@
    95.4  
    95.5    // Get the Method* node.
    95.6    ciMethod* m = method();
    95.7 -  address counters_adr = m->ensure_method_counters();
    95.8 +  MethodCounters* counters_adr = m->ensure_method_counters();
    95.9 +  if (counters_adr == NULL) {
   95.10 +    C->record_failure("method counters allocation failed");
   95.11 +    return;
   95.12 +  }
   95.13  
   95.14    Node* ctrl = control();
   95.15 -  const TypePtr* adr_type = TypeRawPtr::make(counters_adr);
   95.16 +  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
   95.17    Node *counters_node = makecon(adr_type);
   95.18    Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
   95.19      MethodCounters::interpreter_invocation_counter_offset_in_bytes());
    96.1 --- a/src/share/vm/opto/reg_split.cpp	Fri Oct 18 10:37:26 2013 +0000
    96.2 +++ b/src/share/vm/opto/reg_split.cpp	Fri Oct 18 19:44:40 2013 -0700
    96.3 @@ -375,6 +375,7 @@
    96.4        }
    96.5  
    96.6        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
    96.7 +        assert(Reachblock != NULL, "Reachblock must be non-NULL");
    96.8          Node *rdef = Reachblock[lrg2reach[lidx]];
    96.9          if (rdef) {
   96.10            spill->set_req(i, rdef);
   96.11 @@ -1336,7 +1337,8 @@
   96.12                 _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
   96.13            insert--;
   96.14          }
   96.15 -        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
   96.16 +        // Since the def cannot contain any live range input, we can pass in NULL as the Reachblock parameter.
   96.17 +        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false);
   96.18          if (!def) {
   96.19            return 0;    // Bail out
   96.20          }
    97.1 --- a/src/share/vm/opto/runtime.cpp	Fri Oct 18 10:37:26 2013 +0000
    97.2 +++ b/src/share/vm/opto/runtime.cpp	Fri Oct 18 19:44:40 2013 -0700
    97.3 @@ -138,9 +138,10 @@
    97.4  
    97.5  
    97.6  #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
    97.7 -  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc)
    97.8 +  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
    97.9 +  if (var == NULL) { return false; }
   97.10  
   97.11 -void OptoRuntime::generate(ciEnv* env) {
   97.12 +bool OptoRuntime::generate(ciEnv* env) {
   97.13  
   97.14    generate_exception_blob();
   97.15  
   97.16 @@ -158,7 +159,7 @@
   97.17    gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
   97.18    gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   97.19    gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
   97.20 -  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
   97.21 +  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
   97.22    gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , false, true );
   97.23  
   97.24    gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false, false);
   97.25 @@ -168,7 +169,7 @@
   97.26    gen(env, _zap_dead_Java_locals_Java      , zap_dead_locals_Type         , zap_dead_Java_locals_C          ,    0 , false, true , false );
   97.27    gen(env, _zap_dead_native_locals_Java    , zap_dead_locals_Type         , zap_dead_native_locals_C        ,    0 , false, true , false );
   97.28  # endif
   97.29 -
   97.30 +  return true;
   97.31  }
   97.32  
   97.33  #undef gen
   97.34 @@ -976,30 +977,36 @@
   97.35    address handler_address = NULL;
   97.36  
   97.37    Handle exception(thread, thread->exception_oop());
   97.38 +  address pc = thread->exception_pc();
   97.39 +
   97.40 +  // Clear out the exception oop and pc since looking up an
   97.41 +  // exception handler can cause class loading, which might throw an
   97.42 +  // exception and those fields are expected to be clear during
   97.43 +  // normal bytecode execution.
   97.44 +  thread->clear_exception_oop_and_pc();
   97.45  
   97.46    if (TraceExceptions) {
   97.47 -    trace_exception(exception(), thread->exception_pc(), "");
   97.48 +    trace_exception(exception(), pc, "");
   97.49    }
   97.50 +
   97.51    // for AbortVMOnException flag
   97.52    NOT_PRODUCT(Exceptions::debug_check_abort(exception));
   97.53  
   97.54 -  #ifdef ASSERT
   97.55 -    if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
   97.56 -      // should throw an exception here
   97.57 -      ShouldNotReachHere();
   97.58 -    }
   97.59 -  #endif
   97.60 -
   97.61 +#ifdef ASSERT
   97.62 +  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
   97.63 +    // should throw an exception here
   97.64 +    ShouldNotReachHere();
   97.65 +  }
   97.66 +#endif
   97.67  
   97.68    // new exception handling: this method is entered only from adapters
   97.69    // exceptions from compiled java methods are handled in compiled code
   97.70    // using rethrow node
   97.71  
   97.72 -  address pc = thread->exception_pc();
   97.73    nm = CodeCache::find_nmethod(pc);
   97.74    assert(nm != NULL, "No NMethod found");
   97.75    if (nm->is_native_method()) {
   97.76 -    fatal("Native mathod should not have path to exception handling");
   97.77 +    fatal("Native method should not have path to exception handling");
   97.78    } else {
   97.79      // we are switching to old paradigm: search for exception handler in caller_frame
   97.80      // instead in exception handler of caller_frame.sender()
   97.81 @@ -1346,7 +1353,8 @@
   97.82    tty->print(" in ");
   97.83    CodeBlob* blob = CodeCache::find_blob(exception_pc);
   97.84    if (blob->is_nmethod()) {
   97.85 -    ((nmethod*)blob)->method()->print_value();
   97.86 +    nmethod* nm = blob->as_nmethod_or_null();
   97.87 +    nm->method()->print_value();
   97.88    } else if (blob->is_runtime_stub()) {
   97.89      tty->print("<runtime-stub>");
   97.90    } else {
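
OptoRuntime::generate() and the gen macro now propagate stub-generation failure to the caller
instead of silently leaving a NULL stub, which is what lets C2Compiler::initialize() move to its
initialized/failed state machine. A minimal sketch of the check-and-bail pattern;
generate_stub_checked and the function-pointer signature are illustrative, not the real
generate_stub API:

    #include <cstddef>

    typedef unsigned char* address;

    // Each stub is generated and immediately checked; the first failure
    // (for example, a full code cache) aborts setup and returns false.
    static bool generate_stub_checked(address& var, address (*make)()) {
      var = make();
      return var != NULL;
    }
    // Usage inside a bool generate(...) function, with a hypothetical
    // make_rethrow_stub generator:
    //   if (!generate_stub_checked(_rethrow_Java, make_rethrow_stub)) return false;
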
    98.1 --- a/src/share/vm/opto/runtime.hpp	Fri Oct 18 10:37:26 2013 +0000
    98.2 +++ b/src/share/vm/opto/runtime.hpp	Fri Oct 18 19:44:40 2013 -0700
    98.3 @@ -203,8 +203,10 @@
    98.4  
    98.5    static bool is_callee_saved_register(MachRegisterNumbers reg);
    98.6  
    98.7 -  // One time only generate runtime code stubs
    98.8 -  static void generate(ciEnv* env);
    98.9 +  // One time only generate runtime code stubs. Returns true
   98.10 +  // when runtime stubs have been generated successfully and
   98.11 +  // false otherwise.
   98.12 +  static bool generate(ciEnv* env);
   98.13  
   98.14    // Returns the name of a stub
   98.15    static const char* stub_name(address entry);
    99.1 --- a/src/share/vm/opto/stringopts.cpp	Fri Oct 18 10:37:26 2013 +0000
    99.2 +++ b/src/share/vm/opto/stringopts.cpp	Fri Oct 18 19:44:40 2013 -0700
    99.3 @@ -1,5 +1,5 @@
    99.4  /*
    99.5 - * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
    99.6 + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
    99.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    99.8   *
    99.9   * This code is free software; you can redistribute it and/or modify it
   99.10 @@ -50,10 +50,11 @@
   99.11    Node*               _arguments;      // The list of arguments to be concatenated
   99.12    GrowableArray<int>  _mode;           // into a String along with a mode flag
   99.13                                         // indicating how to treat the value.
   99.14 -
   99.15 +  Node_List           _constructors;   // List of constructors (many in case of stacked concat)
   99.16    Node_List           _control;        // List of control nodes that will be deleted
   99.17  Node_List           _uncommon_traps; // Uncommon traps that need to be rewritten
   99.18                                         // to restart at the initial JVMState.
   99.19 +
   99.20   public:
   99.21    // Mode for converting arguments to Strings
   99.22    enum {
   99.23 @@ -73,6 +74,7 @@
   99.24      _arguments->del_req(0);
   99.25    }
   99.26  
   99.27 +  bool validate_mem_flow();
   99.28    bool validate_control_flow();
   99.29  
   99.30    void merge_add() {
   99.31 @@ -189,6 +191,10 @@
   99.32      assert(!_control.contains(ctrl), "only push once");
   99.33      _control.push(ctrl);
   99.34    }
   99.35 +  void add_constructor(Node* init) {
   99.36 +    assert(!_constructors.contains(init), "only push once");
   99.37 +    _constructors.push(init);
   99.38 +  }
   99.39    CallStaticJavaNode* end() { return _end; }
   99.40    AllocateNode* begin() { return _begin; }
   99.41    Node* string_alloc() { return _string_alloc; }
   99.42 @@ -301,6 +307,12 @@
   99.43      }
   99.44    }
   99.45    result->set_allocation(other->_begin);
   99.46 +  for (uint i = 0; i < _constructors.size(); i++) {
   99.47 +    result->add_constructor(_constructors.at(i));
   99.48 +  }
   99.49 +  for (uint i = 0; i < other->_constructors.size(); i++) {
   99.50 +    result->add_constructor(other->_constructors.at(i));
   99.51 +  }
   99.52    result->_multiple = true;
   99.53    return result;
   99.54  }
   99.55 @@ -510,7 +522,8 @@
   99.56        sc->add_control(constructor);
   99.57        sc->add_control(alloc);
   99.58        sc->set_allocation(alloc);
   99.59 -      if (sc->validate_control_flow()) {
   99.60 +      sc->add_constructor(constructor);
   99.61 +      if (sc->validate_control_flow() && sc->validate_mem_flow()) {
   99.62          return sc;
   99.63        } else {
   99.64          return NULL;
   99.65 @@ -620,7 +633,7 @@
   99.66  #endif
   99.67  
   99.68              StringConcat* merged = sc->merge(other, arg);
   99.69 -            if (merged->validate_control_flow()) {
   99.70 +            if (merged->validate_control_flow() && merged->validate_mem_flow()) {
   99.71  #ifndef PRODUCT
   99.72                if (PrintOptimizeStringConcat) {
   99.73                  tty->print_cr("stacking would succeed");
   99.74 @@ -708,6 +721,139 @@
   99.75  }
   99.76  
   99.77  
   99.78 +bool StringConcat::validate_mem_flow() {
   99.79 +  Compile* C = _stringopts->C;
   99.80 +
   99.81 +  for (uint i = 0; i < _control.size(); i++) {
   99.82 +#ifndef PRODUCT
   99.83 +    Node_List path;
   99.84 +#endif
   99.85 +    Node* curr = _control.at(i);
   99.86 +    if (curr->is_Call() && curr != _begin) { // For all calls except the first allocation
   99.87 +      // Now here's the main invariant in our case:
   99.88 +      // For memory between the constructor, the appends, and toString, we should only
   99.89 +      // see bottom memory produced by the previous call we know about.
   99.90 +      if (!_constructors.contains(curr)) {
   99.91 +        NOT_PRODUCT(path.push(curr);)
   99.92 +        Node* mem = curr->in(TypeFunc::Memory);
   99.93 +        assert(mem != NULL, "calls should have memory edge");
   99.94 +        assert(!mem->is_Phi(), "should be handled by control flow validation");
   99.95 +        NOT_PRODUCT(path.push(mem);)
   99.96 +        while (mem->is_MergeMem()) {
   99.97 +          for (uint i = 1; i < mem->req(); i++) {
   99.98 +            if (i != Compile::AliasIdxBot && mem->in(i) != C->top()) {
   99.99 +#ifndef PRODUCT
  99.100 +              if (PrintOptimizeStringConcat) {
  99.101 +                tty->print("fusion has incorrect memory flow (side effects) for ");
  99.102 +                _begin->jvms()->dump_spec(tty); tty->cr();
  99.103 +                path.dump();
  99.104 +              }
  99.105 +#endif
  99.106 +              return false;
  99.107 +            }
  99.108 +          }
  99.109 +          // skip through a potential MergeMem chain, linked through Bot
  99.110 +          mem = mem->in(Compile::AliasIdxBot);
  99.111 +          NOT_PRODUCT(path.push(mem);)
  99.112 +        }
  99.113 +        // now let it fall through, and see if we have a projection
  99.114 +        if (mem->is_Proj()) {
  99.115 +          // Should point to a previous known call
  99.116 +          Node *prev = mem->in(0);
  99.117 +          NOT_PRODUCT(path.push(prev);)
  99.118 +          if (!prev->is_Call() || !_control.contains(prev)) {
  99.119 +#ifndef PRODUCT
  99.120 +            if (PrintOptimizeStringConcat) {
  99.121 +              tty->print("fusion has incorrect memory flow (unknown call) for ");
  99.122 +              _begin->jvms()->dump_spec(tty); tty->cr();
  99.123 +              path.dump();
  99.124 +            }
  99.125 +#endif
  99.126 +            return false;
  99.127 +          }
  99.128 +        } else {
  99.129 +          assert(mem->is_Store() || mem->is_LoadStore(), err_msg_res("unexpected node type: %s", mem->Name()));
  99.130 +#ifndef PRODUCT
  99.131 +          if (PrintOptimizeStringConcat) {
  99.132 +            tty->print("fusion has incorrect memory flow (unexpected source) for ");
  99.133 +            _begin->jvms()->dump_spec(tty); tty->cr();
  99.134 +            path.dump();
  99.135 +          }
  99.136 +#endif
  99.137 +          return false;
  99.138 +        }
  99.139 +      } else {
  99.140 +        // For memory that feeds into constructors it's more complicated.
  99.141 +        // However the advantage is that any side effect that happens between the Allocate/Initialize and
  99.142 +        // the constructor will have to be control-dependent on Initialize.
  99.143 +        // So we actually don't have to do anything, since it's going to be caught by the control flow
  99.144 +        // analysis.
  99.145 +#ifdef ASSERT
  99.146 +        // Do a quick verification of the control pattern between the constructor and the initialize node
  99.147 +        assert(curr->is_Call(), "constructor should be a call");
  99.148 +        // Go up the control starting from the constructor call
  99.149 +        Node* ctrl = curr->in(0);
  99.150 +        IfNode* iff = NULL;
  99.151 +        RegionNode* copy = NULL;
  99.152 +
  99.153 +        while (true) {
  99.154 +          // skip known check patterns
  99.155 +          if (ctrl->is_Region()) {
  99.156 +            if (ctrl->as_Region()->is_copy()) {
  99.157 +              copy = ctrl->as_Region();
  99.158 +              ctrl = copy->is_copy();
  99.159 +            } else { // a cast
  99.160 +              assert(ctrl->req() == 3 &&
  99.161 +                     ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() &&
  99.162 +                     ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() &&
  99.163 +                     ctrl->in(1)->in(0) == ctrl->in(2)->in(0) &&
  99.164 +                     ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(),
  99.165 +                     "must be a simple diamond");
  99.166 +              Node* true_proj = ctrl->in(1)->is_IfTrue() ? ctrl->in(1) : ctrl->in(2);
  99.167 +              for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) {
  99.168 +                Node* use = i.get();
  99.169 +                assert(use == ctrl || use->is_ConstraintCast(),
  99.170 +                       err_msg_res("unexpected user: %s", use->Name()));
  99.171 +              }
  99.172 +
  99.173 +              iff = ctrl->in(1)->in(0)->as_If();
  99.174 +              ctrl = iff->in(0);
  99.175 +            }
  99.176 +          } else if (ctrl->is_IfTrue()) { // null checks, class checks
  99.177 +            iff = ctrl->in(0)->as_If();
  99.178 +            assert(iff->is_If(), "must be if");
  99.179 +            // Verify that the other arm is an uncommon trap
  99.180 +            Node* otherproj = iff->proj_out(1 - ctrl->as_Proj()->_con);
  99.181 +            CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
  99.182 +            assert(strcmp(call->_name, "uncommon_trap") == 0, "must be uncommon trap");
  99.183 +            ctrl = iff->in(0);
  99.184 +          } else {
  99.185 +            break;
  99.186 +          }
  99.187 +        }
  99.188 +
  99.189 +        assert(ctrl->is_Proj(), "must be a projection");
  99.190 +        assert(ctrl->in(0)->is_Initialize(), "should be initialize");
  99.191 +        for (SimpleDUIterator i(ctrl); i.has_next(); i.next()) {
  99.192 +          Node* use = i.get();
  99.193 +          assert(use == copy || use == iff || use == curr || use->is_CheckCastPP() || use->is_Load(),
  99.194 +                 err_msg_res("unexpected user: %s", use->Name()));
  99.195 +        }
  99.196 +#endif // ASSERT
  99.197 +      }
  99.198 +    }
  99.199 +  }
  99.200 +
  99.201 +#ifndef PRODUCT
  99.202 +  if (PrintOptimizeStringConcat) {
  99.203 +    tty->print("fusion has correct memory flow for ");
  99.204 +    _begin->jvms()->dump_spec(tty); tty->cr();
  99.205 +    tty->cr();
  99.206 +  }
  99.207 +#endif
  99.208 +  return true;
  99.209 +}
  99.210 +
  99.211  bool StringConcat::validate_control_flow() {
  99.212    // We found all the calls and arguments now lets see if it's
  99.213    // safe to transform the graph as we would expect.
  99.214 @@ -753,7 +899,7 @@
  99.215      }
  99.216    }
  99.217  
  99.218 -  // Skip backwards through the control checking for unexpected contro flow
  99.219 +  // Skip backwards through the control checking for unexpected control flow
  99.220    Node* ptr = _end;
  99.221    bool fail = false;
  99.222    while (ptr != _begin) {
  99.223 @@ -936,7 +1082,7 @@
  99.224    if (PrintOptimizeStringConcat && !fail) {
  99.225      ttyLocker ttyl;
  99.226      tty->cr();
  99.227 -    tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
  99.228 +    tty->print("fusion has correct control flow (%d %d) for ", null_check_count, _uncommon_traps.size());
  99.229      _begin->jvms()->dump_spec(tty); tty->cr();
  99.230      for (int i = 0; i < num_arguments(); i++) {
  99.231        argument(i)->dump();
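
validate_mem_flow() enforces that the only memory flowing between the fused calls is the bottom
slice produced by the previous known call: MergeMem nodes may only chain through AliasIdxBot
with every other slice at top, and the chain must end at a memory projection of a call already
collected in _control. A toy model of that walk; ToyNode and its fields are inventions for
illustration, not C2 types:

    #include <cstddef>
    #include <vector>

    // Toy memory node: a merge (slices[0] plays AliasIdxBot), a projection
    // off a producing call, or something else (a stray store, unknown call).
    struct ToyNode {
      enum Kind { Merge, Proj, Other } kind;
      std::vector<ToyNode*> slices;  // for Merge; a NULL entry means top
      ToyNode* producer;             // for Proj: the call it hangs off
    };

    static bool mem_flow_ok(ToyNode* mem, const std::vector<ToyNode*>& known_calls) {
      while (mem->kind == ToyNode::Merge) {
        for (size_t i = 1; i < mem->slices.size(); i++) {
          if (mem->slices[i] != NULL) return false;  // side effect on a slice
        }
        mem = mem->slices[0];                        // follow the bottom slice
      }
      if (mem->kind != ToyNode::Proj) return false;  // unexpected memory source
      for (size_t i = 0; i < known_calls.size(); i++) {
        if (known_calls[i] == mem->producer) return true;
      }
      return false;                                  // unknown producing call
    }
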
   100.1 --- a/src/share/vm/prims/jni.cpp	Fri Oct 18 10:37:26 2013 +0000
   100.2 +++ b/src/share/vm/prims/jni.cpp	Fri Oct 18 19:44:40 2013 -0700
   100.3 @@ -5059,6 +5059,7 @@
   100.4  void TestReserveMemorySpecial_test();
   100.5  void TestVirtualSpace_test();
   100.6  void TestMetaspaceAux_test();
   100.7 +void TestMetachunk_test();
   100.8  #if INCLUDE_ALL_GCS
   100.9  void TestG1BiasedArray_test();
  100.10  #endif
  100.11 @@ -5070,6 +5071,7 @@
  100.12      run_unit_test(TestReserveMemorySpecial_test());
  100.13      run_unit_test(TestVirtualSpace_test());
  100.14      run_unit_test(TestMetaspaceAux_test());
  100.15 +    run_unit_test(TestMetachunk_test());
  100.16      run_unit_test(GlobalDefinitions::test_globals());
  100.17      run_unit_test(GCTimerAllTest::all());
  100.18      run_unit_test(arrayOopDesc::test_max_array_length());
   101.1 --- a/src/share/vm/runtime/arguments.cpp	Fri Oct 18 10:37:26 2013 +0000
   101.2 +++ b/src/share/vm/runtime/arguments.cpp	Fri Oct 18 19:44:40 2013 -0700
   101.3 @@ -2694,8 +2694,9 @@
   101.4        FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size);
   101.5      // Xmaxf
   101.6      } else if (match_option(option, "-Xmaxf", &tail)) {
   101.7 -      int maxf = (int)(atof(tail) * 100);
   101.8 -      if (maxf < 0 || maxf > 100) {
   101.9 +      char* err;
  101.10 +      int maxf = (int)(strtod(tail, &err) * 100);
  101.11 +      if (*err != '\0' || maxf < 0 || maxf > 100) {
  101.12          jio_fprintf(defaultStream::error_stream(),
  101.13                      "Bad max heap free percentage size: %s\n",
  101.14                      option->optionString);
  101.15 @@ -2705,8 +2706,9 @@
  101.16        }
  101.17      // Xminf
  101.18      } else if (match_option(option, "-Xminf", &tail)) {
  101.19 -      int minf = (int)(atof(tail) * 100);
  101.20 -      if (minf < 0 || minf > 100) {
  101.21 +      char* err;
  101.22 +      int minf = (int)(strtod(tail, &err) * 100);
  101.23 +      if (*err != '\0' || minf < 0 || minf > 100) {
  101.24          jio_fprintf(defaultStream::error_stream(),
  101.25                      "Bad min heap free percentage size: %s\n",
  101.26                      option->optionString);
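
atof() gives no way to detect trailing garbage, so an option like -Xmaxf0.5x used to parse as
0.5 and be accepted; strtod() reports where parsing stopped, and the new *err != '\0' test
rejects such input. A standalone demonstration of the same validation:

    #include <cstdlib>

    // Parses a fraction such as "0.4" into a percentage, rejecting both
    // out-of-range values and trailing junk that atof() would ignore.
    static bool parse_fraction_percent(const char* tail, int* out) {
      char* err;
      double v = strtod(tail, &err);
      if (*err != '\0') return false;  // e.g. "0.5x": atof() hides this
      int pct = (int)(v * 100);
      if (pct < 0 || pct > 100) return false;
      *out = pct;
      return true;
    }
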
   102.1 --- a/src/share/vm/runtime/globals.hpp	Fri Oct 18 10:37:26 2013 +0000
   102.2 +++ b/src/share/vm/runtime/globals.hpp	Fri Oct 18 19:44:40 2013 -0700
   102.3 @@ -1979,13 +1979,6 @@
   102.4    develop(uintx, MetadataAllocationFailALotInterval, 1000,                  \
   102.5            "Metadata allocation failure a lot interval")                     \
   102.6                                                                              \
   102.7 -  develop(bool, MetaDataDeallocateALot, false,                              \
   102.8 -          "Deallocation bunches of metadata at intervals controlled by "    \
   102.9 -          "MetaDataAllocateALotInterval")                                   \
  102.10 -                                                                            \
  102.11 -  develop(uintx, MetaDataDeallocateALotInterval, 100,                       \
  102.12 -          "Metadata deallocation alot interval")                            \
  102.13 -                                                                            \
  102.14    develop(bool, TraceMetadataChunkAllocation, false,                        \
  102.15            "Trace chunk metadata allocations")                               \
  102.16                                                                              \
  102.17 @@ -2175,7 +2168,7 @@
  102.18            "Minimum ratio of young generation/survivor space size")          \
  102.19                                                                              \
  102.20    product(uintx, InitialSurvivorRatio, 8,                                   \
  102.21 -          "Initial ratio of eden/survivor space size")                      \
  102.22 +          "Initial ratio of young generation/survivor space size")          \
  102.23                                                                              \
  102.24    product(uintx, BaseFootPrintEstimate, 256*M,                              \
  102.25            "Estimate of footprint other than Java Heap")                     \
  102.26 @@ -2677,6 +2670,14 @@
  102.27    product(bool, AggressiveOpts, false,                                      \
  102.28            "Enable aggressive optimizations - see arguments.cpp")            \
  102.29                                                                              \
  102.30 +  product_pd(uintx, TypeProfileLevel,                                       \
  102.31 +          "=XY, with Y: Type profiling of arguments at call; "              \
  102.32 +          "X: Type profiling of return value at call. "                     \
  102.33 +          "X and Y in 0->off; 1->jsr292 only; 2->all methods")              \
  102.34 +                                                                            \
  102.35 +  product(intx, TypeProfileArgsLimit,     2,                                \
  102.36 +          "max number of call arguments to consider for type profiling")    \
  102.37 +                                                                            \
  102.38    /* statistics */                                                          \
  102.39    develop(bool, CountCompiledCalls, false,                                  \
  102.40            "Count method invocations")                                       \
  102.41 @@ -3125,10 +3126,14 @@
  102.42            "class pointers are used")                                        \
  102.43                                                                              \
  102.44    product(uintx, MinHeapFreeRatio,    40,                                   \
  102.45 -          "The minimum percentage of heap free after GC to avoid expansion")\
  102.46 +          "The minimum percentage of heap free after GC to avoid expansion."\
  102.47 +          " For most GCs this applies to the old generation. In G1 it"      \
  102.48 +          " applies to the whole heap. Not supported by ParallelGC.")       \
  102.49                                                                              \
  102.50    product(uintx, MaxHeapFreeRatio,    70,                                   \
  102.51 -          "The maximum percentage of heap free after GC to avoid shrinking")\
  102.52 +          "The maximum percentage of heap free after GC to avoid shrinking."\
  102.53 +          " For most GCs this applies to the old generation. In G1 it"      \
  102.54 +          " applies to the whole heap. Not supported by ParallelGC.")       \
  102.55                                                                              \
  102.56    product(intx, SoftRefLRUPolicyMSPerMB, 1000,                              \
  102.57            "Number of milliseconds per MB of free space in the heap")        \
  102.58 @@ -3823,7 +3828,6 @@
  102.59    product(bool, UseLockedTracing, false,                                    \
  102.60            "Use locked-tracing when doing event-based tracing")
  102.61  
  102.62 -
  102.63  /*
  102.64   *  Macros for factoring of globals
  102.65   */
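
As added above, TypeProfileLevel is read as two decimal digits =XY: the ones digit Y selects type profiling of call arguments and the tens digit X type profiling of return values, each digit being 0 (off), 1 (jsr292 methods only) or 2 (all methods). For example (an illustrative setting; the default is per-platform, since the flag is product_pd), -XX:TypeProfileLevel=22 would request both argument and return-value profiling for all methods, with at most TypeProfileArgsLimit (2) arguments considered per call site.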
   103.1 --- a/src/share/vm/runtime/java.cpp	Fri Oct 18 10:37:26 2013 +0000
   103.2 +++ b/src/share/vm/runtime/java.cpp	Fri Oct 18 19:44:40 2013 -0700
   103.3 @@ -183,6 +183,7 @@
   103.4    collected_profiled_methods->sort(&compare_methods);
   103.5  
   103.6    int count = collected_profiled_methods->length();
   103.7 +  int total_size = 0;
   103.8    if (count > 0) {
   103.9      for (int index = 0; index < count; index++) {
  103.10        Method* m = collected_profiled_methods->at(index);
  103.11 @@ -190,10 +191,13 @@
  103.12        tty->print_cr("------------------------------------------------------------------------");
  103.13        //m->print_name(tty);
  103.14        m->print_invocation_count();
  103.15 +      tty->print_cr("  mdo size: %d bytes", m->method_data()->size_in_bytes());
  103.16        tty->cr();
  103.17        m->print_codes();
  103.18 +      total_size += m->method_data()->size_in_bytes();
  103.19      }
  103.20      tty->print_cr("------------------------------------------------------------------------");
  103.21 +    tty->print_cr("Total MDO size: %d bytes", total_size);
  103.22    }
  103.23  }
  103.24  
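With this change the profiled-methods dump also prices the profiling data itself: each per-method block gains a line of the form "  mdo size: 1320 bytes" (value illustrative) reporting the size of that method's MethodData ("MDO"), and the dump now ends with a "Total MDO size" summary accumulated in total_size.
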
   104.1 --- a/src/share/vm/runtime/signature.cpp	Fri Oct 18 10:37:26 2013 +0000
   104.2 +++ b/src/share/vm/runtime/signature.cpp	Fri Oct 18 19:44:40 2013 -0700
   104.3 @@ -378,6 +378,16 @@
   104.4    return result;
   104.5  }
   104.6  
   104.7 +int SignatureStream::reference_parameter_count() {
   104.8 +  int args_count = 0;
   104.9 +  for ( ; !at_return_type(); next()) {
  104.10 +    if (is_object()) {
  104.11 +      args_count++;
  104.12 +    }
  104.13 +  }
  104.14 +  return args_count;
  104.15 +}
  104.16 +
  104.17  bool SignatureVerifier::is_valid_signature(Symbol* sig) {
  104.18    const char* signature = (const char*)sig->bytes();
  104.19    ssize_t len = sig->utf8_length();
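
The new SignatureStream::reference_parameter_count() walks only the parameter part of a signature (it stops at the return type) and counts entries for which is_object() is true, presumably in support of the argument type profiling added elsewhere in this merge. A stand-alone Java sketch of the same walk over a JVM method descriptor string (class and method names are illustrative, not part of the changeset, and arrays are assumed to count as references, as is_object() does in HotSpot):

    public class RefParamCount {
        // Count parameters of reference type (Lpkg/Name; and arrays) in a
        // method descriptor such as "(I[JLjava/lang/String;)V".
        static int referenceParameterCount(String descriptor) {
            int count = 0;
            int i = descriptor.indexOf('(') + 1;
            while (descriptor.charAt(i) != ')') {
                char c = descriptor.charAt(i);
                if (c == '[') {
                    while (descriptor.charAt(i) == '[') i++; // skip array dimensions
                    count++;                                 // any array is a reference
                    if (descriptor.charAt(i) == 'L') {
                        i = descriptor.indexOf(';', i);      // skip the element class name
                    }
                    i++;
                } else if (c == 'L') {
                    count++;                                 // object type: Lpkg/Name;
                    i = descriptor.indexOf(';', i) + 1;
                } else {
                    i++;                                     // primitive: B C D F I J S Z
                }
            }
            return count;
        }

        public static void main(String[] args) {
            // (int, long[], String) -> void has two reference parameters
            System.out.println(referenceParameterCount("(I[JLjava/lang/String;)V")); // prints 2
        }
    }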
   105.1 --- a/src/share/vm/runtime/signature.hpp	Fri Oct 18 10:37:26 2013 +0000
   105.2 +++ b/src/share/vm/runtime/signature.hpp	Fri Oct 18 19:44:40 2013 -0700
   105.3 @@ -401,6 +401,9 @@
   105.4  
   105.5    // return same as_symbol except allocation of new symbols is avoided.
   105.6    Symbol* as_symbol_or_null();
   105.7 +
   105.8 +  // count the number of references in the signature
   105.9 +  int reference_parameter_count();
  105.10  };
  105.11  
  105.12  class SignatureVerifier : public StackObj {
   106.1 --- a/src/share/vm/runtime/thread.cpp	Fri Oct 18 10:37:26 2013 +0000
   106.2 +++ b/src/share/vm/runtime/thread.cpp	Fri Oct 18 19:44:40 2013 -0700
   106.3 @@ -1454,7 +1454,6 @@
   106.4    _interp_only_mode    = 0;
   106.5    _special_runtime_exit_condition = _no_async_condition;
   106.6    _pending_async_exception = NULL;
   106.7 -  _is_compiling = false;
   106.8    _thread_stat = NULL;
   106.9    _thread_stat = new ThreadStatistics();
  106.10    _blocked_on_compilation = false;
  106.11 @@ -1815,7 +1814,8 @@
  106.12      // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
  106.13      // the execution of the method. If that is not enough, then we don't really care. Thread.stop
  106.14      // is deprecated anyhow.
  106.15 -    { int count = 3;
  106.16 +    if (!is_Compiler_thread()) {
  106.17 +      int count = 3;
  106.18        while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
  106.19          EXCEPTION_MARK;
  106.20          JavaValue result(T_VOID);
  106.21 @@ -1828,7 +1828,6 @@
  106.22          CLEAR_PENDING_EXCEPTION;
  106.23        }
  106.24      }
  106.25 -
  106.26      // notify JVMTI
  106.27      if (JvmtiExport::should_post_thread_life()) {
  106.28        JvmtiExport::post_thread_end(this);
  106.29 @@ -3239,6 +3238,7 @@
  106.30    _counters = counters;
  106.31    _buffer_blob = NULL;
  106.32    _scanned_nmethod = NULL;
  106.33 +  _compiler = NULL;
  106.34  
  106.35  #ifndef PRODUCT
  106.36    _ideal_graph_printer = NULL;
  106.37 @@ -3255,6 +3255,7 @@
  106.38    }
  106.39  }
  106.40  
  106.41 +
  106.42  // ======= Threads ========
  106.43  
  106.44  // The Threads class links together all active threads, and provides
  106.45 @@ -3275,8 +3276,6 @@
  106.46  // All JavaThreads
  106.47  #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
  106.48  
  106.49 -void os_stream();
  106.50 -
  106.51  // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
  106.52  void Threads::threads_do(ThreadClosure* tc) {
  106.53    assert_locked_or_safepoint(Threads_lock);
   107.1 --- a/src/share/vm/runtime/thread.hpp	Fri Oct 18 10:37:26 2013 +0000
   107.2 +++ b/src/share/vm/runtime/thread.hpp	Fri Oct 18 19:44:40 2013 -0700
   107.3 @@ -923,9 +923,6 @@
   107.4    volatile address _exception_handler_pc;        // PC for handler of exception
   107.5    volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
   107.6  
   107.7 -  // support for compilation
   107.8 -  bool    _is_compiling;                         // is true if a compilation is active inthis thread (one compilation per thread possible)
   107.9 -
  107.10    // support for JNI critical regions
  107.11    jint    _jni_active_critical;                  // count of entries into JNI critical region
  107.12  
  107.13 @@ -1005,10 +1002,6 @@
  107.14    // Testers
  107.15    virtual bool is_Java_thread() const            { return true;  }
  107.16  
  107.17 -  // compilation
  107.18 -  void set_is_compiling(bool f)                  { _is_compiling = f; }
  107.19 -  bool is_compiling() const                      { return _is_compiling; }
  107.20 -
  107.21    // Thread chain operations
  107.22    JavaThread* next() const                       { return _next; }
  107.23    void set_next(JavaThread* p)                   { _next = p; }
  107.24 @@ -1283,6 +1276,11 @@
  107.25    void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
  107.26    void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
  107.27  
  107.28 +  void clear_exception_oop_and_pc() {
  107.29 +    set_exception_oop(NULL);
  107.30 +    set_exception_pc(NULL);
  107.31 +  }
  107.32 +
  107.33    // Stack overflow support
  107.34    inline size_t stack_available(address cur_sp);
  107.35    address stack_yellow_zone_base()
  107.36 @@ -1811,13 +1809,14 @@
  107.37   private:
  107.38    CompilerCounters* _counters;
  107.39  
  107.40 -  ciEnv*        _env;
  107.41 -  CompileLog*   _log;
  107.42 -  CompileTask*  _task;
  107.43 -  CompileQueue* _queue;
  107.44 -  BufferBlob*   _buffer_blob;
  107.45 +  ciEnv*            _env;
  107.46 +  CompileLog*       _log;
  107.47 +  CompileTask*      _task;
  107.48 +  CompileQueue*     _queue;
  107.49 +  BufferBlob*       _buffer_blob;
  107.50  
  107.51 -  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
  107.52 +  nmethod*          _scanned_nmethod;  // nmethod being scanned by the sweeper
  107.53 +  AbstractCompiler* _compiler;
  107.54  
  107.55   public:
  107.56  
  107.57 @@ -1829,14 +1828,17 @@
  107.58    // Hide this compiler thread from external view.
  107.59    bool is_hidden_from_external_view() const      { return true; }
  107.60  
  107.61 -  CompileQueue* queue()                          { return _queue; }
  107.62 -  CompilerCounters* counters()                   { return _counters; }
  107.63 +  void set_compiler(AbstractCompiler* c)         { _compiler = c; }
  107.64 +  AbstractCompiler* compiler() const             { return _compiler; }
  107.65 +
  107.66 +  CompileQueue* queue()        const             { return _queue; }
  107.67 +  CompilerCounters* counters() const             { return _counters; }
  107.68  
  107.69    // Get/set the thread's compilation environment.
  107.70    ciEnv*        env()                            { return _env; }
  107.71    void          set_env(ciEnv* env)              { _env = env; }
  107.72  
  107.73 -  BufferBlob*   get_buffer_blob()                { return _buffer_blob; }
  107.74 +  BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
  107.75    void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; };
  107.76  
  107.77    // Get/set the thread's logging information
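
Taken together, the thread changes above replace the old per-JavaThread _is_compiling flag (one compilation per thread) with a typed CompilerThread::_compiler pointer, so a compiler thread now records which AbstractCompiler it is running; compiler threads also skip the Thread.exit() upcall on termination, and the now-unused _is_compiling entry is dropped from vmStructs.cpp below.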
   108.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Oct 18 10:37:26 2013 +0000
   108.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Oct 18 19:44:40 2013 -0700
   108.3 @@ -58,7 +58,7 @@
   108.4  #include "memory/generation.hpp"
   108.5  #include "memory/generationSpec.hpp"
   108.6  #include "memory/heap.hpp"
   108.7 -#include "memory/metablock.hpp"
   108.8 +#include "memory/metachunk.hpp"
   108.9  #include "memory/referenceType.hpp"
  108.10  #include "memory/space.hpp"
  108.11  #include "memory/tenuredGeneration.hpp"
  108.12 @@ -917,7 +917,6 @@
  108.13    volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
  108.14    volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
  108.15    volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
  108.16 -  nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
  108.17    nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
  108.18    nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
  108.19     volatile_nonstatic_field(JavaThread,        _thread_state,                                 JavaThreadState)                       \
   109.1 --- a/src/share/vm/shark/sharkCompiler.cpp	Fri Oct 18 10:37:26 2013 +0000
   109.2 +++ b/src/share/vm/shark/sharkCompiler.cpp	Fri Oct 18 19:44:40 2013 -0700
   109.3 @@ -133,11 +133,10 @@
   109.4      exit(1);
   109.5    }
   109.6  
   109.7 -  execution_engine()->addModule(
   109.8 -    _native_context->module());
   109.9 +  execution_engine()->addModule(_native_context->module());
  109.10  
  109.11    // All done
  109.12 -  mark_initialized();
  109.13 +  set_state(initialized);
  109.14  }
  109.15  
  109.16  void SharkCompiler::initialize() {
   110.1 --- a/src/share/vm/shark/sharkCompiler.hpp	Fri Oct 18 10:37:26 2013 +0000
   110.2 +++ b/src/share/vm/shark/sharkCompiler.hpp	Fri Oct 18 19:44:40 2013 -0700
   110.3 @@ -50,10 +50,6 @@
   110.4      return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
   110.5    }
   110.6  
   110.7 -  // Customization
   110.8 -  bool needs_adapters()  { return false; }
   110.9 -  bool needs_stubs()     { return false; }
  110.10 -
  110.11    // Initialization
  110.12    void initialize();
  110.13  
   111.1 --- a/src/share/vm/utilities/ostream.cpp	Fri Oct 18 10:37:26 2013 +0000
   111.2 +++ b/src/share/vm/utilities/ostream.cpp	Fri Oct 18 19:44:40 2013 -0700
   111.3 @@ -465,7 +465,7 @@
   111.4  }
   111.5  
   111.6  // log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
   111.7 -// in log_name, %p => pipd1234 and
   111.8 +// in log_name, %p => pid1234 and
   111.9  //              %t => YYYY-MM-DD_HH-MM-SS
  111.10  static const char* make_log_name(const char* log_name, const char* force_directory) {
  111.11    char timestr[32];
  111.12 @@ -792,7 +792,7 @@
  111.13  
  111.14  void defaultStream::init_log() {
  111.15    // %%% Need a MutexLocker?
  111.16 -  const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log";
  111.17 +  const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
  111.18    const char* try_name = make_log_name(log_name, NULL);
  111.19    fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
  111.20    if (!file->is_open()) {
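
Both ostream.cpp hunks fix the same doubled prefix: %p in a log name expands to "pid" followed by the process id, so the old default "hotspot_pid%p.log" produced e.g. hotspot_pidpid1234.log. With the default changed to "hotspot_%p.log", a VM whose pid is 1234 (value illustrative) now writes hotspot_pid1234.log.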
   112.1 --- a/src/share/vm/utilities/vmError.cpp	Fri Oct 18 10:37:26 2013 +0000
   112.2 +++ b/src/share/vm/utilities/vmError.cpp	Fri Oct 18 19:44:40 2013 -0700
   112.3 @@ -1050,7 +1050,7 @@
   112.4          FILE* replay_data_file = os::open(fd, "w");
   112.5          if (replay_data_file != NULL) {
   112.6            fileStream replay_data_stream(replay_data_file, /*need_close=*/true);
   112.7 -          env->dump_replay_data(&replay_data_stream);
   112.8 +          env->dump_replay_data_unsafe(&replay_data_stream);
   112.9            out.print_raw("#\n# Compiler replay data is saved as:\n# ");
  112.10            out.print_raw_cr(buffer);
  112.11          } else {
   113.1 --- a/test/TEST.groups	Fri Oct 18 10:37:26 2013 +0000
   113.2 +++ b/test/TEST.groups	Fri Oct 18 19:44:40 2013 -0700
   113.3 @@ -27,7 +27,7 @@
   113.4  # - compact1, compact2, compact3, full JRE, JDK
   113.5  #
   113.6  # In addition they support testing of the minimal VM on compact1 and compact2.
   113.7 -# Essentially this defines groups based around the specified API's and VM 
    113.8 +# Essentially this defines groups based around the specified APIs and VM
   113.9  # services available in the runtime.
  113.10  #
  113.11  # The groups are defined hierarchically in two forms:
  113.12 @@ -44,9 +44,9 @@
  113.13  # by listing the top-level test directories.
  113.14  #
  113.15  # To use a group simply list it on the jtreg command line eg:
  113.16 -#   jtreg :jdk    
  113.17 +#   jtreg :jdk
  113.18  # runs all tests. While
  113.19 -#   jtreg :compact2  
  113.20 +#   jtreg :compact2
  113.21  # runs those tests that only require compact1 and compact2 API's.
  113.22  #
  113.23  
  113.24 @@ -69,6 +69,7 @@
  113.25    runtime/7107135/Test7107135.sh \
  113.26    runtime/7158988/FieldMonitor.java \
  113.27    runtime/7194254/Test7194254.java \
  113.28 +  runtime/8026365/InvokeSpecialAnonTest.java \
  113.29    runtime/jsig/Test8017498.sh \
  113.30    runtime/Metaspace/FragmentMetaspace.java \
  113.31    runtime/NMT/BaselineWithParameter.java \
  113.32 @@ -124,7 +125,7 @@
  113.33    compiler/whitebox/IsMethodCompilableTest.java \
  113.34    gc/6581734/Test6581734.java \
  113.35    gc/7072527/TestFullGCCount.java \
  113.36 -  gc/7168848/HumongousAlloc.java \
  113.37 +  gc/g1/TestHumongousAllocInitialMark.java \
  113.38    gc/arguments/TestG1HeapRegionSize.java \
  113.39    gc/metaspace/TestMetaspaceMemoryPool.java \
  113.40    runtime/InternalApi/ThreadCpuTimesDeadlock.java \
  113.41 @@ -140,7 +141,7 @@
  113.42   -:needs_jdk
  113.43  
  113.44  # Tests that require compact2 API's and a full VM
  113.45 -#  
  113.46 +#
  113.47  needs_full_vm_compact2 =
  113.48  
  113.49  # Compact 1 adds full VM tests
   114.1 --- a/test/compiler/8013496/Test8013496.sh	Fri Oct 18 10:37:26 2013 +0000
   114.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   114.3 @@ -1,55 +0,0 @@
   114.4 -#!/bin/sh
   114.5 -# 
   114.6 -# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   114.7 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   114.8 -# 
   114.9 -# This code is free software; you can redistribute it and/or modify it
  114.10 -# under the terms of the GNU General Public License version 2 only, as
  114.11 -# published by the Free Software Foundation.
  114.12 -# 
  114.13 -# This code is distributed in the hope that it will be useful, but WITHOUT
  114.14 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  114.15 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  114.16 -# version 2 for more details (a copy is included in the LICENSE file that
  114.17 -# accompanied this code).
  114.18 -# 
  114.19 -# You should have received a copy of the GNU General Public License version
  114.20 -# 2 along with this work; if not, write to the Free Software Foundation,
  114.21 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  114.22 -# 
  114.23 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  114.24 -# or visit www.oracle.com if you need additional information or have any
  114.25 -# questions.
  114.26 -# 
  114.27 -#
  114.28 -# @test
  114.29 -# @bug 8013496
  114.30 -# @summary Test checks that the order in which ReversedCodeCacheSize and 
  114.31 -#          InitialCodeCacheSize are passed to the VM is irrelevant.  
  114.32 -# @run shell Test8013496.sh
  114.33 -#
  114.34 -#
  114.35 -## some tests require path to find test source dir
  114.36 -if [ "${TESTSRC}" = "" ]
  114.37 -then
  114.38 -  TESTSRC=${PWD}
  114.39 -  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
  114.40 -fi
  114.41 -echo "TESTSRC=${TESTSRC}"
  114.42 -## Adding common setup Variables for running shell tests.
  114.43 -. ${TESTSRC}/../../test_env.sh
  114.44 -set -x
  114.45 -
  114.46 -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1
  114.47 -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1
  114.48 -
  114.49 -diff 1.out 2.out
  114.50 -
  114.51 -result=$?
  114.52 -if [ $result -eq 0 ] ; then  
  114.53 -  echo "Test Passed"
  114.54 -  exit 0
  114.55 -else
  114.56 -  echo "Test Failed"
  114.57 -  exit 1
  114.58 -fi
   115.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   115.2 +++ b/test/compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java	Fri Oct 18 19:44:40 2013 -0700
   115.3 @@ -0,0 +1,53 @@
   115.4 +/*
   115.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   115.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   115.7 + *
   115.8 + * This code is free software; you can redistribute it and/or modify it
   115.9 + * under the terms of the GNU General Public License version 2 only, as
  115.10 + * published by the Free Software Foundation.
  115.11 + *
  115.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  115.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  115.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  115.15 + * version 2 for more details (a copy is included in the LICENSE file that
  115.16 + * accompanied this code).
  115.17 + *
  115.18 + * You should have received a copy of the GNU General Public License version
  115.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  115.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  115.21 + *
  115.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  115.23 + * or visit www.oracle.com if you need additional information or have any
  115.24 + * questions.
  115.25 + */
  115.26 +
  115.27 +/*
  115.28 + * @test
  115.29 + * @bug 8013496
   115.30 + * @summary Test checks that the order in which ReservedCodeCacheSize and
  115.31 + *          InitialCodeCacheSize are passed to the VM is irrelevant.
  115.32 + * @library /testlibrary
  115.33 + *
  115.34 + */
  115.35 +import com.oracle.java.testlibrary.*;
  115.36 +
  115.37 +public class CheckReservedInitialCodeCacheSizeArgOrder {
  115.38 +  public static void main(String[] args) throws Exception {
  115.39 +    ProcessBuilder pb1,  pb2;
  115.40 +    OutputAnalyzer out1, out2;
  115.41 +
  115.42 +    pb1 = ProcessTools.createJavaProcessBuilder("-XX:InitialCodeCacheSize=4m", "-XX:ReservedCodeCacheSize=8m", "-version");
  115.43 +    pb2 = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=8m", "-XX:InitialCodeCacheSize=4m", "-version");
  115.44 +
  115.45 +    out1 = new OutputAnalyzer(pb1.start());
  115.46 +    out2 = new OutputAnalyzer(pb2.start());
  115.47 +
  115.48 +    // Check that the outputs are equal
  115.49 +    if (out1.getStdout().compareTo(out2.getStdout()) != 0) {
  115.50 +      throw new RuntimeException("Test failed");
  115.51 +    }
  115.52 +
  115.53 +    out1.shouldHaveExitValue(0);
  115.54 +    out2.shouldHaveExitValue(0);
  115.55 +  }
  115.56 +}
   116.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   116.2 +++ b/test/compiler/intrinsics/mathexact/RepeatTest.java	Fri Oct 18 19:44:40 2013 -0700
   116.3 @@ -0,0 +1,107 @@
   116.4 +/*
   116.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   116.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   116.7 + *
   116.8 + * This code is free software; you can redistribute it and/or modify it
   116.9 + * under the terms of the GNU General Public License version 2 only, as
  116.10 + * published by the Free Software Foundation.
  116.11 + *
  116.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  116.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  116.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  116.15 + * version 2 for more details (a copy is included in the LICENSE file that
  116.16 + * accompanied this code).
  116.17 + *
  116.18 + * You should have received a copy of the GNU General Public License version
  116.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  116.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  116.21 + *
  116.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  116.23 + * or visit www.oracle.com if you need additional information or have any
  116.24 + * questions.
  116.25 + */
  116.26 +
  116.27 +/*
  116.28 + * @test
  116.29 + * @bug 8025657
  116.30 + * @summary Test repeating addExact
  116.31 + * @compile RepeatTest.java
  116.32 + * @run main RepeatTest
  116.33 + *
  116.34 + */
  116.35 +
  116.36 +import java.lang.ArithmeticException;
  116.37 +
  116.38 +public class RepeatTest {
  116.39 +  public static void main(String[] args) {
  116.40 +    java.util.Random rnd = new java.util.Random();
  116.41 +    for (int i = 0; i < 50000; ++i) {
  116.42 +      int x = Integer.MAX_VALUE - 10;
  116.43 +      int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5); //rnd.nextInt() / 2;
  116.44 +
  116.45 +      int c = rnd.nextInt() / 2;
  116.46 +      int d = rnd.nextInt() / 2;
  116.47 +
   116.48 +      int a = addExact(x, y);  // x + y always overflows, so all four catch paths run: 5+1+6+2+7+3+8+4 == 36
   116.49 +
   116.50 +      if (a != 36) {
   116.51 +          throw new RuntimeException("a != 36 : " + a);
  116.52 +      }
  116.53 +
  116.54 +      int b = nonExact(c, d);
  116.55 +      int n = addExact2(c, d);
  116.56 +
  116.57 +
  116.58 +      if (n != b) {
  116.59 +        throw new RuntimeException("n != b : " + n + " != " + b);
  116.60 +      }
  116.61 +    }
  116.62 +  }
  116.63 +
  116.64 +  public static int addExact2(int x, int y) {
  116.65 +      int result = 0;
  116.66 +      result += java.lang.Math.addExact(x, y);
  116.67 +      result += java.lang.Math.addExact(x, y);
  116.68 +      result += java.lang.Math.addExact(x, y);
  116.69 +      result += java.lang.Math.addExact(x, y);
  116.70 +      return result;
  116.71 +  }
  116.72 +
  116.73 +  public static int addExact(int x, int y) {
  116.74 +    int result = 0;
  116.75 +    try {
  116.76 +        result += 5;
  116.77 +        result = java.lang.Math.addExact(x, y);
  116.78 +    } catch (ArithmeticException e) {
  116.79 +        result += 1;
  116.80 +    }
  116.81 +    try {
  116.82 +        result += 6;
  116.83 +
  116.84 +        result += java.lang.Math.addExact(x, y);
  116.85 +    } catch (ArithmeticException e) {
  116.86 +        result += 2;
  116.87 +    }
  116.88 +    try {
  116.89 +        result += 7;
  116.90 +        result += java.lang.Math.addExact(x, y);
  116.91 +    } catch (ArithmeticException e) {
  116.92 +        result += 3;
  116.93 +    }
  116.94 +    try {
  116.95 +        result += 8;
  116.96 +        result += java.lang.Math.addExact(x, y);
  116.97 +    } catch (ArithmeticException e) {
  116.98 +        result += 4;
  116.99 +    }
 116.100 +    return result;
 116.101 +  }
 116.102 +
 116.103 +  public static int nonExact(int x, int y) {
 116.104 +    int result = x + y;
 116.105 +    result += x + y;
 116.106 +    result += x + y;
 116.107 +    result += x + y;
 116.108 +    return result;
 116.109 +  }
 116.110 +}
   117.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   117.2 +++ b/test/compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java	Fri Oct 18 19:44:40 2013 -0700
   117.3 @@ -0,0 +1,40 @@
   117.4 +/*
   117.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   117.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   117.7 + *
   117.8 + * This code is free software; you can redistribute it and/or modify it
   117.9 + * under the terms of the GNU General Public License version 2 only, as
  117.10 + * published by the Free Software Foundation.
  117.11 + *
  117.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  117.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  117.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  117.15 + * version 2 for more details (a copy is included in the LICENSE file that
  117.16 + * accompanied this code).
  117.17 + *
  117.18 + * You should have received a copy of the GNU General Public License version
  117.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  117.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  117.21 + *
  117.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  117.23 + * or visit www.oracle.com if you need additional information or have any
  117.24 + * questions.
  117.25 + *
  117.26 + */
  117.27 +
  117.28 +/**
  117.29 + * @test
  117.30 + * @bug 8026124
   117.31 + * @summary JavaScript file provoked an assertion failure in linkResolver.cpp
  117.32 + *
  117.33 + * @run main/othervm CreatesInterfaceDotEqualsCallInfo
  117.34 + */
  117.35 +
  117.36 +public class CreatesInterfaceDotEqualsCallInfo {
  117.37 +  public static void main(String[] args) throws java.io.IOException {
  117.38 +    String[] jsargs = { System.getProperty("test.src", ".") +
  117.39 +                        "/createsInterfaceDotEqualsCallInfo.js" };
  117.40 +    jdk.nashorn.tools.Shell.main(System.in, System.out, System.err, jsargs);
  117.41 +    System.out.println("PASS, did not crash running Javascript");
  117.42 +  }
  117.43 +}
   118.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   118.2 +++ b/test/compiler/jsr292/createsInterfaceDotEqualsCallInfo.js	Fri Oct 18 19:44:40 2013 -0700
   118.3 @@ -0,0 +1,26 @@
   118.4 +/*
   118.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   118.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   118.7 + *
   118.8 + * This code is free software; you can redistribute it and/or modify it
   118.9 + * under the terms of the GNU General Public License version 2 only, as
  118.10 + * published by the Free Software Foundation.
  118.11 + *
  118.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  118.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  118.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  118.15 + * version 2 for more details (a copy is included in the LICENSE file that
  118.16 + * accompanied this code).
  118.17 + *
  118.18 + * You should have received a copy of the GNU General Public License version
  118.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  118.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  118.21 + *
  118.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  118.23 + * or visit www.oracle.com if you need additional information or have any
  118.24 + * questions.
  118.25 + *
  118.26 + */
  118.27 +
  118.28 +var path = new java.io.File("/Users/someone").toPath();
  118.29 +path.toString();
   119.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   119.2 +++ b/test/compiler/startup/SmallCodeCacheStartup.java	Fri Oct 18 19:44:40 2013 -0700
   119.3 @@ -0,0 +1,43 @@
   119.4 +/*
   119.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   119.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   119.7 + *
   119.8 + * This code is free software; you can redistribute it and/or modify it
   119.9 + * under the terms of the GNU General Public License version 2 only, as
  119.10 + * published by the Free Software Foundation.
  119.11 + *
  119.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  119.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  119.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  119.15 + * version 2 for more details (a copy is included in the LICENSE file that
  119.16 + * accompanied this code).
  119.17 + *
  119.18 + * You should have received a copy of the GNU General Public License version
  119.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  119.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  119.21 + *
  119.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  119.23 + * or visit www.oracle.com if you need additional information or have any
  119.24 + * questions.
  119.25 + */
  119.26 +
  119.27 +/*
  119.28 + * @test
  119.29 + * @bug 8023014
  119.30 + * @summary Test ensures that there is no crash when compiler initialization fails
  119.31 + * @library /testlibrary
  119.32 + *
  119.33 + */
  119.34 +import com.oracle.java.testlibrary.*;
  119.35 +
  119.36 +public class SmallCodeCacheStartup {
  119.37 +  public static void main(String[] args) throws Exception {
  119.38 +    ProcessBuilder pb;
  119.39 +    OutputAnalyzer out;
  119.40 +
  119.41 +    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-version");
  119.42 +    out = new OutputAnalyzer(pb.start());
  119.43 +    out.shouldContain("no space to run compiler");
  119.44 +    out.shouldHaveExitValue(0);
  119.45 +  }
  119.46 +}
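
The flag combination is picked to make compiler start-up fail rather than to exercise compilation: -XX:CICompilerCount=64 asks for 64 compiler threads, and a code cache reserved at only 3 MB presumably cannot hold a buffer blob for each of them. The test passes when the VM degrades gracefully, printing "no space to run compiler" but still completing -version with exit value 0 instead of crashing (the 8023014 failure mode).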
   120.1 --- a/test/gc/7168848/HumongousAlloc.java	Fri Oct 18 10:37:26 2013 +0000
   120.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   120.3 @@ -1,74 +0,0 @@
   120.4 -/*
   120.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   120.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   120.7 - *
   120.8 - * This code is free software; you can redistribute it and/or modify it
   120.9 - * under the terms of the GNU General Public License version 2 only, as
  120.10 - * published by the Free Software Foundation.
  120.11 - *
  120.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
  120.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  120.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  120.15 - * version 2 for more details (a copy is included in the LICENSE file that
  120.16 - * accompanied this code).
  120.17 - *
  120.18 - * You should have received a copy of the GNU General Public License version
  120.19 - * 2 along with this work; if not, write to the Free Software Foundation,
  120.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  120.21 - *
  120.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  120.23 - * or visit www.oracle.com if you need additional information or have any
  120.24 - * questions.
  120.25 - */
  120.26 -
  120.27 -/*
  120.28 - * @test Humongous.java
  120.29 - * @bug 7168848
  120.30 - * @summary G1: humongous object allocations should initiate marking cycles when necessary
  120.31 - * @run main/othervm -Xms100m -Xmx100m -XX:+PrintGC -XX:G1HeapRegionSize=1m -XX:+UseG1GC  HumongousAlloc
  120.32 - *
  120.33 - */
  120.34 -import java.lang.management.GarbageCollectorMXBean;
  120.35 -import java.lang.management.ManagementFactory;
  120.36 -import java.util.List;
  120.37 -
  120.38 -public class HumongousAlloc {
  120.39 -
  120.40 -    public static byte[] dummy;
  120.41 -    private static int sleepFreq = 40;
  120.42 -    private static int sleepTime = 1000;
  120.43 -    private static double size = 0.75;
  120.44 -    private static int iterations = 50;
  120.45 -    private static int MB = 1024 * 1024;
  120.46 -
  120.47 -    public static void allocate(int size, int sleepTime, int sleepFreq) throws InterruptedException {
  120.48 -        System.out.println("Will allocate objects of size: " + size
  120.49 -                + " bytes and sleep for " + sleepTime
  120.50 -                + " ms after every " + sleepFreq + "th allocation.");
  120.51 -        int count = 0;
  120.52 -        while (count < iterations) {
  120.53 -            for (int i = 0; i < sleepFreq; i++) {
  120.54 -                dummy = new byte[size - 16];
  120.55 -            }
  120.56 -            Thread.sleep(sleepTime);
  120.57 -            count++;
  120.58 -        }
  120.59 -    }
  120.60 -
  120.61 -    public static void main(String[] args) throws InterruptedException {
  120.62 -        allocate((int) (size * MB), sleepTime, sleepFreq);
  120.63 -        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
  120.64 -        for (GarbageCollectorMXBean collector : collectors) {
  120.65 -            if (collector.getName().contains("G1 Old")) {
  120.66 -               long count = collector.getCollectionCount();
  120.67 -                if (count > 0) {
  120.68 -                    throw new RuntimeException("Failed: FullGCs should not have happened. The number of FullGC run is " + count);
  120.69 -                }
  120.70 -                else {
  120.71 -                    System.out.println("Passed.");
  120.72 -                }
  120.73 -            }
  120.74 -        }
  120.75 -    }
  120.76 -}
  120.77 -
   121.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   121.2 +++ b/test/gc/arguments/TestHeapFreeRatio.java	Fri Oct 18 19:44:40 2013 -0700
   121.3 @@ -0,0 +1,105 @@
   121.4 +/*
   121.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   121.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   121.7 + *
   121.8 + * This code is free software; you can redistribute it and/or modify it
   121.9 + * under the terms of the GNU General Public License version 2 only, as
  121.10 + * published by the Free Software Foundation.
  121.11 + *
  121.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  121.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  121.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  121.15 + * version 2 for more details (a copy is included in the LICENSE file that
  121.16 + * accompanied this code).
  121.17 + *
  121.18 + * You should have received a copy of the GNU General Public License version
  121.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  121.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  121.21 + *
  121.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  121.23 + * or visit www.oracle.com if you need additional information or have any
  121.24 + * questions.
  121.25 + */
  121.26 +
  121.27 +/*
  121.28 + * @test TestHeapFreeRatio
  121.29 + * @key gc
  121.30 + * @bug 8025661
  121.31 + * @summary Test parsing of -Xminf and -Xmaxf
  121.32 + * @library /testlibrary
  121.33 + * @run main/othervm TestHeapFreeRatio
  121.34 + */
  121.35 +
  121.36 +import com.oracle.java.testlibrary.*;
  121.37 +
  121.38 +public class TestHeapFreeRatio {
  121.39 +
  121.40 +  enum Validation {
  121.41 +    VALID,
  121.42 +    MIN_INVALID,
  121.43 +    MAX_INVALID,
  121.44 +    COMBINATION_INVALID
  121.45 +  }
  121.46 +
  121.47 +  private static void testMinMaxFreeRatio(String min, String max, Validation type) throws Exception {
  121.48 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  121.49 +        "-Xminf" + min,
  121.50 +        "-Xmaxf" + max,
  121.51 +        "-version");
  121.52 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
  121.53 +
  121.54 +    switch (type) {
  121.55 +    case VALID:
  121.56 +      output.shouldNotContain("Error");
  121.57 +      output.shouldHaveExitValue(0);
  121.58 +      break;
  121.59 +    case MIN_INVALID:
  121.60 +      output.shouldContain("Bad min heap free percentage size: -Xminf" + min);
  121.61 +      output.shouldContain("Error");
  121.62 +      output.shouldHaveExitValue(1);
  121.63 +      break;
  121.64 +    case MAX_INVALID:
  121.65 +      output.shouldContain("Bad max heap free percentage size: -Xmaxf" + max);
  121.66 +      output.shouldContain("Error");
  121.67 +      output.shouldHaveExitValue(1);
  121.68 +      break;
  121.69 +    case COMBINATION_INVALID:
  121.70 +      output.shouldContain("must be less than or equal to MaxHeapFreeRatio");
  121.71 +      output.shouldContain("Error");
  121.72 +      output.shouldHaveExitValue(1);
  121.73 +      break;
  121.74 +    default:
  121.75 +      throw new IllegalStateException("Must specify expected validation type");
  121.76 +    }
  121.77 +
  121.78 +    System.out.println(output.getOutput());
  121.79 +  }
  121.80 +
  121.81 +  public static void main(String args[]) throws Exception {
  121.82 +    testMinMaxFreeRatio( "0.1", "0.5", Validation.VALID);
  121.83 +    testMinMaxFreeRatio(  ".1",  ".5", Validation.VALID);
  121.84 +    testMinMaxFreeRatio( "0.5", "0.5", Validation.VALID);
  121.85 +
  121.86 +    testMinMaxFreeRatio("-0.1", "0.5", Validation.MIN_INVALID);
  121.87 +    testMinMaxFreeRatio( "1.1", "0.5", Validation.MIN_INVALID);
  121.88 +    testMinMaxFreeRatio("=0.1", "0.5", Validation.MIN_INVALID);
  121.89 +    testMinMaxFreeRatio("0.1f", "0.5", Validation.MIN_INVALID);
  121.90 +    testMinMaxFreeRatio(
  121.91 +                     "INVALID", "0.5", Validation.MIN_INVALID);
  121.92 +    testMinMaxFreeRatio(
  121.93 +                  "2147483647", "0.5", Validation.MIN_INVALID);
  121.94 +
  121.95 +    testMinMaxFreeRatio( "0.1", "-0.5", Validation.MAX_INVALID);
  121.96 +    testMinMaxFreeRatio( "0.1",  "1.5", Validation.MAX_INVALID);
  121.97 +    testMinMaxFreeRatio( "0.1", "0.5f", Validation.MAX_INVALID);
  121.98 +    testMinMaxFreeRatio( "0.1", "=0.5", Validation.MAX_INVALID);
  121.99 +    testMinMaxFreeRatio(
 121.100 +                     "0.1",  "INVALID", Validation.MAX_INVALID);
 121.101 +    testMinMaxFreeRatio(
 121.102 +                   "0.1", "2147483647", Validation.MAX_INVALID);
 121.103 +
 121.104 +    testMinMaxFreeRatio( "0.5",  "0.1", Validation.COMBINATION_INVALID);
 121.105 +    testMinMaxFreeRatio(  ".5",  ".10", Validation.COMBINATION_INVALID);
 121.106 +    testMinMaxFreeRatio("0.12","0.100", Validation.COMBINATION_INVALID);
 121.107 +  }
 121.108 +}
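
-Xminf and -Xmaxf are the legacy spellings of the heap-free-ratio flags whose descriptions were reworded in globals.hpp above; the VM scales the fraction to a percentage, so (taking the first VALID case) -Xminf0.1 -Xmaxf0.5 is in effect -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50. That is also why ".5" with ".10" is COMBINATION_INVALID: it parses as a 50% minimum against a 10% maximum, and the minimum must not exceed the maximum.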
   122.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   122.2 +++ b/test/gc/g1/TestHumongousAllocInitialMark.java	Fri Oct 18 19:44:40 2013 -0700
   122.3 @@ -0,0 +1,75 @@
   122.4 +/*
   122.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   122.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   122.7 + *
   122.8 + * This code is free software; you can redistribute it and/or modify it
   122.9 + * under the terms of the GNU General Public License version 2 only, as
  122.10 + * published by the Free Software Foundation.
  122.11 + *
  122.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  122.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  122.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  122.15 + * version 2 for more details (a copy is included in the LICENSE file that
  122.16 + * accompanied this code).
  122.17 + *
  122.18 + * You should have received a copy of the GNU General Public License version
  122.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  122.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  122.21 + *
  122.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  122.23 + * or visit www.oracle.com if you need additional information or have any
  122.24 + * questions.
  122.25 + */
  122.26 +
  122.27 +/*
  122.28 + * @test TestHumongousAllocInitialMark
  122.29 + * @bug 7168848
  122.30 + * @summary G1: humongous object allocations should initiate marking cycles when necessary
  122.31 + * @library /testlibrary
  122.32 + */
  122.33 +
  122.34 +import com.oracle.java.testlibrary.*;
  122.35 +
  122.36 +public class TestHumongousAllocInitialMark {
  122.37 +    private static final int heapSize                       = 200; // MB
  122.38 +    private static final int heapRegionSize                 = 1;   // MB
  122.39 +    private static final int initiatingHeapOccupancyPercent = 50;  // %
  122.40 +
  122.41 +    public static void main(String[] args) throws Exception {
  122.42 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  122.43 +            "-XX:+UseG1GC",
  122.44 +            "-Xms" + heapSize + "m",
  122.45 +            "-Xmx" + heapSize + "m",
  122.46 +            "-XX:G1HeapRegionSize=" + heapRegionSize + "m",
  122.47 +            "-XX:InitiatingHeapOccupancyPercent=" + initiatingHeapOccupancyPercent,
  122.48 +            "-XX:+PrintGC",
  122.49 +            HumongousObjectAllocator.class.getName());
  122.50 +
  122.51 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  122.52 +        output.shouldContain("GC pause (G1 Humongous Allocation) (young) (initial-mark)");
  122.53 +        output.shouldNotContain("Full GC");
  122.54 +        output.shouldHaveExitValue(0);
  122.55 +    }
  122.56 +
  122.57 +    static class HumongousObjectAllocator {
  122.58 +        private static byte[] dummy;
  122.59 +
  122.60 +        public static void main(String [] args) throws Exception {
  122.61 +            // Make object size 75% of region size
  122.62 +            final int humongousObjectSize =
  122.63 +                (int)(heapRegionSize * 1024 * 1024 * 0.75);
  122.64 +
  122.65 +            // Number of objects to allocate to go above IHOP
  122.66 +            final int humongousObjectAllocations =
  122.67 +                (int)((heapSize * initiatingHeapOccupancyPercent / 100.0) / heapRegionSize) + 1;
  122.68 +
  122.69 +            // Allocate
  122.70 +            for (int i = 1; i <= humongousObjectAllocations; i++) {
  122.71 +                System.out.println("Allocating humongous object " + i + "/" + humongousObjectAllocations +
  122.72 +                                   " of size " + humongousObjectSize + " bytes");
  122.73 +                dummy = new byte[humongousObjectSize];
  122.74 +            }
  122.75 +        }
  122.76 +    }
  122.77 +}
  122.78 +
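The allocation count follows from the constants above: the initiating threshold is 200 MB x 50% = 100 MB, and although each object is only 0.75 MB, a humongous object always occupies whole regions, so every allocation consumes a full 1 MB region. Hence humongousObjectAllocations = (200 * 50 / 100) / 1 + 1 = 101, and after roughly 101 MB of regions the occupancy crosses the threshold, which must surface in the -XX:+PrintGC output as the expected "(G1 Humongous Allocation) (young) (initial-mark)" pause rather than a Full GC.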
   123.1 --- a/test/gc/startup_warnings/TestCMS.java	Fri Oct 18 10:37:26 2013 +0000
   123.2 +++ b/test/gc/startup_warnings/TestCMS.java	Fri Oct 18 19:44:40 2013 -0700
   123.3 @@ -38,7 +38,7 @@
   123.4    public static void main(String args[]) throws Exception {
   123.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-version");
   123.6      OutputAnalyzer output = new OutputAnalyzer(pb.start());
   123.7 -    output.shouldNotContain("warning");
   123.8 +    output.shouldNotContain("deprecated");
   123.9      output.shouldNotContain("error");
  123.10      output.shouldHaveExitValue(0);
  123.11    }
   124.1 --- a/test/gc/startup_warnings/TestCMSNoIncrementalMode.java	Fri Oct 18 10:37:26 2013 +0000
   124.2 +++ b/test/gc/startup_warnings/TestCMSNoIncrementalMode.java	Fri Oct 18 19:44:40 2013 -0700
   124.3 @@ -37,7 +37,7 @@
   124.4    public static void main(String args[]) throws Exception {
   124.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version");
   124.6      OutputAnalyzer output = new OutputAnalyzer(pb.start());
   124.7 -    output.shouldNotContain("warning");
   124.8 +    output.shouldNotContain("deprecated");
   124.9      output.shouldNotContain("error");
  124.10      output.shouldHaveExitValue(0);
  124.11    }
   125.1 --- a/test/gc/startup_warnings/TestG1.java	Fri Oct 18 10:37:26 2013 +0000
   125.2 +++ b/test/gc/startup_warnings/TestG1.java	Fri Oct 18 19:44:40 2013 -0700
   125.3 @@ -37,7 +37,7 @@
   125.4    public static void main(String args[]) throws Exception {
   125.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", "-version");
   125.6      OutputAnalyzer output = new OutputAnalyzer(pb.start());
   125.7 -    output.shouldNotContain("warning");
   125.8 +    output.shouldNotContain("deprecated");
   125.9      output.shouldNotContain("error");
  125.10      output.shouldHaveExitValue(0);
  125.11    }
   126.1 --- a/test/gc/startup_warnings/TestParNewCMS.java	Fri Oct 18 10:37:26 2013 +0000
   126.2 +++ b/test/gc/startup_warnings/TestParNewCMS.java	Fri Oct 18 19:44:40 2013 -0700
   126.3 @@ -38,7 +38,7 @@
   126.4    public static void main(String args[]) throws Exception {
   126.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
   126.6      OutputAnalyzer output = new OutputAnalyzer(pb.start());
   126.7 -    output.shouldNotContain("warning");
   126.8 +    output.shouldNotContain("deprecated");
   126.9      output.shouldNotContain("error");
  126.10      output.shouldHaveExitValue(0);
  126.11    }
   127.1 --- a/test/gc/startup_warnings/TestParallelGC.java	Fri Oct 18 10:37:26 2013 +0000
   127.2 +++ b/test/gc/startup_warnings/TestParallelGC.java	Fri Oct 18 19:44:40 2013 -0700
   127.3 @@ -38,7 +38,7 @@
   127.4    public static void main(String args[]) throws Exception {
   127.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-version");
   127.6      OutputAnalyzer output = new OutputAnalyzer(pb.start());
   127.7 -    output.shouldNotContain("warning");
   127.8 +    output.shouldNotContain("deprecated");
   127.9      output.shouldNotContain("error");
  127.10      output.shouldHaveExitValue(0);
  127.11    }
   128.1 --- a/test/gc/startup_warnings/TestParallelScavengeSerialOld.java	Fri Oct 18 10:37:26 2013 +0000
   128.2 +++ b/test/gc/startup_warnings/TestParallelScavengeSerialOld.java	Fri Oct 18 19:44:40 2013 -0700
   128.3 @@ -38,7 +38,7 @@
   128.4    public static void main(String args[]) throws Exception {
   128.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParallelGC", "-XX:-UseParallelOldGC", "-version");
   128.6      OutputAnalyzer output = new OutputAnalyzer(pb.start());
   128.7 -    output.shouldNotContain("warning");
   128.8 +    output.shouldNotContain("deprecated");
   128.9      output.shouldNotContain("error");
  128.10      output.shouldHaveExitValue(0);
  128.11    }
   129.1 --- a/test/gc/startup_warnings/TestSerialGC.java	Fri Oct 18 10:37:26 2013 +0000
   129.2 +++ b/test/gc/startup_warnings/TestSerialGC.java	Fri Oct 18 19:44:40 2013 -0700
   129.3 @@ -38,7 +38,7 @@
   129.4    public static void main(String args[]) throws Exception {
   129.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseSerialGC", "-version");
   129.6      OutputAnalyzer output = new OutputAnalyzer(pb.start());
   129.7 -    output.shouldNotContain("warning");
   129.8 +    output.shouldNotContain("deprecated");
   129.9      output.shouldNotContain("error");
  129.10      output.shouldHaveExitValue(0);
  129.11    }
