Merge

author      chegar
date        Fri, 30 Aug 2013 09:50:49 +0100
changeset   5879:07b5f47d7a18
parent      5878:d4fa23d6c35b
parent      5561:b649cfa58604
child       5880:98a2169ed7ac

src/share/vm/classfile/classFileParser.cpp
     1.1 --- a/.hgtags	Fri Aug 23 22:12:18 2013 +0100
     1.2 +++ b/.hgtags	Fri Aug 30 09:50:49 2013 +0100
     1.3 @@ -370,3 +370,5 @@
     1.4  6f9be7f87b9653e94fd8fb3070891a0cc91b15bf jdk8-b103
     1.5  580430d131ccd475e2f2ad4006531b8c4813d102 hs25-b46
     1.6  104743074675359cfbf7f4dcd9ab2a5974a16627 jdk8-b104
     1.7 +c1604d5885a6f2adc0bcea2fa142a8f6bafad2f0 hs25-b47
     1.8 +acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Fri Aug 23 22:12:18 2013 +0100
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Fri Aug 30 09:50:49 2013 +0100
     2.3 @@ -75,19 +75,19 @@
     2.4      javaFieldsCount      = new CIntField(type.getCIntegerField("_java_fields_count"), 0);
     2.5      constants            = new MetadataField(type.getAddressField("_constants"), 0);
     2.6      classLoaderData      = type.getAddressField("_class_loader_data");
     2.7 -    sourceFileName       = type.getAddressField("_source_file_name");
     2.8      sourceDebugExtension = type.getAddressField("_source_debug_extension");
     2.9      innerClasses         = type.getAddressField("_inner_classes");
    2.10 +    sourceFileNameIndex  = new CIntField(type.getCIntegerField("_source_file_name_index"), 0);
    2.11      nonstaticFieldSize   = new CIntField(type.getCIntegerField("_nonstatic_field_size"), 0);
    2.12      staticFieldSize      = new CIntField(type.getCIntegerField("_static_field_size"), 0);
    2.13 -    staticOopFieldCount   = new CIntField(type.getCIntegerField("_static_oop_field_count"), 0);
    2.14 +    staticOopFieldCount  = new CIntField(type.getCIntegerField("_static_oop_field_count"), 0);
    2.15      nonstaticOopMapSize  = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), 0);
    2.16      isMarkedDependent    = new CIntField(type.getCIntegerField("_is_marked_dependent"), 0);
    2.17      initState            = new CIntField(type.getCIntegerField("_init_state"), 0);
    2.18      vtableLen            = new CIntField(type.getCIntegerField("_vtable_len"), 0);
    2.19      itableLen            = new CIntField(type.getCIntegerField("_itable_len"), 0);
    2.20      breakpoints          = type.getAddressField("_breakpoints");
    2.21 -    genericSignature     = type.getAddressField("_generic_signature");
    2.22 +    genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"), 0);
    2.23      majorVersion         = new CIntField(type.getCIntegerField("_major_version"), 0);
    2.24      minorVersion         = new CIntField(type.getCIntegerField("_minor_version"), 0);
    2.25      headerSize           = Oop.alignObjectOffset(type.getSize());
    2.26 @@ -134,9 +134,9 @@
    2.27    private static CIntField javaFieldsCount;
    2.28    private static MetadataField constants;
    2.29    private static AddressField  classLoaderData;
    2.30 -  private static AddressField  sourceFileName;
    2.31    private static AddressField  sourceDebugExtension;
    2.32    private static AddressField  innerClasses;
    2.33 +  private static CIntField sourceFileNameIndex;
    2.34    private static CIntField nonstaticFieldSize;
    2.35    private static CIntField staticFieldSize;
    2.36    private static CIntField staticOopFieldCount;
    2.37 @@ -146,7 +146,7 @@
    2.38    private static CIntField vtableLen;
    2.39    private static CIntField itableLen;
    2.40    private static AddressField breakpoints;
    2.41 -  private static AddressField  genericSignature;
    2.42 +  private static CIntField genericSignatureIndex;
    2.43    private static CIntField majorVersion;
    2.44    private static CIntField minorVersion;
    2.45  
    2.46 @@ -346,7 +346,7 @@
    2.47    public ConstantPool getConstants()        { return (ConstantPool) constants.getValue(this); }
    2.48    public ClassLoaderData getClassLoaderData() { return                ClassLoaderData.instantiateWrapperFor(classLoaderData.getValue(getAddress())); }
    2.49    public Oop       getClassLoader()         { return                getClassLoaderData().getClassLoader(); }
    2.50 -  public Symbol    getSourceFileName()      { return getSymbol(sourceFileName); }
    2.51 +  public Symbol    getSourceFileName()      { return                getConstants().getSymbolAt(sourceFileNameIndex.getValue(this)); }
    2.52    public String    getSourceDebugExtension(){ return                CStringUtilities.getString(sourceDebugExtension.getValue(getAddress())); }
    2.53    public long      getNonstaticFieldSize()  { return                nonstaticFieldSize.getValue(this); }
    2.54    public long      getStaticOopFieldCount() { return                staticOopFieldCount.getValue(this); }
    2.55 @@ -354,7 +354,7 @@
    2.56    public boolean   getIsMarkedDependent()   { return                isMarkedDependent.getValue(this) != 0; }
    2.57    public long      getVtableLen()           { return                vtableLen.getValue(this); }
    2.58    public long      getItableLen()           { return                itableLen.getValue(this); }
    2.59 -  public Symbol    getGenericSignature()    { return getSymbol(genericSignature); }
    2.60 +  public Symbol    getGenericSignature()    { return                getConstants().getSymbolAt(genericSignatureIndex.getValue(this)); }
    2.61    public long      majorVersion()           { return                majorVersion.getValue(this); }
    2.62    public long      minorVersion()           { return                minorVersion.getValue(this); }
    2.63  
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Fri Aug 23 22:12:18 2013 +0100
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java	Fri Aug 30 09:50:49 2013 +0100
     3.3 @@ -92,8 +92,13 @@
     3.4                      System.err.println("Warning: Can not create class filter!");
     3.5                  }
     3.6              }
     3.7 -            String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
     3.8 -            setOutputDirectory(outputDirectory);
     3.9 +
    3.10 +            // outputDirectory and jarStream are alternatives: setting one closes the other.
    3.11 +            // If neither is set, use outputDirectory from the System property:
    3.12 +            if (outputDirectory == null && jarStream == null) {
    3.13 +                String dirName = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
    3.14 +                setOutputDirectory(dirName);
    3.15 +            }
    3.16  
    3.17              // walk through the system dictionary
    3.18              SystemDictionary dict = VM.getVM().getSystemDictionary();
     4.1 --- a/make/bsd/makefiles/gcc.make	Fri Aug 23 22:12:18 2013 +0100
     4.2 +++ b/make/bsd/makefiles/gcc.make	Fri Aug 30 09:50:49 2013 +0100
     4.3 @@ -247,7 +247,7 @@
     4.4  # Not yet supported by clang in Xcode 4.6.2
     4.5  #  WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
     4.6    WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
     4.7 -  WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
     4.8 +  WARNINGS_ARE_ERRORS += -Wno-empty-body
     4.9  endif
    4.10  
    4.11  WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
     5.1 --- a/make/hotspot_version	Fri Aug 23 22:12:18 2013 +0100
     5.2 +++ b/make/hotspot_version	Fri Aug 30 09:50:49 2013 +0100
     5.3 @@ -35,7 +35,7 @@
     5.4  
     5.5  HS_MAJOR_VER=25
     5.6  HS_MINOR_VER=0
     5.7 -HS_BUILD_NUMBER=46
     5.8 +HS_BUILD_NUMBER=47
     5.9  
    5.10  JDK_MAJOR_VER=1
    5.11  JDK_MINOR_VER=8
     6.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Aug 23 22:12:18 2013 +0100
     6.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Aug 30 09:50:49 2013 +0100
     6.3 @@ -1,5 +1,5 @@
     6.4  /*
     6.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     6.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     6.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8   *
     6.9   * This code is free software; you can redistribute it and/or modify it
    6.10 @@ -29,6 +29,7 @@
    6.11  #include "interpreter/interpreter.hpp"
    6.12  #include "memory/cardTableModRefBS.hpp"
    6.13  #include "memory/resourceArea.hpp"
    6.14 +#include "memory/universe.hpp"
    6.15  #include "prims/methodHandles.hpp"
    6.16  #include "runtime/biasedLocking.hpp"
    6.17  #include "runtime/interfaceSupport.hpp"
    6.18 @@ -1145,7 +1146,7 @@
    6.19    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
    6.20    int klass_index = oop_recorder()->find_index(k);
    6.21    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
    6.22 -  narrowOop encoded_k = oopDesc::encode_klass(k);
    6.23 +  narrowOop encoded_k = Klass::encode_klass(k);
    6.24  
    6.25    assert_not_delayed();
    6.26    // Relocation with special format (see relocInfo_sparc.hpp).
    6.27 @@ -1419,7 +1420,6 @@
    6.28    load_klass(O0_obj, O0_obj);
    6.29    // assert((klass != NULL)
    6.30    br_null_short(O0_obj, pn, fail);
    6.31 -  // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
    6.32  
    6.33    wrccr( O5_save_flags ); // Restore CCR's
    6.34  
    6.35 @@ -4089,52 +4089,91 @@
    6.36  }
    6.37  
    6.38  void MacroAssembler::encode_klass_not_null(Register r) {
    6.39 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
    6.40    assert (UseCompressedKlassPointers, "must be compressed");
    6.41 -  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
    6.42 -  if (Universe::narrow_klass_base() != NULL)
    6.43 -    sub(r, G6_heapbase, r);
    6.44 -  srlx(r, LogKlassAlignmentInBytes, r);
    6.45 +  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    6.46 +  assert(r != G6_heapbase, "bad register choice");
    6.47 +  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    6.48 +  sub(r, G6_heapbase, r);
    6.49 +  if (Universe::narrow_klass_shift() != 0) {
    6.50 +    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
    6.51 +    srlx(r, LogKlassAlignmentInBytes, r);
    6.52 +  }
    6.53 +  reinit_heapbase();
    6.54  }
    6.55  
    6.56  void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
    6.57 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
    6.58 -  assert (UseCompressedKlassPointers, "must be compressed");
    6.59 -  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
    6.60 -  if (Universe::narrow_klass_base() == NULL) {
    6.61 -    srlx(src, LogKlassAlignmentInBytes, dst);
    6.62 +  if (src == dst) {
    6.63 +    encode_klass_not_null(src);
    6.64    } else {
    6.65 -    sub(src, G6_heapbase, dst);
    6.66 -    srlx(dst, LogKlassAlignmentInBytes, dst);
    6.67 +    assert (UseCompressedKlassPointers, "must be compressed");
    6.68 +    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    6.69 +    set((intptr_t)Universe::narrow_klass_base(), dst);
    6.70 +    sub(src, dst, dst);
    6.71 +    if (Universe::narrow_klass_shift() != 0) {
    6.72 +      srlx(dst, LogKlassAlignmentInBytes, dst);
    6.73 +    }
    6.74    }
    6.75  }
    6.76  
    6.77 +// Function instr_size_for_decode_klass_not_null() counts the instructions
    6.78 +// generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
    6.79 +// the instructions they generate change, then this method needs to be updated.
    6.80 +int MacroAssembler::instr_size_for_decode_klass_not_null() {
    6.81 +  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
    6.82 +  // set + add + set
    6.83 +  int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
    6.84 +    insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
    6.85 +  if (Universe::narrow_klass_shift() == 0) {
    6.86 +    return num_instrs * BytesPerInstWord;
    6.87 +  } else { // sllx
    6.88 +    return (num_instrs + 1) * BytesPerInstWord;
    6.89 +  }
    6.90 +}
    6.91 +
    6.92 +// !!! If the instructions that get generated here change then function
    6.93 +// instr_size_for_decode_klass_not_null() needs to get updated.
    6.94  void  MacroAssembler::decode_klass_not_null(Register r) {
    6.95 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
    6.96    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    6.97    // pd_code_size_limit.
    6.98    assert (UseCompressedKlassPointers, "must be compressed");
    6.99 -  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
   6.100 -  sllx(r, LogKlassAlignmentInBytes, r);
   6.101 -  if (Universe::narrow_klass_base() != NULL)
   6.102 -    add(r, G6_heapbase, r);
   6.103 +  assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   6.104 +  assert(r != G6_heapbase, "bad register choice");
   6.105 +  set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
   6.106 +  if (Universe::narrow_klass_shift() != 0)
   6.107 +    sllx(r, LogKlassAlignmentInBytes, r);
   6.108 +  add(r, G6_heapbase, r);
   6.109 +  reinit_heapbase();
   6.110  }
   6.111  
   6.112  void  MacroAssembler::decode_klass_not_null(Register src, Register dst) {
   6.113 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   6.114 -  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   6.115 -  // pd_code_size_limit.
   6.116 -  assert (UseCompressedKlassPointers, "must be compressed");
   6.117 -  assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
   6.118 -  sllx(src, LogKlassAlignmentInBytes, dst);
   6.119 -  if (Universe::narrow_klass_base() != NULL)
   6.120 -    add(dst, G6_heapbase, dst);
   6.121 +  if (src == dst) {
   6.122 +    decode_klass_not_null(src);
   6.123 +  } else {
   6.124 +    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   6.125 +    // pd_code_size_limit.
   6.126 +    assert (UseCompressedKlassPointers, "must be compressed");
   6.127 +    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   6.128 +    if (Universe::narrow_klass_shift() != 0) {
   6.129 +      assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
   6.130 +      set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
   6.131 +      sllx(src, LogKlassAlignmentInBytes, dst);
   6.132 +      add(dst, G6_heapbase, dst);
   6.133 +      reinit_heapbase();
   6.134 +    } else {
   6.135 +      set((intptr_t)Universe::narrow_klass_base(), dst);
   6.136 +      add(src, dst, dst);
   6.137 +    }
   6.138 +  }
   6.139  }
   6.140  
   6.141  void MacroAssembler::reinit_heapbase() {
   6.142    if (UseCompressedOops || UseCompressedKlassPointers) {
   6.143 -    AddressLiteral base(Universe::narrow_ptrs_base_addr());
   6.144 -    load_ptr_contents(base, G6_heapbase);
   6.145 +    if (Universe::heap() != NULL) {
   6.146 +      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
   6.147 +    } else {
   6.148 +      AddressLiteral base(Universe::narrow_ptrs_base_addr());
   6.149 +      load_ptr_contents(base, G6_heapbase);
   6.150 +    }
   6.151    }
   6.152  }
   6.153  
     7.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Aug 23 22:12:18 2013 +0100
     7.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Aug 30 09:50:49 2013 +0100
     7.3 @@ -1177,6 +1177,9 @@
     7.4    void push_CPU_state();
     7.5    void pop_CPU_state();
     7.6  
     7.7 +  // Returns the byte size of the instructions generated by decode_klass_not_null().
     7.8 +  static int instr_size_for_decode_klass_not_null();
     7.9 +
    7.10    // if heap base register is used - reinit it with the correct value
    7.11    void reinit_heapbase();
    7.12  
     8.1 --- a/src/cpu/sparc/vm/relocInfo_sparc.cpp	Fri Aug 23 22:12:18 2013 +0100
     8.2 +++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp	Fri Aug 30 09:50:49 2013 +0100
     8.3 @@ -1,5 +1,5 @@
     8.4  /*
     8.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
     8.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
     8.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.8   *
     8.9   * This code is free software; you can redistribute it and/or modify it
    8.10 @@ -97,7 +97,7 @@
    8.11      guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
    8.12      if (format() != 0) {
    8.13        assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
    8.14 -      jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : oopDesc::encode_klass((Klass*)x);
    8.15 +      jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
    8.16        inst &= ~Assembler::hi22(-1);
    8.17        inst |=  Assembler::hi22((intptr_t)np);
    8.18        if (verify_only) {
     9.1 --- a/src/cpu/sparc/vm/sparc.ad	Fri Aug 23 22:12:18 2013 +0100
     9.2 +++ b/src/cpu/sparc/vm/sparc.ad	Fri Aug 30 09:50:49 2013 +0100
     9.3 @@ -559,10 +559,7 @@
     9.4      int klass_load_size;
     9.5      if (UseCompressedKlassPointers) {
     9.6        assert(Universe::heap() != NULL, "java heap should be initialized");
     9.7 -      if (Universe::narrow_klass_base() == NULL)
     9.8 -        klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
     9.9 -      else
    9.10 -        klass_load_size = 3*BytesPerInstWord;
    9.11 +      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    9.12      } else {
    9.13        klass_load_size = 1*BytesPerInstWord;
    9.14      }
    9.15 @@ -1663,9 +1660,12 @@
    9.16    if (UseCompressedKlassPointers) {
    9.17      assert(Universe::heap() != NULL, "java heap should be initialized");
    9.18      st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    9.19 -    st->print_cr("\tSLL    R_G5,3,R_G5");
    9.20 -    if (Universe::narrow_klass_base() != NULL)
    9.21 -      st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
    9.22 +    st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
    9.23 +    if (Universe::narrow_klass_shift() != 0) {
    9.24 +      st->print_cr("\tSLL    R_G5,3,R_G5");
    9.25 +    }
    9.26 +    st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
    9.27 +    st->print_cr("\tSET    Universe::narrow_ptrs_base,R_G6_heap_base");
    9.28    } else {
    9.29      st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
    9.30    }
    9.31 @@ -2563,10 +2563,7 @@
    9.32        int klass_load_size;
    9.33        if (UseCompressedKlassPointers) {
    9.34          assert(Universe::heap() != NULL, "java heap should be initialized");
    9.35 -        if (Universe::narrow_klass_base() == NULL)
    9.36 -          klass_load_size = 2*BytesPerInstWord;
    9.37 -        else
    9.38 -          klass_load_size = 3*BytesPerInstWord;
    9.39 +        klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    9.40        } else {
    9.41          klass_load_size = 1*BytesPerInstWord;
    9.42        }
    10.1 --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Aug 23 22:12:18 2013 +0100
    10.2 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Aug 30 09:50:49 2013 +0100
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    10.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -219,13 +219,13 @@
   10.11        const int basic = 5*BytesPerInstWord +
   10.12                          // shift;add for load_klass (only shift with zero heap based)
   10.13                          (UseCompressedKlassPointers ?
   10.14 -                         ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
   10.15 +                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   10.16        return basic + slop;
   10.17      } else {
   10.18        const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
   10.19                          // shift;add for load_klass (only shift with zero heap based)
   10.20                          (UseCompressedKlassPointers ?
   10.21 -                         ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
   10.22 +                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   10.23        return (basic + slop);
   10.24      }
   10.25    }
    11.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Aug 23 22:12:18 2013 +0100
    11.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Aug 30 09:50:49 2013 +0100
    11.3 @@ -30,6 +30,7 @@
    11.4  #include "interpreter/interpreter.hpp"
    11.5  #include "memory/cardTableModRefBS.hpp"
    11.6  #include "memory/resourceArea.hpp"
    11.7 +#include "memory/universe.hpp"
    11.8  #include "prims/methodHandles.hpp"
    11.9  #include "runtime/biasedLocking.hpp"
   11.10  #include "runtime/interfaceSupport.hpp"
   11.11 @@ -4810,23 +4811,8 @@
   11.12  }
   11.13  
   11.14  void MacroAssembler::load_prototype_header(Register dst, Register src) {
   11.15 -#ifdef _LP64
   11.16 -  if (UseCompressedKlassPointers) {
   11.17 -    assert (Universe::heap() != NULL, "java heap should be initialized");
   11.18 -    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
   11.19 -    if (Universe::narrow_klass_shift() != 0) {
   11.20 -      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
   11.21 -      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
   11.22 -      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
   11.23 -    } else {
   11.24 -      movq(dst, Address(dst, Klass::prototype_header_offset()));
   11.25 -    }
   11.26 -  } else
   11.27 -#endif
   11.28 -  {
   11.29 -    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
   11.30 -    movptr(dst, Address(dst, Klass::prototype_header_offset()));
   11.31 -  }
   11.32 +  load_klass(dst, src);
   11.33 +  movptr(dst, Address(dst, Klass::prototype_header_offset()));
   11.34  }
   11.35  
   11.36  void MacroAssembler::store_klass(Register dst, Register src) {
   11.37 @@ -4914,7 +4900,7 @@
   11.38  
   11.39  #ifdef ASSERT
   11.40  void MacroAssembler::verify_heapbase(const char* msg) {
   11.41 -  assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
   11.42 +  assert (UseCompressedOops, "should be compressed");
   11.43    assert (Universe::heap() != NULL, "java heap should be initialized");
   11.44    if (CheckCompressedOops) {
   11.45      Label ok;
   11.46 @@ -5058,69 +5044,80 @@
   11.47  }
   11.48  
   11.49  void MacroAssembler::encode_klass_not_null(Register r) {
   11.50 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   11.51 -#ifdef ASSERT
   11.52 -  verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
   11.53 -#endif
   11.54 -  if (Universe::narrow_klass_base() != NULL) {
   11.55 -    subq(r, r12_heapbase);
   11.56 -  }
   11.57 +  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   11.58 +  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
   11.59 +  assert(r != r12_heapbase, "Encoding a klass in r12");
   11.60 +  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
   11.61 +  subq(r, r12_heapbase);
   11.62    if (Universe::narrow_klass_shift() != 0) {
   11.63      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
   11.64      shrq(r, LogKlassAlignmentInBytes);
   11.65    }
   11.66 +  reinit_heapbase();
   11.67  }
   11.68  
   11.69  void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
   11.70 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   11.71 -#ifdef ASSERT
   11.72 -  verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
   11.73 -#endif
   11.74 -  if (dst != src) {
   11.75 -    movq(dst, src);
   11.76 -  }
   11.77 -  if (Universe::narrow_klass_base() != NULL) {
   11.78 -    subq(dst, r12_heapbase);
   11.79 -  }
   11.80 -  if (Universe::narrow_klass_shift() != 0) {
   11.81 -    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
   11.82 -    shrq(dst, LogKlassAlignmentInBytes);
   11.83 -  }
   11.84 -}
   11.85 -
   11.86 +  if (dst == src) {
   11.87 +    encode_klass_not_null(src);
   11.88 +  } else {
   11.89 +    mov64(dst, (int64_t)Universe::narrow_klass_base());
   11.90 +    negq(dst);
   11.91 +    addq(dst, src);
   11.92 +    if (Universe::narrow_klass_shift() != 0) {
   11.93 +      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
   11.94 +      shrq(dst, LogKlassAlignmentInBytes);
   11.95 +    }
   11.96 +  }
   11.97 +}
   11.98 +
   11.99 +// Function instr_size_for_decode_klass_not_null() counts the instructions
  11.100 +// generated by decode_klass_not_null(register r) and reinit_heapbase(),
  11.101 +// when (Universe::heap() != NULL).  Hence, if the instructions they
  11.102 +// generate change, then this method needs to be updated.
  11.103 +int MacroAssembler::instr_size_for_decode_klass_not_null() {
  11.104 +  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
  11.105 +  // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
  11.106 +  return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
  11.107 +}
  11.108 +
  11.109 +// !!! If the instructions that get generated here change then function
  11.110 +// instr_size_for_decode_klass_not_null() needs to get updated.
  11.111  void  MacroAssembler::decode_klass_not_null(Register r) {
  11.112 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
  11.113    // Note: it will change flags
  11.114 +  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
  11.115    assert (UseCompressedKlassPointers, "should only be used for compressed headers");
  11.116 +  assert(r != r12_heapbase, "Decoding a klass in r12");
  11.117    // Cannot assert, unverified entry point counts instructions (see .ad file)
  11.118    // vtableStubs also counts instructions in pd_code_size_limit.
  11.119    // Also do not verify_oop as this is called by verify_oop.
  11.120    if (Universe::narrow_klass_shift() != 0) {
  11.121      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
  11.122      shlq(r, LogKlassAlignmentInBytes);
  11.123 -    if (Universe::narrow_klass_base() != NULL) {
  11.124 -      addq(r, r12_heapbase);
  11.125 -    }
  11.126 +  }
  11.127 +  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
  11.128 +  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
  11.129 +  addq(r, r12_heapbase);
  11.130 +  reinit_heapbase();
  11.131 +}
  11.132 +
  11.133 +void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  11.134 +  // Note: it will change flags
  11.135 +  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
  11.136 +  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
  11.137 +  if (dst == src) {
  11.138 +    decode_klass_not_null(dst);
  11.139    } else {
  11.140 -    assert (Universe::narrow_klass_base() == NULL, "sanity");
  11.141 -  }
  11.142 -}
  11.143 -
  11.144 -void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  11.145 -  assert(Metaspace::is_initialized(), "metaspace should be initialized");
  11.146 -  // Note: it will change flags
  11.147 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
  11.148 -  // Cannot assert, unverified entry point counts instructions (see .ad file)
  11.149 -  // vtableStubs also counts instructions in pd_code_size_limit.
  11.150 -  // Also do not verify_oop as this is called by verify_oop.
  11.151 -  if (Universe::narrow_klass_shift() != 0) {
  11.152 -    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
  11.153 -    assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
  11.154 -    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
  11.155 -  } else {
  11.156 -    assert (Universe::narrow_klass_base() == NULL, "sanity");
  11.157 -    if (dst != src) {
  11.158 -      movq(dst, src);
  11.159 +    // Cannot assert, unverified entry point counts instructions (see .ad file)
  11.160 +    // vtableStubs also counts instructions in pd_code_size_limit.
  11.161 +    // Also do not verify_oop as this is called by verify_oop.
  11.162 +
  11.163 +    mov64(dst, (int64_t)Universe::narrow_klass_base());
  11.164 +    if (Universe::narrow_klass_shift() != 0) {
  11.165 +      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
  11.166 +      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
  11.167 +      leaq(dst, Address(dst, src, Address::times_8, 0));
  11.168 +    } else {
  11.169 +      addq(dst, src);
  11.170      }
  11.171    }
  11.172  }
  11.173 @@ -5148,7 +5145,7 @@
  11.174    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  11.175    int klass_index = oop_recorder()->find_index(k);
  11.176    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  11.177 -  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
  11.178 +  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
  11.179  }
  11.180  
  11.181  void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
  11.182 @@ -5156,7 +5153,7 @@
  11.183    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  11.184    int klass_index = oop_recorder()->find_index(k);
  11.185    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  11.186 -  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
  11.187 +  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
  11.188  }
  11.189  
  11.190  void  MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
  11.191 @@ -5182,7 +5179,7 @@
  11.192    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  11.193    int klass_index = oop_recorder()->find_index(k);
  11.194    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  11.195 -  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
  11.196 +  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
  11.197  }
  11.198  
  11.199  void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
  11.200 @@ -5190,14 +5187,23 @@
  11.201    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  11.202    int klass_index = oop_recorder()->find_index(k);
  11.203    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  11.204 -  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
  11.205 +  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
  11.206  }
  11.207  
  11.208  void MacroAssembler::reinit_heapbase() {
  11.209    if (UseCompressedOops || UseCompressedKlassPointers) {
  11.210 -    movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
  11.211 -  }
  11.212 -}
  11.213 +    if (Universe::heap() != NULL) {
  11.214 +      if (Universe::narrow_oop_base() == NULL) {
  11.215 +        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
  11.216 +      } else {
  11.217 +        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
  11.218 +      }
  11.219 +    } else {
  11.220 +      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
  11.221 +    }
  11.222 +  }
  11.223 +}
  11.224 +
  11.225  #endif // _LP64
  11.226  
  11.227  
    12.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Aug 23 22:12:18 2013 +0100
    12.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Aug 30 09:50:49 2013 +0100
    12.3 @@ -371,6 +371,10 @@
    12.4    void cmp_narrow_klass(Register dst, Klass* k);
    12.5    void cmp_narrow_klass(Address dst, Klass* k);
    12.6  
    12.7 +  // Returns the byte size of the instructions generated by decode_klass_not_null()
    12.8 +  // when compressed klass pointers are being used.
    12.9 +  static int instr_size_for_decode_klass_not_null();
   12.10 +
   12.11    // if heap base register is used - reinit it with the correct value
   12.12    void reinit_heapbase();
   12.13  
    13.1 --- a/src/cpu/x86/vm/relocInfo_x86.cpp	Fri Aug 23 22:12:18 2013 +0100
    13.2 +++ b/src/cpu/x86/vm/relocInfo_x86.cpp	Fri Aug 30 09:50:49 2013 +0100
    13.3 @@ -1,5 +1,5 @@
    13.4  /*
    13.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    13.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    13.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8   *
    13.9   * This code is free software; you can redistribute it and/or modify it
   13.10 @@ -55,9 +55,9 @@
   13.11      }
   13.12    } else {
   13.13        if (verify_only) {
   13.14 -        assert(*(uint32_t*) disp == oopDesc::encode_klass((Klass*)x), "instructions must match");
   13.15 +        assert(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
   13.16        } else {
   13.17 -        *(int32_t*) disp = oopDesc::encode_klass((Klass*)x);
   13.18 +        *(int32_t*) disp = Klass::encode_klass((Klass*)x);
   13.19        }
   13.20      }
   13.21    } else {
    14.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Aug 23 22:12:18 2013 +0100
    14.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Aug 30 09:50:49 2013 +0100
    14.3 @@ -675,7 +675,6 @@
    14.4      __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
    14.5      __ testptr(rax, rax);
    14.6      __ jcc(Assembler::zero, error);              // if klass is NULL it is broken
    14.7 -    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
    14.8  
    14.9      // return if everything seems ok
   14.10      __ bind(exit);
    15.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Aug 23 22:12:18 2013 +0100
    15.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Aug 30 09:50:49 2013 +0100
    15.3 @@ -1021,7 +1021,6 @@
    15.4      __ load_klass(rax, rax);  // get klass
    15.5      __ testptr(rax, rax);
    15.6      __ jcc(Assembler::zero, error); // if klass is NULL it is broken
    15.7 -    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
    15.8  
    15.9      // return if everything seems ok
   15.10      __ bind(exit);
    16.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Aug 23 22:12:18 2013 +0100
    16.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Aug 30 09:50:49 2013 +0100
    16.3 @@ -849,9 +849,9 @@
    16.4      address entry = __ pc();
    16.5  
    16.6      // rbx,: Method*
    16.7 -    // rsi: senderSP must preserved for slow path, set SP to it on fast path
    16.8 -    // rdx: scratch
    16.9 -    // rdi: scratch
   16.10 +    // r13: senderSP must preserved for slow path, set SP to it on fast path
   16.11 +    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
   16.12 +    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
   16.13  
   16.14      Label slow_path;
   16.15      // If we need a safepoint check, generate full interpreter entry.
   16.16 @@ -865,8 +865,8 @@
   16.17  
   16.18      // Load parameters
   16.19      const Register crc = rax;  // crc
   16.20 -    const Register val = rdx;  // source java byte value
   16.21 -    const Register tbl = rdi;  // scratch
   16.22 +    const Register val = c_rarg0;  // source java byte value
   16.23 +    const Register tbl = c_rarg1;  // scratch
   16.24  
   16.25      // Arguments are reversed on java expression stack
   16.26      __ movl(val, Address(rsp,   wordSize)); // byte value
   16.27 @@ -880,7 +880,7 @@
   16.28  
   16.29      // _areturn
   16.30      __ pop(rdi);                // get return address
   16.31 -    __ mov(rsp, rsi);           // set sp to sender sp
   16.32 +    __ mov(rsp, r13);           // set sp to sender sp
   16.33      __ jmp(rdi);
   16.34  
   16.35      // generate a vanilla native entry as the slow path
   16.36 @@ -919,20 +919,24 @@
   16.37      const Register crc = c_rarg0;  // crc
   16.38      const Register buf = c_rarg1;  // source java byte array address
   16.39      const Register len = c_rarg2;  // length
   16.40 +    const Register off = len;      // offset (never overlaps with 'len')
   16.41  
   16.42      // Arguments are reversed on java expression stack
   16.43 -    __ movl(len,   Address(rsp,   wordSize)); // Length
   16.44      // Calculate address of start element
   16.45      if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
   16.46        __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
   16.47 -      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
   16.48 +      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
   16.49 +      __ addq(buf, off); // + offset
   16.50        __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
   16.51      } else {
   16.52        __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
   16.53        __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
   16.54 -      __ addptr(buf, Address(rsp, 2*wordSize)); // + offset
   16.55 +      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
   16.56 +      __ addq(buf, off); // + offset
   16.57        __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
   16.58      }
   16.59 +    // Can now load 'len' since we're finished with 'off'
   16.60 +    __ movl(len, Address(rsp, wordSize)); // Length
   16.61  
   16.62      __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
   16.63      // result in rax
    17.1 --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Aug 23 22:12:18 2013 +0100
    17.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Aug 30 09:50:49 2013 +0100
    17.3 @@ -211,11 +211,11 @@
    17.4    if (is_vtable_stub) {
    17.5      // Vtable stub size
    17.6      return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
    17.7 -           (UseCompressedKlassPointers ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
    17.8 +           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
    17.9    } else {
   17.10      // Itable stub size
   17.11      return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
   17.12 -           (UseCompressedKlassPointers ? 32 : 0);  // 2 leaqs
   17.13 +           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   17.14    }
   17.15    // In order to tune these parameters, run the JVM with VM options
   17.16    // +PrintMiscellaneous and +WizardMode to see information about
    18.1 --- a/src/cpu/x86/vm/x86_64.ad	Fri Aug 23 22:12:18 2013 +0100
    18.2 +++ b/src/cpu/x86/vm/x86_64.ad	Fri Aug 30 09:50:49 2013 +0100
    18.3 @@ -1393,9 +1393,7 @@
    18.4  {
    18.5    if (UseCompressedKlassPointers) {
    18.6      st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    18.7 -    if (Universe::narrow_klass_shift() != 0) {
    18.8 -      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
    18.9 -    }
   18.10 +    st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
   18.11      st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
   18.12    } else {
   18.13      st->print_cr("\tcmpq    rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
   18.14 @@ -4035,146 +4033,6 @@
   18.15    %}
   18.16  %}
   18.17  
   18.18 -operand indirectNarrowKlass(rRegN reg)
   18.19 -%{
   18.20 -  predicate(Universe::narrow_klass_shift() == 0);
   18.21 -  constraint(ALLOC_IN_RC(ptr_reg));
   18.22 -  match(DecodeNKlass reg);
   18.23 -
   18.24 -  format %{ "[$reg]" %}
   18.25 -  interface(MEMORY_INTER) %{
   18.26 -    base($reg);
   18.27 -    index(0x4);
   18.28 -    scale(0x0);
   18.29 -    disp(0x0);
   18.30 -  %}
   18.31 -%}
   18.32 -
   18.33 -operand indOffset8NarrowKlass(rRegN reg, immL8 off)
   18.34 -%{
   18.35 -  predicate(Universe::narrow_klass_shift() == 0);
   18.36 -  constraint(ALLOC_IN_RC(ptr_reg));
   18.37 -  match(AddP (DecodeNKlass reg) off);
   18.38 -
   18.39 -  format %{ "[$reg + $off (8-bit)]" %}
   18.40 -  interface(MEMORY_INTER) %{
   18.41 -    base($reg);
   18.42 -    index(0x4);
   18.43 -    scale(0x0);
   18.44 -    disp($off);
   18.45 -  %}
   18.46 -%}
   18.47 -
   18.48 -operand indOffset32NarrowKlass(rRegN reg, immL32 off)
   18.49 -%{
   18.50 -  predicate(Universe::narrow_klass_shift() == 0);
   18.51 -  constraint(ALLOC_IN_RC(ptr_reg));
   18.52 -  match(AddP (DecodeNKlass reg) off);
   18.53 -
   18.54 -  format %{ "[$reg + $off (32-bit)]" %}
   18.55 -  interface(MEMORY_INTER) %{
   18.56 -    base($reg);
   18.57 -    index(0x4);
   18.58 -    scale(0x0);
   18.59 -    disp($off);
   18.60 -  %}
   18.61 -%}
   18.62 -
   18.63 -operand indIndexOffsetNarrowKlass(rRegN reg, rRegL lreg, immL32 off)
   18.64 -%{
   18.65 -  predicate(Universe::narrow_klass_shift() == 0);
   18.66 -  constraint(ALLOC_IN_RC(ptr_reg));
   18.67 -  match(AddP (AddP (DecodeNKlass reg) lreg) off);
   18.68 -
   18.69 -  op_cost(10);
   18.70 -  format %{"[$reg + $off + $lreg]" %}
   18.71 -  interface(MEMORY_INTER) %{
   18.72 -    base($reg);
   18.73 -    index($lreg);
   18.74 -    scale(0x0);
   18.75 -    disp($off);
   18.76 -  %}
   18.77 -%}
   18.78 -
   18.79 -operand indIndexNarrowKlass(rRegN reg, rRegL lreg)
   18.80 -%{
   18.81 -  predicate(Universe::narrow_klass_shift() == 0);
   18.82 -  constraint(ALLOC_IN_RC(ptr_reg));
   18.83 -  match(AddP (DecodeNKlass reg) lreg);
   18.84 -
   18.85 -  op_cost(10);
   18.86 -  format %{"[$reg + $lreg]" %}
   18.87 -  interface(MEMORY_INTER) %{
   18.88 -    base($reg);
   18.89 -    index($lreg);
   18.90 -    scale(0x0);
   18.91 -    disp(0x0);
   18.92 -  %}
   18.93 -%}
   18.94 -
   18.95 -operand indIndexScaleNarrowKlass(rRegN reg, rRegL lreg, immI2 scale)
   18.96 -%{
   18.97 -  predicate(Universe::narrow_klass_shift() == 0);
   18.98 -  constraint(ALLOC_IN_RC(ptr_reg));
   18.99 -  match(AddP (DecodeNKlass reg) (LShiftL lreg scale));
  18.100 -
  18.101 -  op_cost(10);
  18.102 -  format %{"[$reg + $lreg << $scale]" %}
  18.103 -  interface(MEMORY_INTER) %{
  18.104 -    base($reg);
  18.105 -    index($lreg);
  18.106 -    scale($scale);
  18.107 -    disp(0x0);
  18.108 -  %}
  18.109 -%}
  18.110 -
  18.111 -operand indIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
  18.112 -%{
  18.113 -  predicate(Universe::narrow_klass_shift() == 0);
  18.114 -  constraint(ALLOC_IN_RC(ptr_reg));
  18.115 -  match(AddP (AddP (DecodeNKlass reg) (LShiftL lreg scale)) off);
  18.116 -
  18.117 -  op_cost(10);
  18.118 -  format %{"[$reg + $off + $lreg << $scale]" %}
  18.119 -  interface(MEMORY_INTER) %{
  18.120 -    base($reg);
  18.121 -    index($lreg);
  18.122 -    scale($scale);
  18.123 -    disp($off);
  18.124 -  %}
  18.125 -%}
  18.126 -
  18.127 -operand indCompressedKlassOffset(rRegN reg, immL32 off) %{
  18.128 -  predicate(UseCompressedKlassPointers && (Universe::narrow_klass_shift() == Address::times_8));
  18.129 -  constraint(ALLOC_IN_RC(ptr_reg));
  18.130 -  match(AddP (DecodeNKlass reg) off);
  18.131 -
  18.132 -  op_cost(10);
  18.133 -  format %{"[R12 + $reg << 3 + $off] (compressed klass addressing)" %}
  18.134 -  interface(MEMORY_INTER) %{
  18.135 -    base(0xc); // R12
  18.136 -    index($reg);
  18.137 -    scale(0x3);
  18.138 -    disp($off);
  18.139 -  %}
  18.140 -%}
  18.141 -
  18.142 -operand indPosIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegI idx, immI2 scale)
  18.143 -%{
  18.144 -  constraint(ALLOC_IN_RC(ptr_reg));
  18.145 -  predicate(Universe::narrow_klass_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  18.146 -  match(AddP (AddP (DecodeNKlass reg) (LShiftL (ConvI2L idx) scale)) off);
  18.147 -
  18.148 -  op_cost(10);
  18.149 -  format %{"[$reg + $off + $idx << $scale]" %}
  18.150 -  interface(MEMORY_INTER) %{
  18.151 -    base($reg);
  18.152 -    index($idx);
  18.153 -    scale($scale);
  18.154 -    disp($off);
  18.155 -  %}
  18.156 -%}
  18.157 -
  18.158  //----------Special Memory Operands--------------------------------------------
  18.159  // Stack Slot Operand - This operand is used for loading and storing temporary
  18.160  //                      values on the stack where a match requires a value to
  18.161 @@ -4345,11 +4203,7 @@
  18.162                 indCompressedOopOffset,
  18.163                 indirectNarrow, indOffset8Narrow, indOffset32Narrow,
  18.164                 indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
  18.165 -               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow,
  18.166 -               indCompressedKlassOffset,
  18.167 -               indirectNarrowKlass, indOffset8NarrowKlass, indOffset32NarrowKlass,
  18.168 -               indIndexOffsetNarrowKlass, indIndexNarrowKlass, indIndexScaleNarrowKlass,
  18.169 -               indIndexScaleOffsetNarrowKlass, indPosIndexScaleOffsetNarrowKlass);
  18.170 +               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);
  18.171  
  18.172  //----------PIPELINE-----------------------------------------------------------
  18.173  // Rules which define the behavior of the target architectures pipeline.
  18.174 @@ -6665,7 +6519,7 @@
  18.175  instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
  18.176    match(Set dst (EncodePKlass src));
  18.177    effect(KILL cr);
  18.178 -  format %{ "encode_heap_oop_not_null $dst,$src" %}
  18.179 +  format %{ "encode_klass_not_null $dst,$src" %}
  18.180    ins_encode %{
  18.181      __ encode_klass_not_null($dst$$Register, $src$$Register);
  18.182    %}
  18.183 @@ -6675,7 +6529,7 @@
  18.184  instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
  18.185    match(Set dst (DecodeNKlass src));
  18.186    effect(KILL cr);
  18.187 -  format %{ "decode_heap_oop_not_null $dst,$src" %}
  18.188 +  format %{ "decode_klass_not_null $dst,$src" %}
  18.189    ins_encode %{
  18.190      Register s = $src$$Register;
  18.191      Register d = $dst$$Register;
    19.1 --- a/src/cpu/zero/vm/assembler_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    19.2 +++ b/src/cpu/zero/vm/assembler_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    19.3 @@ -50,6 +50,7 @@
    19.4  #ifdef ASSERT
    19.5  bool AbstractAssembler::pd_check_instruction_mark() {
    19.6    ShouldNotCallThis();
    19.7 +  return false;
    19.8  }
    19.9  #endif
   19.10  
   19.11 @@ -73,6 +74,7 @@
   19.12  RegisterOrConstant MacroAssembler::delayed_value_impl(
   19.13    intptr_t* delayed_value_addr, Register tmpl, int offset) {
   19.14    ShouldNotCallThis();
   19.15 +  return RegisterOrConstant();
   19.16  }
   19.17  
   19.18  void MacroAssembler::store_oop(jobject obj) {
    20.1 --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    20.2 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    20.3 @@ -1008,6 +1008,7 @@
    20.4  
    20.5  address CppInterpreter::return_entry(TosState state, int length) {
    20.6    ShouldNotCallThis();
    20.7 +  return NULL;
    20.8  }
    20.9  
   20.10  address CppInterpreter::deopt_entry(TosState state, int length) {
    21.1 --- a/src/cpu/zero/vm/frame_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    21.2 +++ b/src/cpu/zero/vm/frame_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    21.3 @@ -116,6 +116,7 @@
    21.4  
    21.5  bool frame::safe_for_sender(JavaThread *thread) {
    21.6    ShouldNotCallThis();
    21.7 +  return false;
    21.8  }
    21.9  
   21.10  void frame::pd_gc_epilog() {
   21.11 @@ -123,6 +124,7 @@
   21.12  
   21.13  bool frame::is_interpreted_frame_valid(JavaThread *thread) const {
   21.14    ShouldNotCallThis();
   21.15 +  return false;
   21.16  }
   21.17  
   21.18  BasicType frame::interpreter_frame_result(oop* oop_result,
   21.19 @@ -184,9 +186,8 @@
   21.20  int frame::frame_size(RegisterMap* map) const {
   21.21  #ifdef PRODUCT
   21.22    ShouldNotCallThis();
   21.23 -#else
   21.24 +#endif // PRODUCT
   21.25    return 0; // make javaVFrame::print_value work
   21.26 -#endif // PRODUCT
   21.27  }
   21.28  
   21.29  intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
    22.1 --- a/src/cpu/zero/vm/frame_zero.inline.hpp	Fri Aug 23 22:12:18 2013 +0100
    22.2 +++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Fri Aug 30 09:50:49 2013 +0100
    22.3 @@ -36,7 +36,7 @@
    22.4    _deopt_state = unknown;
    22.5  }
    22.6  
    22.7 -inline address  frame::sender_pc()           const { ShouldNotCallThis();  }
    22.8 +inline address  frame::sender_pc()           const { ShouldNotCallThis(); return NULL; }
    22.9  
   22.10  inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   22.11    _zeroframe = zf;
   22.12 @@ -89,6 +89,7 @@
   22.13  
   22.14  inline intptr_t* frame::link() const {
   22.15    ShouldNotCallThis();
   22.16 +  return NULL;
   22.17  }
   22.18  
   22.19  #ifdef CC_INTERP
   22.20 @@ -151,14 +152,17 @@
   22.21  
   22.22  inline oop frame::saved_oop_result(RegisterMap* map) const {
   22.23    ShouldNotCallThis();
   22.24 +  return NULL;
   22.25  }
   22.26  
   22.27  inline bool frame::is_older(intptr_t* id) const {
   22.28    ShouldNotCallThis();
   22.29 +  return false;
   22.30  }
   22.31  
   22.32  inline intptr_t* frame::entry_frame_argument_at(int offset) const {
   22.33    ShouldNotCallThis();
   22.34 +  return NULL;
   22.35  }
   22.36  
   22.37  inline intptr_t* frame::unextended_sp() const {
    23.1 --- a/src/cpu/zero/vm/icBuffer_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    23.2 +++ b/src/cpu/zero/vm/icBuffer_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    23.3 @@ -49,8 +49,10 @@
    23.4  address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
    23.5    // NB ic_stub_code_size() must return the size of the code we generate
    23.6    ShouldNotCallThis();
    23.7 +  return NULL;
    23.8  }
    23.9  
   23.10  void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
   23.11    ShouldNotCallThis();
   23.12 +  return NULL;
   23.13  }
    24.1 --- a/src/cpu/zero/vm/interp_masm_zero.hpp	Fri Aug 23 22:12:18 2013 +0100
    24.2 +++ b/src/cpu/zero/vm/interp_masm_zero.hpp	Fri Aug 30 09:50:49 2013 +0100
    24.3 @@ -40,6 +40,7 @@
    24.4                                          Register  tmp,
    24.5                                          int       offset) {
    24.6      ShouldNotCallThis();
    24.7 +    return RegisterOrConstant();
    24.8    }
    24.9  };
   24.10  
    25.1 --- a/src/cpu/zero/vm/interpreter_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    25.2 +++ b/src/cpu/zero/vm/interpreter_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    25.3 @@ -64,6 +64,7 @@
    25.4      return NULL;
    25.5  
    25.6    Unimplemented();
    25.7 +  return NULL;
    25.8  }
    25.9  
   25.10  address InterpreterGenerator::generate_abstract_entry() {
    26.1 --- a/src/cpu/zero/vm/nativeInst_zero.hpp	Fri Aug 23 22:12:18 2013 +0100
    26.2 +++ b/src/cpu/zero/vm/nativeInst_zero.hpp	Fri Aug 30 09:50:49 2013 +0100
    26.3 @@ -51,15 +51,18 @@
    26.4   public:
    26.5    bool is_jump() {
    26.6      ShouldNotCallThis();
    26.7 +    return false;
    26.8    }
    26.9  
   26.10    bool is_safepoint_poll() {
   26.11      ShouldNotCallThis();
   26.12 +    return false;
   26.13    }
   26.14  };
   26.15  
   26.16  inline NativeInstruction* nativeInstruction_at(address address) {
   26.17    ShouldNotCallThis();
   26.18 +  return NULL;
   26.19  }
   26.20  
   26.21  class NativeCall : public NativeInstruction {
   26.22 @@ -70,18 +73,22 @@
   26.23  
   26.24    address instruction_address() const {
   26.25      ShouldNotCallThis();
   26.26 +    return NULL;
   26.27    }
   26.28  
   26.29    address next_instruction_address() const {
   26.30      ShouldNotCallThis();
   26.31 +    return NULL;
   26.32    }
   26.33  
   26.34    address return_address() const {
   26.35      ShouldNotCallThis();
   26.36 +    return NULL;
   26.37    }
   26.38  
   26.39    address destination() const {
   26.40      ShouldNotCallThis();
   26.41 +    return NULL;
   26.42    }
   26.43  
   26.44    void set_destination_mt_safe(address dest) {
   26.45 @@ -98,25 +105,30 @@
   26.46  
   26.47    static bool is_call_before(address return_address) {
   26.48      ShouldNotCallThis();
   26.49 +    return false;
   26.50    }
   26.51  };
   26.52  
   26.53  inline NativeCall* nativeCall_before(address return_address) {
   26.54    ShouldNotCallThis();
   26.55 +  return NULL;
   26.56  }
   26.57  
   26.58  inline NativeCall* nativeCall_at(address address) {
   26.59    ShouldNotCallThis();
   26.60 +  return NULL;
   26.61  }
   26.62  
   26.63  class NativeMovConstReg : public NativeInstruction {
   26.64   public:
   26.65    address next_instruction_address() const {
   26.66      ShouldNotCallThis();
   26.67 +    return NULL;
   26.68    }
   26.69  
   26.70    intptr_t data() const {
   26.71      ShouldNotCallThis();
   26.72 +    return 0;
   26.73    }
   26.74  
   26.75    void set_data(intptr_t x) {
   26.76 @@ -126,12 +138,14 @@
   26.77  
   26.78  inline NativeMovConstReg* nativeMovConstReg_at(address address) {
   26.79    ShouldNotCallThis();
   26.80 +  return NULL;
   26.81  }
   26.82  
   26.83  class NativeMovRegMem : public NativeInstruction {
   26.84   public:
   26.85    int offset() const {
   26.86      ShouldNotCallThis();
   26.87 +    return 0;
   26.88    }
   26.89  
   26.90    void set_offset(intptr_t x) {
   26.91 @@ -145,6 +159,7 @@
   26.92  
   26.93  inline NativeMovRegMem* nativeMovRegMem_at(address address) {
   26.94    ShouldNotCallThis();
   26.95 +  return NULL;
   26.96  }
   26.97  
   26.98  class NativeJump : public NativeInstruction {
   26.99 @@ -155,6 +170,7 @@
  26.100  
  26.101    address jump_destination() const {
  26.102      ShouldNotCallThis();
  26.103 +    return NULL;
  26.104    }
  26.105  
  26.106    void set_jump_destination(address dest) {
  26.107 @@ -172,12 +188,14 @@
  26.108  
  26.109  inline NativeJump* nativeJump_at(address address) {
  26.110    ShouldNotCallThis();
  26.111 +  return NULL;
  26.112  }
  26.113  
  26.114  class NativeGeneralJump : public NativeInstruction {
  26.115   public:
  26.116    address jump_destination() const {
  26.117      ShouldNotCallThis();
  26.118 +    return NULL;
  26.119    }
  26.120  
  26.121    static void insert_unconditional(address code_pos, address entry) {
  26.122 @@ -191,6 +209,7 @@
  26.123  
  26.124  inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  26.125    ShouldNotCallThis();
  26.126 +  return NULL;
  26.127  }
  26.128  
  26.129  #endif // CPU_ZERO_VM_NATIVEINST_ZERO_HPP
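
The Zero stubs above (and in the register, relocInfo, sharedRuntime and vtableStubs files that follow) all gain the same shape: a function whose body is only ShouldNotCallThis() now also returns a dummy NULL/false/0. A minimal self-contained sketch of the pattern, with an illustrative guard function standing in for HotSpot's real macro:

#include <cstdio>
#include <cstdlib>

// Illustrative stand-in for HotSpot's ShouldNotCallThis(): it reports the error
// and aborts, but it is not declared noreturn, so a caller's flow analysis has
// to assume the call may return.
static void report_should_not_call(const char* file, int line) {
  std::fprintf(stderr, "ShouldNotCallThis() at %s:%d\n", file, line);
  std::abort();
}
#define SHOULD_NOT_CALL_THIS() report_should_not_call(__FILE__, __LINE__)

// Shape of the patched Zero stubs: never meant to run, but the trailing dummy
// return keeps "control reaches end of non-void function" warnings away and
// avoids undefined behaviour on a hypothetical fall-through.
struct NativeCallSketch {
  const char* destination() const {
    SHOULD_NOT_CALL_THIS();
    return NULL;  // dummy value, never used
  }
};

int main() {
  NativeCallSketch sketch;
  (void) sketch;  // destination() is intentionally never called
  return 0;
}

Because the guard is not declared noreturn, the compiler must assume the call can return; the dummy return satisfies the function's return type without changing behaviour.
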
    27.1 --- a/src/cpu/zero/vm/register_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    27.2 +++ b/src/cpu/zero/vm/register_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    27.3 @@ -32,8 +32,10 @@
    27.4  
    27.5  const char* RegisterImpl::name() const {
    27.6    ShouldNotCallThis();
    27.7 +  return NULL;
    27.8  }
    27.9  
   27.10  const char* FloatRegisterImpl::name() const {
   27.11    ShouldNotCallThis();
   27.12 +  return NULL;
   27.13  }
    28.1 --- a/src/cpu/zero/vm/relocInfo_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    28.2 +++ b/src/cpu/zero/vm/relocInfo_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    28.3 @@ -37,6 +37,7 @@
    28.4  
    28.5  address Relocation::pd_call_destination(address orig_addr) {
    28.6    ShouldNotCallThis();
    28.7 +  return NULL;
    28.8  }
    28.9  
   28.10  void Relocation::pd_set_call_destination(address x) {
   28.11 @@ -45,6 +46,7 @@
   28.12  
   28.13  address Relocation::pd_get_address_from_code() {
   28.14    ShouldNotCallThis();
   28.15 +  return NULL;
   28.16  }
   28.17  
   28.18  address* Relocation::pd_address_in_code() {
    29.1 --- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    29.2 +++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    29.3 @@ -89,6 +89,7 @@
    29.4                                                              ret_type);
    29.5  #else
    29.6    ShouldNotCallThis();
    29.7 +  return NULL;
    29.8  #endif // SHARK
    29.9  }
   29.10  
   29.11 @@ -99,6 +100,7 @@
   29.12  
   29.13  uint SharedRuntime::out_preserve_stack_slots() {
   29.14    ShouldNotCallThis();
   29.15 +  return 0;
   29.16  }
   29.17  
   29.18  JRT_LEAF(void, zero_stub())
   29.19 @@ -135,4 +137,5 @@
   29.20                                           VMRegPair *regs,
   29.21                                           int total_args_passed) {
   29.22    ShouldNotCallThis();
   29.23 +  return 0;
   29.24  }
    30.1 --- a/src/cpu/zero/vm/vtableStubs_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    30.2 +++ b/src/cpu/zero/vm/vtableStubs_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    30.3 @@ -39,16 +39,20 @@
    30.4  
    30.5  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    30.6    ShouldNotCallThis();
    30.7 +  return NULL;
    30.8  }
    30.9  
   30.10  VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   30.11    ShouldNotCallThis();
   30.12 +  return NULL;
   30.13  }
   30.14  
   30.15  int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
   30.16    ShouldNotCallThis();
   30.17 +  return 0;
   30.18  }
   30.19  
   30.20  int VtableStub::pd_code_alignment() {
   30.21    ShouldNotCallThis();
   30.22 +  return 0;
   30.23  }
    31.1 --- a/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Fri Aug 23 22:12:18 2013 +0100
    31.2 +++ b/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Fri Aug 30 09:50:49 2013 +0100
    31.3 @@ -190,7 +190,7 @@
    31.4  inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }
    31.5  
    31.6  inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
    31.7 -inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jdouble_cast(v)); }
    31.8 +inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }
    31.9  
   31.10  inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
   31.11  #ifdef AMD64
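
The one-token fix above matters because the jdouble overload of release_store_fence forwards to the jlong overload and must pass the double's raw 64-bit pattern, not a value converted back to jdouble; jlong_cast is HotSpot's bit-preserving reinterpretation. A small standalone illustration of the difference between a value cast and a bit cast (helper name hypothetical):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Bit-preserving reinterpretation of a double as a 64-bit integer, the role
// jlong_cast plays in the corrected line above. memcpy is the portable way to
// do this without violating strict-aliasing rules.
static int64_t bits_of(double v) {
  int64_t bits;
  std::memcpy(&bits, &v, sizeof bits);
  return bits;
}

int main() {
  double d = 1.5;
  // A value cast truncates to 1 and would store the wrong bits.
  std::printf("value cast: %lld\n", (long long)(int64_t) d);
  // The bit cast preserves the IEEE-754 pattern 0x3ff8000000000000.
  std::printf("bit cast:   0x%llx\n", (unsigned long long) bits_of(d));
  return 0;
}
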
    32.1 --- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Fri Aug 23 22:12:18 2013 +0100
    32.2 +++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Fri Aug 30 09:50:49 2013 +0100
    32.3 @@ -715,6 +715,7 @@
    32.4    err.report_and_die();
    32.5  
    32.6    ShouldNotReachHere();
    32.7 +  return false;
    32.8  }
    32.9  
   32.10  // From solaris_i486.s ported to bsd_i486.s
    33.1 --- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    33.2 +++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Fri Aug 30 09:50:49 2013 +0100
    33.3 @@ -66,6 +66,7 @@
    33.4  
    33.5  frame os::get_sender_for_C_frame(frame* fr) {
    33.6    ShouldNotCallThis();
    33.7 +  return frame();
    33.8  }
    33.9  
   33.10  frame os::current_frame() {
   33.11 @@ -103,16 +104,19 @@
   33.12  
   33.13  address os::Bsd::ucontext_get_pc(ucontext_t* uc) {
   33.14    ShouldNotCallThis();
   33.15 +  return NULL;
   33.16  }
   33.17  
   33.18  ExtendedPC os::fetch_frame_from_context(void* ucVoid,
   33.19                                          intptr_t** ret_sp,
   33.20                                          intptr_t** ret_fp) {
   33.21    ShouldNotCallThis();
   33.22 +  return ExtendedPC();
   33.23  }
   33.24  
   33.25  frame os::fetch_frame_from_context(void* ucVoid) {
   33.26    ShouldNotCallThis();
   33.27 +  return frame();
   33.28  }
   33.29  
   33.30  extern "C" JNIEXPORT int
   33.31 @@ -240,6 +244,7 @@
   33.32  
   33.33    sprintf(buf, fmt, sig, info->si_addr);
   33.34    fatal(buf);
   33.35 +  return false;
   33.36  }
   33.37  
   33.38  void os::Bsd::init_thread_fpu_state(void) {
   33.39 @@ -373,17 +378,7 @@
   33.40  
   33.41  extern "C" {
   33.42    int SpinPause() {
   33.43 -  }
   33.44 -
   33.45 -  int SafeFetch32(int *adr, int errValue) {
   33.46 -    int value = errValue;
   33.47 -    value = *adr;
   33.48 -    return value;
   33.49 -  }
   33.50 -  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
   33.51 -    intptr_t value = errValue;
   33.52 -    value = *adr;
   33.53 -    return value;
   33.54 +    return 1;
   33.55    }
   33.56  
   33.57    void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    34.1 --- a/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp	Fri Aug 23 22:12:18 2013 +0100
    34.2 +++ b/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp	Fri Aug 30 09:50:49 2013 +0100
    34.3 @@ -110,6 +110,7 @@
    34.4                                             void* ucontext,
    34.5                                             bool isInJava) {
    34.6      ShouldNotCallThis();
    34.7 +    return false;
    34.8    }
    34.9  
   34.10    // These routines are only used on cpu architectures that
    35.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 23 22:12:18 2013 +0100
    35.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 30 09:50:49 2013 +0100
    35.3 @@ -915,16 +915,6 @@
    35.4      // Return to the now deoptimized frame.
    35.5    }
    35.6  
    35.7 -  // If we are patching in a non-perm oop, make sure the nmethod
    35.8 -  // is on the right list.
    35.9 -  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
   35.10 -    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
   35.11 -    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
   35.12 -    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
   35.13 -    if (!nm->on_scavenge_root_list())
   35.14 -      CodeCache::add_scavenge_root_nmethod(nm);
   35.15 -  }
   35.16 -
   35.17    // Now copy code back
   35.18  
   35.19    {
   35.20 @@ -1125,6 +1115,21 @@
   35.21        }
   35.22      }
   35.23    }
   35.24 +
   35.25 +  // If we are patching in a non-perm oop, make sure the nmethod
   35.26 +  // is on the right list.
   35.27 +  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
   35.28 +    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
   35.29 +    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
   35.30 +    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
   35.31 +    if (!nm->on_scavenge_root_list()) {
   35.32 +      CodeCache::add_scavenge_root_nmethod(nm);
   35.33 +    }
   35.34 +
   35.35 +    // Since we've patched some oops in the nmethod,
   35.36 +    // (re)register it with the heap.
   35.37 +    Universe::heap()->register_nmethod(nm);
   35.38 +  }
   35.39  JRT_END
   35.40  
   35.41  //
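
The Runtime1 patching change above reorders things so the nmethod is put on the scavenge-root list, and (re)registered with the heap, only after the patched code has been copied back, ensuring the GC's bookkeeping reflects the newly embedded oop. A toy sketch of that ordering (types and helper are illustrative, not Runtime1's real patching path):

#include <cstdio>
#include <vector>

// Toy heap that records which code blobs it has been told about.
struct ToyHeap {
  std::vector<const std::vector<const void*>*> registered_nmethods;
  void register_nmethod(const std::vector<const void*>* oops) {
    registered_nmethods.push_back(oops);
  }
};

int main() {
  ToyHeap heap;
  std::vector<const void*> nmethod_oops;  // oops embedded in the compiled code
  int mirror = 0;                         // stands in for the class mirror being patched in

  nmethod_oops.push_back(&mirror);        // 1. patch the oop into the code first
  heap.register_nmethod(&nmethod_oops);   // 2. only then (re)register with the heap

  std::printf("heap sees %zu embedded oop(s)\n", heap.registered_nmethods[0]->size());
  return 0;
}
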
    36.1 --- a/src/share/vm/classfile/classFileParser.cpp	Fri Aug 23 22:12:18 2013 +0100
    36.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Fri Aug 30 09:50:49 2013 +0100
    36.3 @@ -2590,7 +2590,7 @@
    36.4      valid_symbol_at(sourcefile_index),
    36.5      "Invalid SourceFile attribute at constant pool index %u in class file %s",
    36.6      sourcefile_index, CHECK);
    36.7 -  set_class_sourcefile(_cp->symbol_at(sourcefile_index));
    36.8 +  set_class_sourcefile_index(sourcefile_index);
    36.9  }
   36.10  
   36.11  
   36.12 @@ -2728,7 +2728,7 @@
   36.13      valid_symbol_at(signature_index),
   36.14      "Invalid constant pool index %u in Signature attribute in class file %s",
   36.15      signature_index, CHECK);
   36.16 -  set_class_generic_signature(_cp->symbol_at(signature_index));
   36.17 +  set_class_generic_signature_index(signature_index);
   36.18  }
   36.19  
   36.20  void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_byte_length, TRAPS) {
   36.21 @@ -2975,13 +2975,11 @@
   36.22  void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
   36.23    if (_synthetic_flag)
   36.24      k->set_is_synthetic();
   36.25 -  if (_sourcefile != NULL) {
   36.26 -    _sourcefile->increment_refcount();
   36.27 -    k->set_source_file_name(_sourcefile);
   36.28 +  if (_sourcefile_index != 0) {
   36.29 +    k->set_source_file_name_index(_sourcefile_index);
   36.30    }
   36.31 -  if (_generic_signature != NULL) {
   36.32 -    _generic_signature->increment_refcount();
   36.33 -    k->set_generic_signature(_generic_signature);
   36.34 +  if (_generic_signature_index != 0) {
   36.35 +    k->set_generic_signature_index(_generic_signature_index);
   36.36    }
   36.37    if (_sde_buffer != NULL) {
   36.38      k->set_source_debug_extension(_sde_buffer, _sde_length);
    37.1 --- a/src/share/vm/classfile/classFileParser.hpp	Fri Aug 23 22:12:18 2013 +0100
    37.2 +++ b/src/share/vm/classfile/classFileParser.hpp	Fri Aug 30 09:50:49 2013 +0100
    37.3 @@ -62,8 +62,8 @@
    37.4    bool       _synthetic_flag;
    37.5    int        _sde_length;
    37.6    char*      _sde_buffer;
    37.7 -  Symbol*    _sourcefile;
    37.8 -  Symbol*    _generic_signature;
    37.9 +  u2         _sourcefile_index;
   37.10 +  u2         _generic_signature_index;
   37.11  
   37.12    // Metadata created before the instance klass is created.  Must be deallocated
   37.13    // if not transferred to the InstanceKlass upon successful class loading
   37.14 @@ -81,16 +81,16 @@
   37.15    Array<AnnotationArray*>* _fields_type_annotations;
   37.16    InstanceKlass*   _klass;  // InstanceKlass once created.
   37.17  
   37.18 -  void set_class_synthetic_flag(bool x)           { _synthetic_flag = x; }
   37.19 -  void set_class_sourcefile(Symbol* x)            { _sourcefile = x; }
   37.20 -  void set_class_generic_signature(Symbol* x)     { _generic_signature = x; }
   37.21 -  void set_class_sde_buffer(char* x, int len)     { _sde_buffer = x; _sde_length = len; }
   37.22 +  void set_class_synthetic_flag(bool x)        { _synthetic_flag = x; }
   37.23 +  void set_class_sourcefile_index(u2 x)        { _sourcefile_index = x; }
   37.24 +  void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
   37.25 +  void set_class_sde_buffer(char* x, int len)  { _sde_buffer = x; _sde_length = len; }
   37.26  
   37.27    void init_parsed_class_attributes(ClassLoaderData* loader_data) {
   37.28      _loader_data = loader_data;
   37.29      _synthetic_flag = false;
   37.30 -    _sourcefile = NULL;
   37.31 -    _generic_signature = NULL;
   37.32 +    _sourcefile_index = 0;
   37.33 +    _generic_signature_index = 0;
   37.34      _sde_buffer = NULL;
   37.35      _sde_length = 0;
   37.36      // initialize the other flags too:
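
The parser changes above record the u2 constant-pool index for SourceFile and Signature instead of a resolved Symbol*, so no refcount has to be held during parsing and the klass carries two shorts instead of two pointers; the name can be resolved lazily from the constant pool, as the removed symbol_at() calls suggest. A simplified, self-contained sketch of the lazy lookup (toy types, not HotSpot's ConstantPool):

#include <cstdio>
#include <string>
#include <vector>

typedef unsigned short u2;

// Toy constant pool: index 0 is unused, mirroring the "index == 0 means the
// attribute was absent" convention in the checks above.
struct ToyConstantPool {
  std::vector<std::string> symbols;                    // symbols.at(i) ~ symbol_at(i)
  const std::string* symbol_at(u2 i) const { return &symbols.at(i); }
};

// Resolve the SourceFile name on demand from the stored u2 index instead of
// keeping a refcounted Symbol* around during class file parsing.
const std::string* source_file_name(const ToyConstantPool& cp, u2 index) {
  return (index == 0) ? NULL : cp.symbol_at(index);
}

int main() {
  ToyConstantPool cp;
  cp.symbols = { "", "Foo.java" };
  u2 sourcefile_index = 1;                             // what the parser now stores
  std::printf("source file: %s\n", source_file_name(cp, sourcefile_index)->c_str());
  return 0;
}
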
    38.1 --- a/src/share/vm/code/nmethod.cpp	Fri Aug 23 22:12:18 2013 +0100
    38.2 +++ b/src/share/vm/code/nmethod.cpp	Fri Aug 30 09:50:49 2013 +0100
    38.3 @@ -687,6 +687,7 @@
    38.4      code_buffer->copy_values_to(this);
    38.5      if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
    38.6        CodeCache::add_scavenge_root_nmethod(this);
    38.7 +      Universe::heap()->register_nmethod(this);
    38.8      }
    38.9      debug_only(verify_scavenge_root_oops());
   38.10      CodeCache::commit(this);
   38.11 @@ -881,6 +882,7 @@
   38.12      dependencies->copy_to(this);
   38.13      if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
   38.14        CodeCache::add_scavenge_root_nmethod(this);
   38.15 +      Universe::heap()->register_nmethod(this);
   38.16      }
   38.17      debug_only(verify_scavenge_root_oops());
   38.18  
   38.19 @@ -1300,6 +1302,13 @@
   38.20    methodHandle the_method(method());
   38.21    No_Safepoint_Verifier nsv;
   38.22  
    38.23 +  // During patching, depending on the nmethod state, we must notify the GC that
    38.24 +  // code has been unloaded, i.e. unregister the nmethod. We cannot do that while
    38.25 +  // holding the Patching_lock, because we would also need the CodeCache_lock and
    38.26 +  // that would be prone to deadlocks.
    38.27 +  // This flag records whether we need to take the lock and unregister later.
   38.28 +  bool nmethod_needs_unregister = false;
   38.29 +
   38.30    {
   38.31      // invalidate osr nmethod before acquiring the patching lock since
   38.32      // they both acquire leaf locks and we don't want a deadlock.
   38.33 @@ -1332,6 +1341,13 @@
   38.34        inc_decompile_count();
   38.35      }
   38.36  
   38.37 +    // If the state is becoming a zombie, signal to unregister the nmethod with
   38.38 +    // the heap.
   38.39 +    // This nmethod may have already been unloaded during a full GC.
   38.40 +    if ((state == zombie) && !is_unloaded()) {
   38.41 +      nmethod_needs_unregister = true;
   38.42 +    }
   38.43 +
   38.44      // Change state
   38.45      _state = state;
   38.46  
   38.47 @@ -1367,6 +1383,9 @@
   38.48        // safepoint can sneak in, otherwise the oops used by the
   38.49        // dependency logic could have become stale.
   38.50        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   38.51 +      if (nmethod_needs_unregister) {
   38.52 +        Universe::heap()->unregister_nmethod(this);
   38.53 +      }
   38.54        flush_dependencies(NULL);
   38.55      }
   38.56  
   38.57 @@ -1817,21 +1836,10 @@
   38.58    if (_method != NULL) f(_method);
   38.59  }
   38.60  
   38.61 -
   38.62 -// This method is called twice during GC -- once while
   38.63 -// tracing the "active" nmethods on thread stacks during
   38.64 -// the (strong) marking phase, and then again when walking
   38.65 -// the code cache contents during the weak roots processing
   38.66 -// phase. The two uses are distinguished by means of the
   38.67 -// 'do_strong_roots_only' flag, which is true in the first
   38.68 -// case. We want to walk the weak roots in the nmethod
   38.69 -// only in the second case. The weak roots in the nmethod
   38.70 -// are the oops in the ExceptionCache and the InlineCache
   38.71 -// oops.
   38.72 -void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
   38.73 +void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
   38.74    // make sure the oops ready to receive visitors
   38.75 -  assert(!is_zombie() && !is_unloaded(),
   38.76 -         "should not call follow on zombie or unloaded nmethod");
   38.77 +  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
   38.78 +  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
   38.79  
   38.80    // If the method is not entrant or zombie then a JMP is plastered over the
   38.81    // first few bytes.  If an oop in the old code was there, that oop
    39.1 --- a/src/share/vm/code/nmethod.hpp	Fri Aug 23 22:12:18 2013 +0100
    39.2 +++ b/src/share/vm/code/nmethod.hpp	Fri Aug 30 09:50:49 2013 +0100
    39.3 @@ -566,7 +566,7 @@
    39.4    void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
    39.5                                       OopClosure* f);
    39.6    void oops_do(OopClosure* f) { oops_do(f, false); }
    39.7 -  void oops_do(OopClosure* f, bool do_strong_roots_only);
    39.8 +  void oops_do(OopClosure* f, bool allow_zombie);
    39.9    bool detect_scavenge_root_oops();
   39.10    void verify_scavenge_root_oops() PRODUCT_RETURN;
   39.11  
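
Taken together, the nmethod changes above make the heap aware of compiled code: an nmethod is registered when scavengeable oops are detected at creation time, and unregistered when it becomes a zombie, with the unregistration deferred out of the Patching_lock and performed later under the CodeCache_lock. A self-contained sketch of that deferral pattern (mock types, not HotSpot's locking classes):

#include <cstdio>
#include <mutex>

// Mock of the heap interface; the real call is Universe::heap()->unregister_nmethod().
struct MockHeap {
  void unregister_nmethod(void* nm) { std::printf("unregister %p\n", nm); }
};

static MockHeap heap;
static std::mutex patching_lock;
static std::mutex codecache_lock;

enum State { in_use, not_entrant, zombie };

void make_not_entrant_or_zombie(void* nm, State& current, State target, bool unloaded) {
  bool needs_unregister = false;
  {
    std::lock_guard<std::mutex> g(patching_lock);
    // Taking the CodeCache lock here as well would risk deadlock, so only
    // record the decision while the state change is made.
    if (target == zombie && !unloaded) {
      needs_unregister = true;
    }
    current = target;
  }
  {
    std::lock_guard<std::mutex> g(codecache_lock);
    if (needs_unregister) {
      heap.unregister_nmethod(nm);
    }
  }
}

int main() {
  State s = in_use;
  int dummy;
  make_not_entrant_or_zombie(&dummy, s, zombie, /*unloaded=*/false);
  return 0;
}

The flag is the whole point: the decision is taken under one lock, the side effect under another, so the two locks are never held together.
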
    40.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 23 22:12:18 2013 +0100
    40.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 30 09:50:49 2013 +0100
    40.3 @@ -5478,40 +5478,42 @@
    40.4    HandleMark   hm;
    40.5  
    40.6    SequentialSubTasksDone* pst = space->par_seq_tasks();
    40.7 -  assert(pst->valid(), "Uninitialized use?");
    40.8  
    40.9    uint nth_task = 0;
   40.10    uint n_tasks  = pst->n_tasks();
   40.11  
   40.12 -  HeapWord *start, *end;
   40.13 -  while (!pst->is_task_claimed(/* reference */ nth_task)) {
   40.14 -    // We claimed task # nth_task; compute its boundaries.
   40.15 -    if (chunk_top == 0) {  // no samples were taken
   40.16 -      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
   40.17 -      start = space->bottom();
   40.18 -      end   = space->top();
   40.19 -    } else if (nth_task == 0) {
   40.20 -      start = space->bottom();
   40.21 -      end   = chunk_array[nth_task];
   40.22 -    } else if (nth_task < (uint)chunk_top) {
   40.23 -      assert(nth_task >= 1, "Control point invariant");
   40.24 -      start = chunk_array[nth_task - 1];
   40.25 -      end   = chunk_array[nth_task];
   40.26 -    } else {
   40.27 -      assert(nth_task == (uint)chunk_top, "Control point invariant");
   40.28 -      start = chunk_array[chunk_top - 1];
   40.29 -      end   = space->top();
   40.30 -    }
   40.31 -    MemRegion mr(start, end);
   40.32 -    // Verify that mr is in space
   40.33 -    assert(mr.is_empty() || space->used_region().contains(mr),
   40.34 -           "Should be in space");
   40.35 -    // Verify that "start" is an object boundary
   40.36 -    assert(mr.is_empty() || oop(mr.start())->is_oop(),
   40.37 -           "Should be an oop");
   40.38 -    space->par_oop_iterate(mr, cl);
   40.39 -  }
   40.40 -  pst->all_tasks_completed();
   40.41 +  if (n_tasks > 0) {
   40.42 +    assert(pst->valid(), "Uninitialized use?");
   40.43 +    HeapWord *start, *end;
   40.44 +    while (!pst->is_task_claimed(/* reference */ nth_task)) {
   40.45 +      // We claimed task # nth_task; compute its boundaries.
   40.46 +      if (chunk_top == 0) {  // no samples were taken
   40.47 +        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
   40.48 +        start = space->bottom();
   40.49 +        end   = space->top();
   40.50 +      } else if (nth_task == 0) {
   40.51 +        start = space->bottom();
   40.52 +        end   = chunk_array[nth_task];
   40.53 +      } else if (nth_task < (uint)chunk_top) {
   40.54 +        assert(nth_task >= 1, "Control point invariant");
   40.55 +        start = chunk_array[nth_task - 1];
   40.56 +        end   = chunk_array[nth_task];
   40.57 +      } else {
   40.58 +        assert(nth_task == (uint)chunk_top, "Control point invariant");
   40.59 +        start = chunk_array[chunk_top - 1];
   40.60 +        end   = space->top();
   40.61 +      }
   40.62 +      MemRegion mr(start, end);
   40.63 +      // Verify that mr is in space
   40.64 +      assert(mr.is_empty() || space->used_region().contains(mr),
   40.65 +             "Should be in space");
   40.66 +      // Verify that "start" is an object boundary
   40.67 +      assert(mr.is_empty() || oop(mr.start())->is_oop(),
   40.68 +             "Should be an oop");
   40.69 +      space->par_oop_iterate(mr, cl);
   40.70 +    }
   40.71 +    pst->all_tasks_completed();
   40.72 +  }
   40.73  }
   40.74  
   40.75  void
   40.76 @@ -5788,7 +5790,7 @@
   40.77    DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
   40.78  
   40.79    // Eden space
   40.80 -  {
   40.81 +  if (!dng->eden()->is_empty()) {
   40.82      SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
   40.83      assert(!pst->valid(), "Clobbering existing data?");
   40.84      // Each valid entry in [0, _eden_chunk_index) represents a task.
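
The CMS change above guards both sides of the parallel-scan handshake: eden only gets scan tasks set up when it is non-empty, and the worker side only claims tasks and reports completion when n_tasks > 0, so an empty eden no longer trips the pst->valid() assert. A tiny self-contained sketch of the pairing (types illustrative, not CMS's SequentialSubTasksDone):

#include <cassert>
#include <cstdio>
#include <vector>

// Illustrative stand-in for the sub-task bookkeeping: n_tasks == 0 means
// "nothing was set up", and completing zero tasks must then be skipped entirely.
struct SubTasks {
  unsigned n_tasks = 0;
  unsigned next = 0;
  bool claim(unsigned& task) {
    if (next >= n_tasks) return false;
    task = next++;
    return true;
  }
  void all_tasks_completed() { assert(n_tasks > 0 && "nothing to complete"); }
};

void scan(SubTasks& pst, const std::vector<int>& eden) {
  if (pst.n_tasks > 0) {                 // mirrors the new n_tasks > 0 guard
    unsigned task;
    while (pst.claim(task)) {
      std::printf("scan chunk %u of %zu-word eden\n", task, eden.size());
    }
    pst.all_tasks_completed();
  }
}

int main() {
  SubTasks pst;                          // empty eden: no tasks were set up
  scan(pst, {});                         // safely does nothing
  return 0;
}
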
    41.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Aug 23 22:12:18 2013 +0100
    41.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Aug 30 09:50:49 2013 +0100
    41.3 @@ -4529,7 +4529,7 @@
    41.4      _total_prev_live_bytes(0), _total_next_live_bytes(0),
    41.5      _hum_used_bytes(0), _hum_capacity_bytes(0),
    41.6      _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    41.7 -    _total_remset_bytes(0) {
    41.8 +    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
    41.9    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   41.10    MemRegion g1_committed = g1h->g1_committed();
   41.11    MemRegion g1_reserved = g1h->g1_reserved();
   41.12 @@ -4553,9 +4553,11 @@
   41.13                  G1PPRL_BYTE_H_FORMAT
   41.14                  G1PPRL_BYTE_H_FORMAT
   41.15                  G1PPRL_DOUBLE_H_FORMAT
   41.16 +                G1PPRL_BYTE_H_FORMAT
   41.17                  G1PPRL_BYTE_H_FORMAT,
   41.18                  "type", "address-range",
   41.19 -                "used", "prev-live", "next-live", "gc-eff", "remset");
   41.20 +                "used", "prev-live", "next-live", "gc-eff",
   41.21 +                "remset", "code-roots");
   41.22    _out->print_cr(G1PPRL_LINE_PREFIX
   41.23                  G1PPRL_TYPE_H_FORMAT
   41.24                  G1PPRL_ADDR_BASE_H_FORMAT
   41.25 @@ -4563,9 +4565,11 @@
   41.26                  G1PPRL_BYTE_H_FORMAT
   41.27                  G1PPRL_BYTE_H_FORMAT
   41.28                  G1PPRL_DOUBLE_H_FORMAT
   41.29 +                G1PPRL_BYTE_H_FORMAT
   41.30                  G1PPRL_BYTE_H_FORMAT,
   41.31                  "", "",
   41.32 -                "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", "(bytes)");
   41.33 +                "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
   41.34 +                "(bytes)", "(bytes)");
   41.35  }
   41.36  
   41.37  // It takes as a parameter a reference to one of the _hum_* fields, it
   41.38 @@ -4608,6 +4612,8 @@
   41.39    size_t next_live_bytes = r->next_live_bytes();
   41.40    double gc_eff          = r->gc_efficiency();
   41.41    size_t remset_bytes    = r->rem_set()->mem_size();
   41.42 +  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
   41.43 +
   41.44    if (r->used() == 0) {
   41.45      type = "FREE";
   41.46    } else if (r->is_survivor()) {
   41.47 @@ -4642,6 +4648,7 @@
   41.48    _total_prev_live_bytes += prev_live_bytes;
   41.49    _total_next_live_bytes += next_live_bytes;
   41.50    _total_remset_bytes    += remset_bytes;
   41.51 +  _total_strong_code_roots_bytes += strong_code_roots_bytes;
   41.52  
   41.53    // Print a line for this particular region.
   41.54    _out->print_cr(G1PPRL_LINE_PREFIX
   41.55 @@ -4651,9 +4658,11 @@
   41.56                   G1PPRL_BYTE_FORMAT
   41.57                   G1PPRL_BYTE_FORMAT
   41.58                   G1PPRL_DOUBLE_FORMAT
   41.59 +                 G1PPRL_BYTE_FORMAT
   41.60                   G1PPRL_BYTE_FORMAT,
   41.61                   type, bottom, end,
   41.62 -                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff , remset_bytes);
   41.63 +                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
   41.64 +                 remset_bytes, strong_code_roots_bytes);
   41.65  
   41.66    return false;
   41.67  }
   41.68 @@ -4669,7 +4678,8 @@
   41.69                   G1PPRL_SUM_MB_PERC_FORMAT("used")
   41.70                   G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
   41.71                   G1PPRL_SUM_MB_PERC_FORMAT("next-live")
   41.72 -                 G1PPRL_SUM_MB_FORMAT("remset"),
   41.73 +                 G1PPRL_SUM_MB_FORMAT("remset")
   41.74 +                 G1PPRL_SUM_MB_FORMAT("code-roots"),
   41.75                   bytes_to_mb(_total_capacity_bytes),
   41.76                   bytes_to_mb(_total_used_bytes),
   41.77                   perc(_total_used_bytes, _total_capacity_bytes),
   41.78 @@ -4677,6 +4687,7 @@
   41.79                   perc(_total_prev_live_bytes, _total_capacity_bytes),
   41.80                   bytes_to_mb(_total_next_live_bytes),
   41.81                   perc(_total_next_live_bytes, _total_capacity_bytes),
   41.82 -                 bytes_to_mb(_total_remset_bytes));
   41.83 +                 bytes_to_mb(_total_remset_bytes),
   41.84 +                 bytes_to_mb(_total_strong_code_roots_bytes));
   41.85    _out->cr();
   41.86  }
    42.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Aug 23 22:12:18 2013 +0100
    42.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Aug 30 09:50:49 2013 +0100
    42.3 @@ -1257,6 +1257,9 @@
    42.4    // Accumulator for the remembered set size
    42.5    size_t _total_remset_bytes;
    42.6  
    42.7 +  // Accumulator for strong code roots memory size
    42.8 +  size_t _total_strong_code_roots_bytes;
    42.9 +
   42.10    static double perc(size_t val, size_t total) {
   42.11      if (total == 0) {
   42.12        return 0.0;
    43.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 23 22:12:18 2013 +0100
    43.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 30 09:50:49 2013 +0100
    43.3 @@ -23,6 +23,7 @@
    43.4   */
    43.5  
    43.6  #include "precompiled.hpp"
    43.7 +#include "code/codeCache.hpp"
    43.8  #include "code/icBuffer.hpp"
    43.9  #include "gc_implementation/g1/bufferingOopClosure.hpp"
   43.10  #include "gc_implementation/g1/concurrentG1Refine.hpp"
   43.11 @@ -1176,20 +1177,27 @@
   43.12    ModRefBarrierSet* _mr_bs;
   43.13  public:
   43.14    PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
   43.15 -    _g1h(g1h), _mr_bs(mr_bs) { }
   43.16 +    _g1h(g1h), _mr_bs(mr_bs) {}
   43.17 +
   43.18    bool doHeapRegion(HeapRegion* r) {
   43.19 +    HeapRegionRemSet* hrrs = r->rem_set();
   43.20 +
   43.21      if (r->continuesHumongous()) {
    43.22 +      // We'll assert that the strong code root list and RSet are empty
   43.23 +      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
   43.24 +      assert(hrrs->occupied() == 0, "RSet should be empty");
   43.25        return false;
   43.26      }
   43.27 +
   43.28      _g1h->reset_gc_time_stamps(r);
   43.29 -    HeapRegionRemSet* hrrs = r->rem_set();
   43.30 -    if (hrrs != NULL) hrrs->clear();
   43.31 +    hrrs->clear();
   43.32      // You might think here that we could clear just the cards
   43.33      // corresponding to the used region.  But no: if we leave a dirty card
   43.34      // in a region we might allocate into, then it would prevent that card
   43.35      // from being enqueued, and cause it to be missed.
   43.36      // Re: the performance cost: we shouldn't be doing full GC anyway!
   43.37      _mr_bs->clear(MemRegion(r->bottom(), r->end()));
   43.38 +
   43.39      return false;
   43.40    }
   43.41  };
   43.42 @@ -1269,30 +1277,6 @@
   43.43    heap_region_iterate(&cl);
   43.44  }
   43.45  
   43.46 -double G1CollectedHeap::verify(bool guard, const char* msg) {
   43.47 -  double verify_time_ms = 0.0;
   43.48 -
   43.49 -  if (guard && total_collections() >= VerifyGCStartAt) {
   43.50 -    double verify_start = os::elapsedTime();
   43.51 -    HandleMark hm;  // Discard invalid handles created during verification
   43.52 -    prepare_for_verify();
   43.53 -    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
   43.54 -    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
   43.55 -  }
   43.56 -
   43.57 -  return verify_time_ms;
   43.58 -}
   43.59 -
   43.60 -void G1CollectedHeap::verify_before_gc() {
   43.61 -  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
   43.62 -  g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
   43.63 -}
   43.64 -
   43.65 -void G1CollectedHeap::verify_after_gc() {
   43.66 -  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
   43.67 -  g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
   43.68 -}
   43.69 -
   43.70  bool G1CollectedHeap::do_collection(bool explicit_gc,
   43.71                                      bool clear_all_soft_refs,
   43.72                                      size_t word_size) {
   43.73 @@ -1433,7 +1417,7 @@
   43.74  
   43.75        // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   43.76        ClassLoaderDataGraph::purge();
   43.77 -    MetaspaceAux::verify_metrics();
   43.78 +      MetaspaceAux::verify_metrics();
   43.79  
   43.80        // Note: since we've just done a full GC, concurrent
   43.81        // marking is no longer active. Therefore we need not
   43.82 @@ -1504,6 +1488,9 @@
   43.83          heap_region_iterate(&rebuild_rs);
   43.84        }
   43.85  
   43.86 +      // Rebuild the strong code root lists for each region
   43.87 +      rebuild_strong_code_roots();
   43.88 +
   43.89        if (true) { // FIXME
   43.90          MetaspaceGC::compute_new_size();
   43.91        }
   43.92 @@ -3109,6 +3096,145 @@
   43.93    return NULL; // keep some compilers happy
   43.94  }
   43.95  
   43.96 +// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
   43.97 +//       pass it as the perm_blk to SharedHeap::process_strong_roots.
    43.98 +//       When process_strong_roots stops calling perm_blk->younger_refs_iterate
   43.99 +//       we can change this closure to extend the simpler OopClosure.
  43.100 +class VerifyRootsClosure: public OopsInGenClosure {
  43.101 +private:
  43.102 +  G1CollectedHeap* _g1h;
  43.103 +  VerifyOption     _vo;
  43.104 +  bool             _failures;
  43.105 +public:
  43.106 +  // _vo == UsePrevMarking -> use "prev" marking information,
  43.107 +  // _vo == UseNextMarking -> use "next" marking information,
  43.108 +  // _vo == UseMarkWord    -> use mark word from object header.
  43.109 +  VerifyRootsClosure(VerifyOption vo) :
  43.110 +    _g1h(G1CollectedHeap::heap()),
  43.111 +    _vo(vo),
  43.112 +    _failures(false) { }
  43.113 +
  43.114 +  bool failures() { return _failures; }
  43.115 +
  43.116 +  template <class T> void do_oop_nv(T* p) {
  43.117 +    T heap_oop = oopDesc::load_heap_oop(p);
  43.118 +    if (!oopDesc::is_null(heap_oop)) {
  43.119 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  43.120 +      if (_g1h->is_obj_dead_cond(obj, _vo)) {
  43.121 +        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  43.122 +                              "points to dead obj "PTR_FORMAT, p, (void*) obj);
  43.123 +        if (_vo == VerifyOption_G1UseMarkWord) {
  43.124 +          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
  43.125 +        }
  43.126 +        obj->print_on(gclog_or_tty);
  43.127 +        _failures = true;
  43.128 +      }
  43.129 +    }
  43.130 +  }
  43.131 +
  43.132 +  void do_oop(oop* p)       { do_oop_nv(p); }
  43.133 +  void do_oop(narrowOop* p) { do_oop_nv(p); }
  43.134 +};
  43.135 +
  43.136 +class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
  43.137 +  G1CollectedHeap* _g1h;
  43.138 +  OopClosure* _root_cl;
  43.139 +  nmethod* _nm;
  43.140 +  VerifyOption _vo;
  43.141 +  bool _failures;
  43.142 +
  43.143 +  template <class T> void do_oop_work(T* p) {
  43.144 +    // First verify that this root is live
  43.145 +    _root_cl->do_oop(p);
  43.146 +
  43.147 +    if (!G1VerifyHeapRegionCodeRoots) {
  43.148 +      // We're not verifying the code roots attached to heap region.
  43.149 +      return;
  43.150 +    }
  43.151 +
  43.152 +    // Don't check the code roots during marking verification in a full GC
  43.153 +    if (_vo == VerifyOption_G1UseMarkWord) {
  43.154 +      return;
  43.155 +    }
  43.156 +
  43.157 +    // Now verify that the current nmethod (which contains p) is
  43.158 +    // in the code root list of the heap region containing the
  43.159 +    // object referenced by p.
  43.160 +
  43.161 +    T heap_oop = oopDesc::load_heap_oop(p);
  43.162 +    if (!oopDesc::is_null(heap_oop)) {
  43.163 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  43.164 +
  43.165 +      // Now fetch the region containing the object
  43.166 +      HeapRegion* hr = _g1h->heap_region_containing(obj);
  43.167 +      HeapRegionRemSet* hrrs = hr->rem_set();
  43.168 +      // Verify that the strong code root list for this region
  43.169 +      // contains the nmethod
  43.170 +      if (!hrrs->strong_code_roots_list_contains(_nm)) {
  43.171 +        gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
  43.172 +                              "from nmethod "PTR_FORMAT" not in strong "
  43.173 +                              "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
  43.174 +                              p, _nm, hr->bottom(), hr->end());
  43.175 +        _failures = true;
  43.176 +      }
  43.177 +    }
  43.178 +  }
  43.179 +
  43.180 +public:
  43.181 +  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
  43.182 +    _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
  43.183 +
  43.184 +  void do_oop(oop* p) { do_oop_work(p); }
  43.185 +  void do_oop(narrowOop* p) { do_oop_work(p); }
  43.186 +
  43.187 +  void set_nmethod(nmethod* nm) { _nm = nm; }
  43.188 +  bool failures() { return _failures; }
  43.189 +};
  43.190 +
  43.191 +class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  43.192 +  G1VerifyCodeRootOopClosure* _oop_cl;
  43.193 +
  43.194 +public:
  43.195 +  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
  43.196 +    _oop_cl(oop_cl) {}
  43.197 +
  43.198 +  void do_code_blob(CodeBlob* cb) {
  43.199 +    nmethod* nm = cb->as_nmethod_or_null();
  43.200 +    if (nm != NULL) {
  43.201 +      _oop_cl->set_nmethod(nm);
  43.202 +      nm->oops_do(_oop_cl);
  43.203 +    }
  43.204 +  }
  43.205 +};
  43.206 +
  43.207 +class YoungRefCounterClosure : public OopClosure {
  43.208 +  G1CollectedHeap* _g1h;
  43.209 +  int              _count;
  43.210 + public:
  43.211 +  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  43.212 +  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  43.213 +  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  43.214 +
  43.215 +  int count() { return _count; }
  43.216 +  void reset_count() { _count = 0; };
  43.217 +};
  43.218 +
  43.219 +class VerifyKlassClosure: public KlassClosure {
  43.220 +  YoungRefCounterClosure _young_ref_counter_closure;
  43.221 +  OopClosure *_oop_closure;
  43.222 + public:
  43.223 +  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  43.224 +  void do_klass(Klass* k) {
  43.225 +    k->oops_do(_oop_closure);
  43.226 +
  43.227 +    _young_ref_counter_closure.reset_count();
  43.228 +    k->oops_do(&_young_ref_counter_closure);
  43.229 +    if (_young_ref_counter_closure.count() > 0) {
  43.230 +      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
  43.231 +    }
  43.232 +  }
  43.233 +};
  43.234 +
  43.235  class VerifyLivenessOopClosure: public OopClosure {
  43.236    G1CollectedHeap* _g1h;
  43.237    VerifyOption _vo;
  43.238 @@ -3242,75 +3368,7 @@
  43.239    }
  43.240  };
  43.241  
  43.242 -class YoungRefCounterClosure : public OopClosure {
  43.243 -  G1CollectedHeap* _g1h;
  43.244 -  int              _count;
  43.245 - public:
  43.246 -  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  43.247 -  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  43.248 -  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  43.249 -
  43.250 -  int count() { return _count; }
  43.251 -  void reset_count() { _count = 0; };
  43.252 -};
  43.253 -
  43.254 -class VerifyKlassClosure: public KlassClosure {
  43.255 -  YoungRefCounterClosure _young_ref_counter_closure;
  43.256 -  OopClosure *_oop_closure;
  43.257 - public:
  43.258 -  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  43.259 -  void do_klass(Klass* k) {
  43.260 -    k->oops_do(_oop_closure);
  43.261 -
  43.262 -    _young_ref_counter_closure.reset_count();
  43.263 -    k->oops_do(&_young_ref_counter_closure);
  43.264 -    if (_young_ref_counter_closure.count() > 0) {
  43.265 -      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
  43.266 -    }
  43.267 -  }
  43.268 -};
  43.269 -
  43.270 -// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
  43.271 -//       pass it as the perm_blk to SharedHeap::process_strong_roots.
  43.272 -//       When process_strong_roots stop calling perm_blk->younger_refs_iterate
  43.273 -//       we can change this closure to extend the simpler OopClosure.
  43.274 -class VerifyRootsClosure: public OopsInGenClosure {
  43.275 -private:
  43.276 -  G1CollectedHeap* _g1h;
  43.277 -  VerifyOption     _vo;
  43.278 -  bool             _failures;
  43.279 -public:
  43.280 -  // _vo == UsePrevMarking -> use "prev" marking information,
  43.281 -  // _vo == UseNextMarking -> use "next" marking information,
  43.282 -  // _vo == UseMarkWord    -> use mark word from object header.
  43.283 -  VerifyRootsClosure(VerifyOption vo) :
  43.284 -    _g1h(G1CollectedHeap::heap()),
  43.285 -    _vo(vo),
  43.286 -    _failures(false) { }
  43.287 -
  43.288 -  bool failures() { return _failures; }
  43.289 -
  43.290 -  template <class T> void do_oop_nv(T* p) {
  43.291 -    T heap_oop = oopDesc::load_heap_oop(p);
  43.292 -    if (!oopDesc::is_null(heap_oop)) {
  43.293 -      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  43.294 -      if (_g1h->is_obj_dead_cond(obj, _vo)) {
  43.295 -        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  43.296 -                              "points to dead obj "PTR_FORMAT, p, (void*) obj);
  43.297 -        if (_vo == VerifyOption_G1UseMarkWord) {
  43.298 -          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
  43.299 -        }
  43.300 -        obj->print_on(gclog_or_tty);
  43.301 -        _failures = true;
  43.302 -      }
  43.303 -    }
  43.304 -  }
  43.305 -
  43.306 -  void do_oop(oop* p)       { do_oop_nv(p); }
  43.307 -  void do_oop(narrowOop* p) { do_oop_nv(p); }
  43.308 -};
  43.309 -
  43.310 -// This is the task used for parallel heap verification.
  43.311 +// This is the task used for parallel verification of the heap regions
  43.312  
  43.313  class G1ParVerifyTask: public AbstractGangTask {
  43.314  private:
  43.315 @@ -3344,20 +3402,15 @@
  43.316    }
  43.317  };
  43.318  
  43.319 -void G1CollectedHeap::verify(bool silent) {
  43.320 -  verify(silent, VerifyOption_G1UsePrevMarking);
  43.321 -}
  43.322 -
  43.323 -void G1CollectedHeap::verify(bool silent,
  43.324 -                             VerifyOption vo) {
  43.325 +void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
  43.326    if (SafepointSynchronize::is_at_safepoint()) {
  43.327 +    assert(Thread::current()->is_VM_thread(),
  43.328 +           "Expected to be executed serially by the VM thread at this point");
  43.329 +
  43.330      if (!silent) { gclog_or_tty->print("Roots "); }
  43.331      VerifyRootsClosure rootsCl(vo);
  43.332 -
  43.333 -    assert(Thread::current()->is_VM_thread(),
  43.334 -           "Expected to be executed serially by the VM thread at this point");
  43.335 -
  43.336 -    CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
  43.337 +    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
  43.338 +    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
  43.339      VerifyKlassClosure klassCl(this, &rootsCl);
  43.340  
  43.341      // We apply the relevant closures to all the oops in the
  43.342 @@ -3376,7 +3429,7 @@
  43.343                           &klassCl
  43.344                           );
  43.345  
  43.346 -    bool failures = rootsCl.failures();
  43.347 +    bool failures = rootsCl.failures() || codeRootsCl.failures();
  43.348  
  43.349      if (vo != VerifyOption_G1UseMarkWord) {
  43.350        // If we're verifying during a full GC then the region sets
  43.351 @@ -3445,6 +3498,34 @@
  43.352    }
  43.353  }
  43.354  
  43.355 +void G1CollectedHeap::verify(bool silent) {
  43.356 +  verify(silent, VerifyOption_G1UsePrevMarking);
  43.357 +}
  43.358 +
  43.359 +double G1CollectedHeap::verify(bool guard, const char* msg) {
  43.360 +  double verify_time_ms = 0.0;
  43.361 +
  43.362 +  if (guard && total_collections() >= VerifyGCStartAt) {
  43.363 +    double verify_start = os::elapsedTime();
  43.364 +    HandleMark hm;  // Discard invalid handles created during verification
  43.365 +    prepare_for_verify();
  43.366 +    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
  43.367 +    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  43.368 +  }
  43.369 +
  43.370 +  return verify_time_ms;
  43.371 +}
  43.372 +
  43.373 +void G1CollectedHeap::verify_before_gc() {
  43.374 +  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
  43.375 +  g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
  43.376 +}
  43.377 +
  43.378 +void G1CollectedHeap::verify_after_gc() {
  43.379 +  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
  43.380 +  g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
  43.381 +}
  43.382 +
  43.383  class PrintRegionClosure: public HeapRegionClosure {
  43.384    outputStream* _st;
  43.385  public:
  43.386 @@ -3866,8 +3947,9 @@
  43.387        append_secondary_free_list_if_not_empty_with_lock();
  43.388      }
  43.389  
  43.390 -    assert(check_young_list_well_formed(),
  43.391 -      "young list should be well formed");
  43.392 +    assert(check_young_list_well_formed(), "young list should be well formed");
  43.393 +    assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  43.394 +           "sanity check");
  43.395  
  43.396      // Don't dynamically change the number of GC threads this early.  A value of
  43.397      // 0 is used to indicate serial work.  When parallel work is done,
  43.398 @@ -4987,7 +5069,11 @@
  43.399  
  43.400        G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  43.401  
  43.402 -      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
  43.403 +      // Don't scan the scavengable methods in the code cache as part
  43.404 +      // of strong root scanning. The code roots that point into a
  43.405 +      // region in the collection set are scanned when we scan the
  43.406 +      // region's RSet.
  43.407 +      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
  43.408  
  43.409        pss.start_strong_roots();
  43.410        _g1h->g1_process_strong_roots(/* is scavenging */ true,
  43.411 @@ -5029,67 +5115,6 @@
  43.412  
  43.413  // *** Common G1 Evacuation Stuff
  43.414  
  43.415 -// Closures that support the filtering of CodeBlobs scanned during
  43.416 -// external root scanning.
  43.417 -
  43.418 -// Closure applied to reference fields in code blobs (specifically nmethods)
  43.419 -// to determine whether an nmethod contains references that point into
  43.420 -// the collection set. Used as a predicate when walking code roots so
  43.421 -// that only nmethods that point into the collection set are added to the
  43.422 -// 'marked' list.
  43.423 -
  43.424 -class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
  43.425 -
  43.426 -  class G1PointsIntoCSOopClosure : public OopClosure {
  43.427 -    G1CollectedHeap* _g1;
  43.428 -    bool _points_into_cs;
  43.429 -  public:
  43.430 -    G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
  43.431 -      _g1(g1), _points_into_cs(false) { }
  43.432 -
  43.433 -    bool points_into_cs() const { return _points_into_cs; }
  43.434 -
  43.435 -    template <class T>
  43.436 -    void do_oop_nv(T* p) {
  43.437 -      if (!_points_into_cs) {
  43.438 -        T heap_oop = oopDesc::load_heap_oop(p);
  43.439 -        if (!oopDesc::is_null(heap_oop) &&
  43.440 -            _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
  43.441 -          _points_into_cs = true;
  43.442 -        }
  43.443 -      }
  43.444 -    }
  43.445 -
  43.446 -    virtual void do_oop(oop* p)        { do_oop_nv(p); }
  43.447 -    virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  43.448 -  };
  43.449 -
  43.450 -  G1CollectedHeap* _g1;
  43.451 -
  43.452 -public:
  43.453 -  G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
  43.454 -    CodeBlobToOopClosure(cl, true), _g1(g1) { }
  43.455 -
  43.456 -  virtual void do_code_blob(CodeBlob* cb) {
  43.457 -    nmethod* nm = cb->as_nmethod_or_null();
  43.458 -    if (nm != NULL && !(nm->test_oops_do_mark())) {
  43.459 -      G1PointsIntoCSOopClosure predicate_cl(_g1);
  43.460 -      nm->oops_do(&predicate_cl);
  43.461 -
  43.462 -      if (predicate_cl.points_into_cs()) {
  43.463 -        // At least one of the reference fields or the oop relocations
  43.464 -        // in the nmethod points into the collection set. We have to
  43.465 -        // 'mark' this nmethod.
  43.466 -        // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
  43.467 -        // or MarkingCodeBlobClosure::do_code_blob() change.
  43.468 -        if (!nm->test_set_oops_do_mark()) {
  43.469 -          do_newly_marked_nmethod(nm);
  43.470 -        }
  43.471 -      }
  43.472 -    }
  43.473 -  }
  43.474 -};
  43.475 -
  43.476  // This method is run in a GC worker.
  43.477  
  43.478  void
  43.479 @@ -5107,9 +5132,10 @@
  43.480  
  43.481    BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  43.482  
  43.483 -  // Walk the code cache w/o buffering, because StarTask cannot handle
  43.484 -  // unaligned oop locations.
  43.485 -  G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
  43.486 +  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
  43.487 +  // Walk the code cache/strong code roots w/o buffering, because StarTask
  43.488 +  // cannot handle unaligned oop locations.
  43.489 +  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
  43.490  
  43.491    process_strong_roots(false, // no scoping; this is parallel code
  43.492                         is_scavenging, so,
  43.493 @@ -5154,9 +5180,22 @@
  43.494    }
  43.495    g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
  43.496  
  43.497 +  // If this is an initial mark pause, and we're not scanning
  43.498 +  // the entire code cache, we need to mark the oops in the
  43.499 +  // strong code root lists for the regions that are not in
  43.500 +  // the collection set.
  43.501 +  // Note all threads participate in this set of root tasks.
  43.502 +  double mark_strong_code_roots_ms = 0.0;
  43.503 +  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
  43.504 +    double mark_strong_roots_start = os::elapsedTime();
  43.505 +    mark_strong_code_roots(worker_i);
  43.506 +    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
  43.507 +  }
  43.508 +  g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
  43.509 +
  43.510    // Now scan the complement of the collection set.
  43.511    if (scan_rs != NULL) {
  43.512 -    g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  43.513 +    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
  43.514    }
  43.515    _process_strong_tasks->all_tasks_completed();
  43.516  }
  43.517 @@ -5774,9 +5813,6 @@
  43.518    process_discovered_references(n_workers);
  43.519  
  43.520    // Weak root processing.
  43.521 -  // Note: when JSR 292 is enabled and code blobs can contain
  43.522 -  // non-perm oops then we will need to process the code blobs
  43.523 -  // here too.
  43.524    {
  43.525      G1STWIsAliveClosure is_alive(this);
  43.526      G1KeepAliveClosure keep_alive(this);
  43.527 @@ -5792,6 +5828,17 @@
  43.528    hot_card_cache->reset_hot_cache();
  43.529    hot_card_cache->set_use_cache(true);
  43.530  
  43.531 +  // Migrate the strong code roots attached to each region in
  43.532 +  // the collection set. Ideally we would like to do this
  43.533 +  // after we have finished the scanning/evacuation of the
  43.534 +  // strong code roots for a particular heap region.
  43.535 +  migrate_strong_code_roots();
  43.536 +
  43.537 +  if (g1_policy()->during_initial_mark_pause()) {
  43.538 +    // Reset the claim values set during marking the strong code roots
  43.539 +    reset_heap_region_claim_values();
  43.540 +  }
  43.541 +
  43.542    finalize_for_evac_failure();
  43.543  
  43.544    if (evacuation_failed()) {
  43.545 @@ -6588,3 +6635,208 @@
  43.546    _humongous_set.verify_end();
  43.547    _free_list.verify_end();
  43.548  }
  43.549 +
  43.550 +// Optimized nmethod scanning
  43.551 +
  43.552 +class RegisterNMethodOopClosure: public OopClosure {
  43.553 +  G1CollectedHeap* _g1h;
  43.554 +  nmethod* _nm;
  43.555 +
  43.556 +  template <class T> void do_oop_work(T* p) {
  43.557 +    T heap_oop = oopDesc::load_heap_oop(p);
  43.558 +    if (!oopDesc::is_null(heap_oop)) {
  43.559 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  43.560 +      HeapRegion* hr = _g1h->heap_region_containing(obj);
  43.561 +      assert(!hr->isHumongous(), "code root in humongous region?");
  43.562 +
  43.563 +      // HeapRegion::add_strong_code_root() avoids adding duplicate
   43.564 +      // entries but having duplicates is OK since we "mark" nmethods
  43.565 +      // as visited when we scan the strong code root lists during the GC.
  43.566 +      hr->add_strong_code_root(_nm);
  43.567 +      assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?");
  43.568 +    }
  43.569 +  }
  43.570 +
  43.571 +public:
  43.572 +  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
  43.573 +    _g1h(g1h), _nm(nm) {}
  43.574 +
  43.575 +  void do_oop(oop* p)       { do_oop_work(p); }
  43.576 +  void do_oop(narrowOop* p) { do_oop_work(p); }
  43.577 +};
  43.578 +
  43.579 +class UnregisterNMethodOopClosure: public OopClosure {
  43.580 +  G1CollectedHeap* _g1h;
  43.581 +  nmethod* _nm;
  43.582 +
  43.583 +  template <class T> void do_oop_work(T* p) {
  43.584 +    T heap_oop = oopDesc::load_heap_oop(p);
  43.585 +    if (!oopDesc::is_null(heap_oop)) {
  43.586 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  43.587 +      HeapRegion* hr = _g1h->heap_region_containing(obj);
  43.588 +      assert(!hr->isHumongous(), "code root in humongous region?");
  43.589 +      hr->remove_strong_code_root(_nm);
  43.590 +      assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?");
  43.591 +    }
  43.592 +  }
  43.593 +
  43.594 +public:
  43.595 +  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
  43.596 +    _g1h(g1h), _nm(nm) {}
  43.597 +
  43.598 +  void do_oop(oop* p)       { do_oop_work(p); }
  43.599 +  void do_oop(narrowOop* p) { do_oop_work(p); }
  43.600 +};
  43.601 +
  43.602 +void G1CollectedHeap::register_nmethod(nmethod* nm) {
  43.603 +  CollectedHeap::register_nmethod(nm);
  43.604 +
  43.605 +  guarantee(nm != NULL, "sanity");
  43.606 +  RegisterNMethodOopClosure reg_cl(this, nm);
  43.607 +  nm->oops_do(&reg_cl);
  43.608 +}
  43.609 +
  43.610 +void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
  43.611 +  CollectedHeap::unregister_nmethod(nm);
  43.612 +
  43.613 +  guarantee(nm != NULL, "sanity");
  43.614 +  UnregisterNMethodOopClosure reg_cl(this, nm);
  43.615 +  nm->oops_do(&reg_cl, true);
  43.616 +}
  43.617 +
  43.618 +class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
  43.619 +public:
  43.620 +  bool doHeapRegion(HeapRegion *hr) {
  43.621 +    assert(!hr->isHumongous(), "humongous region in collection set?");
  43.622 +    hr->migrate_strong_code_roots();
  43.623 +    return false;
  43.624 +  }
  43.625 +};
  43.626 +
  43.627 +void G1CollectedHeap::migrate_strong_code_roots() {
  43.628 +  MigrateCodeRootsHeapRegionClosure cl;
  43.629 +  double migrate_start = os::elapsedTime();
  43.630 +  collection_set_iterate(&cl);
  43.631 +  double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
  43.632 +  g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
  43.633 +}
  43.634 +
  43.635 +// Mark all the code roots that point into regions *not* in the
  43.636 +// collection set.
  43.637 +//
  43.638 +// Note we do not want to use a "marking" CodeBlobToOopClosure while
   43.639 +// walking the code root lists of regions not in the collection
  43.640 +// set. Suppose we have an nmethod (M) that points to objects in two
  43.641 +// separate regions - one in the collection set (R1) and one not (R2).
  43.642 +// Using a "marking" CodeBlobToOopClosure here would result in "marking"
  43.643 +// nmethod M when walking the code roots for R1. When we come to scan
  43.644 +// the code roots for R2, we would see that M is already marked and it
  43.645 +// would be skipped and the objects in R2 that are referenced from M
  43.646 +// would not be evacuated.
  43.647 +
  43.648 +class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  43.649 +
  43.650 +  class MarkStrongCodeRootOopClosure: public OopClosure {
  43.651 +    ConcurrentMark* _cm;
  43.652 +    HeapRegion* _hr;
  43.653 +    uint _worker_id;
  43.654 +
  43.655 +    template <class T> void do_oop_work(T* p) {
  43.656 +      T heap_oop = oopDesc::load_heap_oop(p);
  43.657 +      if (!oopDesc::is_null(heap_oop)) {
  43.658 +        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  43.659 +        // Only mark objects in the region (which is assumed
  43.660 +        // to be not in the collection set).
  43.661 +        if (_hr->is_in(obj)) {
  43.662 +          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  43.663 +        }
  43.664 +      }
  43.665 +    }
  43.666 +
  43.667 +  public:
  43.668 +    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
  43.669 +      _cm(cm), _hr(hr), _worker_id(worker_id) {
  43.670 +      assert(!_hr->in_collection_set(), "sanity");
  43.671 +    }
  43.672 +
  43.673 +    void do_oop(narrowOop* p) { do_oop_work(p); }
  43.674 +    void do_oop(oop* p)       { do_oop_work(p); }
  43.675 +  };
  43.676 +
  43.677 +  MarkStrongCodeRootOopClosure _oop_cl;
  43.678 +
  43.679 +public:
  43.680 +  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
  43.681 +    _oop_cl(cm, hr, worker_id) {}
  43.682 +
  43.683 +  void do_code_blob(CodeBlob* cb) {
  43.684 +    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
  43.685 +    if (nm != NULL) {
  43.686 +      nm->oops_do(&_oop_cl);
  43.687 +    }
  43.688 +  }
  43.689 +};
  43.690 +
  43.691 +class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
  43.692 +  G1CollectedHeap* _g1h;
  43.693 +  uint _worker_id;
  43.694 +
  43.695 +public:
  43.696 +  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
  43.697 +    _g1h(g1h), _worker_id(worker_id) {}
  43.698 +
  43.699 +  bool doHeapRegion(HeapRegion *hr) {
  43.700 +    HeapRegionRemSet* hrrs = hr->rem_set();
  43.701 +    if (hr->isHumongous()) {
  43.702 +      // Code roots should never be attached to a humongous region
  43.703 +      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
  43.704 +      return false;
  43.705 +    }
  43.706 +
  43.707 +    if (hr->in_collection_set()) {
  43.708 +      // Don't mark code roots into regions in the collection set here.
  43.709 +      // They will be marked when we scan them.
  43.710 +      return false;
  43.711 +    }
  43.712 +
  43.713 +    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
  43.714 +    hr->strong_code_roots_do(&cb_cl);
  43.715 +    return false;
  43.716 +  }
  43.717 +};
  43.718 +
  43.719 +void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
  43.720 +  MarkStrongCodeRootsHRClosure cl(this, worker_id);
  43.721 +  if (G1CollectedHeap::use_parallel_gc_threads()) {
  43.722 +    heap_region_par_iterate_chunked(&cl,
  43.723 +                                    worker_id,
  43.724 +                                    workers()->active_workers(),
  43.725 +                                    HeapRegion::ParMarkRootClaimValue);
  43.726 +  } else {
  43.727 +    heap_region_iterate(&cl);
  43.728 +  }
  43.729 +}
  43.730 +
  43.731 +class RebuildStrongCodeRootClosure: public CodeBlobClosure {
  43.732 +  G1CollectedHeap* _g1h;
  43.733 +
  43.734 +public:
  43.735 +  RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
  43.736 +    _g1h(g1h) {}
  43.737 +
  43.738 +  void do_code_blob(CodeBlob* cb) {
  43.739 +    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
  43.740 +    if (nm == NULL) {
  43.741 +      return;
  43.742 +    }
  43.743 +
  43.744 +    if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
  43.745 +      _g1h->register_nmethod(nm);
  43.746 +    }
  43.747 +  }
  43.748 +};
  43.749 +
  43.750 +void G1CollectedHeap::rebuild_strong_code_roots() {
  43.751 +  RebuildStrongCodeRootClosure blob_cl(this);
  43.752 +  CodeCache::blobs_do(&blob_cl);
  43.753 +}
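
The block comment above MarkStrongCodeRootCodeBlobClosure explains why the initial-mark walk over the code-root lists of regions outside the collection set must not use a "marking" CodeBlobToOopClosure: an nmethod that sits on two regions' lists would be processed for the first region, marked, and then skipped for the second, so the per-region work for the second region would never happen. The following standalone C++ sketch reproduces that scenario with invented toy types (it is not HotSpot code); the two scans are reduced to simple loops.

#include <cstdio>
#include <set>
#include <vector>

// Toy model of the scenario in the comment: nmethod M has one embedded oop in
// region R1 (in the collection set) and one in region R2 (not in the
// collection set), and M appears on both regions' strong-code-root lists.
struct ToyNmethod {
  std::vector<int> oop_regions;   // region index that contains each embedded oop
  bool marked = false;            // shared "this blob was already processed" bit
};

int main() {
  const int R1 = 1, R2 = 2;
  ToyNmethod M;
  M.oop_regions = {R1, R2};

  std::set<int> processed_for_r1;  // work done while handling R1's code roots
  std::set<int> grayed_in_r2;      // work the initial-mark walk should do for R2

  // First walk: R1's code-root list, with a closure that marks each blob so it
  // is not processed twice during this walk.
  if (!M.marked) {
    M.marked = true;
    for (int r : M.oop_regions) {
      if (r == R1) processed_for_r1.insert(r);
    }
  }

  // Second walk: R2's code-root list. If this walk also honoured the mark, M
  // would be skipped and nothing referenced from M inside R2 would be grayed.
  if (!M.marked) {
    for (int r : M.oop_regions) {
      if (r == R2) grayed_in_r2.insert(r);
    }
  }

  std::printf("oops grayed in R2: %zu (should be 1)\n", grayed_in_r2.size());
  // Prints 0, which is why mark_strong_code_roots() above uses a plain
  // CodeBlobClosure plus a per-region oop filter instead of a marking closure.
  return 0;
}
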
    44.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 23 22:12:18 2013 +0100
    44.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 30 09:50:49 2013 +0100
    44.3 @@ -46,6 +46,7 @@
    44.4  // may combine concurrent marking with parallel, incremental compaction of
    44.5  // heap subsets that will yield large amounts of garbage.
    44.6  
    44.7 +// Forward declarations
    44.8  class HeapRegion;
    44.9  class HRRSCleanupTask;
   44.10  class GenerationSpec;
   44.11 @@ -69,6 +70,7 @@
   44.12  class G1NewTracer;
   44.13  class G1OldTracer;
   44.14  class EvacuationFailedInfo;
   44.15 +class nmethod;
   44.16  
   44.17  typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
   44.18  typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
   44.19 @@ -163,18 +165,6 @@
   44.20      : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
   44.21  };
   44.22  
   44.23 -// The G1 STW is alive closure.
   44.24 -// An instance is embedded into the G1CH and used as the
   44.25 -// (optional) _is_alive_non_header closure in the STW
   44.26 -// reference processor. It is also extensively used during
   44.27 -// reference processing during STW evacuation pauses.
   44.28 -class G1STWIsAliveClosure: public BoolObjectClosure {
   44.29 -  G1CollectedHeap* _g1;
   44.30 -public:
   44.31 -  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   44.32 -  bool do_object_b(oop p);
   44.33 -};
   44.34 -
   44.35  class SurvivorGCAllocRegion : public G1AllocRegion {
   44.36  protected:
   44.37    virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
   44.38 @@ -193,6 +183,18 @@
   44.39    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
   44.40  };
   44.41  
   44.42 +// The G1 STW is alive closure.
   44.43 +// An instance is embedded into the G1CH and used as the
   44.44 +// (optional) _is_alive_non_header closure in the STW
   44.45 +// reference processor. It is also extensively used during
   44.46 +// reference processing during STW evacuation pauses.
   44.47 +class G1STWIsAliveClosure: public BoolObjectClosure {
   44.48 +  G1CollectedHeap* _g1;
   44.49 +public:
   44.50 +  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   44.51 +  bool do_object_b(oop p);
   44.52 +};
   44.53 +
   44.54  class RefineCardTableEntryClosure;
   44.55  
   44.56  class G1CollectedHeap : public SharedHeap {
   44.57 @@ -1549,42 +1551,6 @@
   44.58  
   44.59    virtual jlong millis_since_last_gc();
   44.60  
   44.61 -  // Perform any cleanup actions necessary before allowing a verification.
   44.62 -  virtual void prepare_for_verify();
   44.63 -
   44.64 -  // Perform verification.
   44.65 -
   44.66 -  // vo == UsePrevMarking  -> use "prev" marking information,
   44.67 -  // vo == UseNextMarking -> use "next" marking information
   44.68 -  // vo == UseMarkWord    -> use the mark word in the object header
   44.69 -  //
   44.70 -  // NOTE: Only the "prev" marking information is guaranteed to be
   44.71 -  // consistent most of the time, so most calls to this should use
   44.72 -  // vo == UsePrevMarking.
   44.73 -  // Currently, there is only one case where this is called with
   44.74 -  // vo == UseNextMarking, which is to verify the "next" marking
   44.75 -  // information at the end of remark.
   44.76 -  // Currently there is only one place where this is called with
   44.77 -  // vo == UseMarkWord, which is to verify the marking during a
   44.78 -  // full GC.
   44.79 -  void verify(bool silent, VerifyOption vo);
   44.80 -
   44.81 -  // Override; it uses the "prev" marking information
   44.82 -  virtual void verify(bool silent);
   44.83 -
   44.84 -  virtual void print_on(outputStream* st) const;
   44.85 -  virtual void print_extended_on(outputStream* st) const;
   44.86 -  virtual void print_on_error(outputStream* st) const;
   44.87 -
   44.88 -  virtual void print_gc_threads_on(outputStream* st) const;
   44.89 -  virtual void gc_threads_do(ThreadClosure* tc) const;
   44.90 -
   44.91 -  // Override
   44.92 -  void print_tracing_info() const;
   44.93 -
   44.94 -  // The following two methods are helpful for debugging RSet issues.
   44.95 -  void print_cset_rsets() PRODUCT_RETURN;
   44.96 -  void print_all_rsets() PRODUCT_RETURN;
   44.97  
   44.98    // Convenience function to be used in situations where the heap type can be
   44.99    // asserted to be this type.
  44.100 @@ -1661,13 +1627,86 @@
  44.101      else return is_obj_ill(obj, hr);
  44.102    }
  44.103  
  44.104 +  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
  44.105 +  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
  44.106 +  bool is_marked(oop obj, VerifyOption vo);
  44.107 +  const char* top_at_mark_start_str(VerifyOption vo);
  44.108 +
  44.109 +  ConcurrentMark* concurrent_mark() const { return _cm; }
  44.110 +
  44.111 +  // Refinement
  44.112 +
  44.113 +  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
  44.114 +
  44.115 +  // The dirty cards region list is used to record a subset of regions
  44.116 +  // whose cards need clearing. The list is populated during the
  44.117 +  // remembered set scanning and drained during the card table
  44.118 +  // cleanup. Although the methods are reentrant, population/draining
  44.119 +  // phases must not overlap. For synchronization purposes the last
  44.120 +  // element on the list points to itself.
  44.121 +  HeapRegion* _dirty_cards_region_list;
  44.122 +  void push_dirty_cards_region(HeapRegion* hr);
  44.123 +  HeapRegion* pop_dirty_cards_region();
  44.124 +
  44.125 +  // Optimized nmethod scanning support routines
  44.126 +
  44.127 +  // Register the given nmethod with the G1 heap
  44.128 +  virtual void register_nmethod(nmethod* nm);
  44.129 +
  44.130 +  // Unregister the given nmethod from the G1 heap
  44.131 +  virtual void unregister_nmethod(nmethod* nm);
  44.132 +
  44.133 +  // Migrate the nmethods in the code root lists of the regions
  44.134 +  // in the collection set to regions in to-space. In the event
  44.135 +  // of an evacuation failure, nmethods that reference objects
  44.136 +  // that were not successfully evacuated are not migrated.
  44.137 +  void migrate_strong_code_roots();
  44.138 +
  44.139 +  // During an initial mark pause, mark all the code roots that
  44.140 +  // point into regions *not* in the collection set.
  44.141 +  void mark_strong_code_roots(uint worker_id);
  44.142 +
  44.143 +  // Rebuild the strong code root lists for each region
  44.144 +  // after a full GC
  44.145 +  void rebuild_strong_code_roots();
  44.146 +
  44.147 +  // Verification
  44.148 +
  44.149 +  // The following is just to alert the verification code
  44.150 +  // that a full collection has occurred and that the
  44.151 +  // remembered sets are no longer up to date.
  44.152 +  bool _full_collection;
  44.153 +  void set_full_collection() { _full_collection = true;}
  44.154 +  void clear_full_collection() {_full_collection = false;}
  44.155 +  bool full_collection() {return _full_collection;}
  44.156 +
  44.157 +  // Perform any cleanup actions necessary before allowing a verification.
  44.158 +  virtual void prepare_for_verify();
  44.159 +
  44.160 +  // Perform verification.
  44.161 +
  44.162 +  // vo == UsePrevMarking  -> use "prev" marking information,
  44.163 +  // vo == UseNextMarking -> use "next" marking information
  44.164 +  // vo == UseMarkWord    -> use the mark word in the object header
  44.165 +  //
  44.166 +  // NOTE: Only the "prev" marking information is guaranteed to be
  44.167 +  // consistent most of the time, so most calls to this should use
  44.168 +  // vo == UsePrevMarking.
  44.169 +  // Currently, there is only one case where this is called with
  44.170 +  // vo == UseNextMarking, which is to verify the "next" marking
  44.171 +  // information at the end of remark.
  44.172 +  // Currently there is only one place where this is called with
  44.173 +  // vo == UseMarkWord, which is to verify the marking during a
  44.174 +  // full GC.
  44.175 +  void verify(bool silent, VerifyOption vo);
  44.176 +
  44.177 +  // Override; it uses the "prev" marking information
  44.178 +  virtual void verify(bool silent);
  44.179 +
  44.180    // The methods below are here for convenience and dispatch the
  44.181    // appropriate method depending on value of the given VerifyOption
  44.182 -  // parameter. The options for that parameter are:
  44.183 -  //
  44.184 -  // vo == UsePrevMarking -> use "prev" marking information,
  44.185 -  // vo == UseNextMarking -> use "next" marking information,
  44.186 -  // vo == UseMarkWord    -> use mark word from object header
  44.187 +  // parameter. The values for that parameter, and their meanings,
  44.188 +  // are the same as those above.
  44.189  
  44.190    bool is_obj_dead_cond(const oop obj,
  44.191                          const HeapRegion* hr,
  44.192 @@ -1692,31 +1731,21 @@
  44.193      return false; // keep some compilers happy
  44.194    }
  44.195  
  44.196 -  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
  44.197 -  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
  44.198 -  bool is_marked(oop obj, VerifyOption vo);
  44.199 -  const char* top_at_mark_start_str(VerifyOption vo);
  44.200 +  // Printing
  44.201  
  44.202 -  // The following is just to alert the verification code
  44.203 -  // that a full collection has occurred and that the
  44.204 -  // remembered sets are no longer up to date.
  44.205 -  bool _full_collection;
  44.206 -  void set_full_collection() { _full_collection = true;}
  44.207 -  void clear_full_collection() {_full_collection = false;}
  44.208 -  bool full_collection() {return _full_collection;}
  44.209 +  virtual void print_on(outputStream* st) const;
  44.210 +  virtual void print_extended_on(outputStream* st) const;
  44.211 +  virtual void print_on_error(outputStream* st) const;
  44.212  
  44.213 -  ConcurrentMark* concurrent_mark() const { return _cm; }
  44.214 -  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
  44.215 +  virtual void print_gc_threads_on(outputStream* st) const;
  44.216 +  virtual void gc_threads_do(ThreadClosure* tc) const;
  44.217  
  44.218 -  // The dirty cards region list is used to record a subset of regions
  44.219 -  // whose cards need clearing. The list if populated during the
  44.220 -  // remembered set scanning and drained during the card table
  44.221 -  // cleanup. Although the methods are reentrant, population/draining
  44.222 -  // phases must not overlap. For synchronization purposes the last
  44.223 -  // element on the list points to itself.
  44.224 -  HeapRegion* _dirty_cards_region_list;
  44.225 -  void push_dirty_cards_region(HeapRegion* hr);
  44.226 -  HeapRegion* pop_dirty_cards_region();
  44.227 +  // Override
  44.228 +  void print_tracing_info() const;
  44.229 +
  44.230 +  // The following two methods are helpful for debugging RSet issues.
  44.231 +  void print_cset_rsets() PRODUCT_RETURN;
  44.232 +  void print_all_rsets() PRODUCT_RETURN;
  44.233  
  44.234  public:
  44.235    void stop_conc_gc_threads();
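
The relocated comment above notes that, for synchronization purposes, the last element of the dirty-cards region list points to itself. One plausible reason for that convention (an assumption here, not spelled out in the source) is that it lets a null link mean unambiguously "not on any list", while a self-link marks the tail. The toy, single-threaded C++ list below illustrates that shape; the types are invented and this is not the HotSpot implementation.

#include <cassert>

// Toy intrusive list whose tail links to itself, so a null link always means
// "this region is not on the list" (illustrative only).
struct ToyRegion {
  ToyRegion* next_dirty = nullptr;
  bool on_list() const { return next_dirty != nullptr; }
};

struct DirtyRegionList {
  ToyRegion* head = nullptr;

  void push(ToyRegion* r) {
    assert(!r->on_list());
    r->next_dirty = (head == nullptr) ? r : head;   // first element points to itself
    head = r;
  }

  ToyRegion* pop() {
    if (head == nullptr) return nullptr;
    ToyRegion* r = head;
    head = (r->next_dirty == r) ? nullptr : r->next_dirty;  // self-link marks the tail
    r->next_dirty = nullptr;                                // back to "not on any list"
    return r;
  }
};

int main() {
  ToyRegion a, b;
  DirtyRegionList list;
  list.push(&a);
  list.push(&b);
  assert(a.on_list() && b.on_list());
  assert(list.pop() == &b);
  assert(list.pop() == &a);
  assert(list.pop() == nullptr);
  assert(!a.on_list() && !b.on_list());
  return 0;
}
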
    45.1 --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Fri Aug 23 22:12:18 2013 +0100
    45.2 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Fri Aug 30 09:50:49 2013 +0100
    45.3 @@ -161,6 +161,8 @@
    45.4    _last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
    45.5    _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
    45.6    _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
    45.7 +  _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
    45.8 +  _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
    45.9    _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
   45.10    _last_termination_times_ms(_max_gc_threads, "%.1lf"),
   45.11    _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
   45.12 @@ -182,6 +184,8 @@
   45.13    _last_update_rs_times_ms.reset();
   45.14    _last_update_rs_processed_buffers.reset();
   45.15    _last_scan_rs_times_ms.reset();
   45.16 +  _last_strong_code_root_scan_times_ms.reset();
   45.17 +  _last_strong_code_root_mark_times_ms.reset();
   45.18    _last_obj_copy_times_ms.reset();
   45.19    _last_termination_times_ms.reset();
   45.20    _last_termination_attempts.reset();
   45.21 @@ -197,6 +201,8 @@
   45.22    _last_update_rs_times_ms.verify();
   45.23    _last_update_rs_processed_buffers.verify();
   45.24    _last_scan_rs_times_ms.verify();
   45.25 +  _last_strong_code_root_scan_times_ms.verify();
   45.26 +  _last_strong_code_root_mark_times_ms.verify();
   45.27    _last_obj_copy_times_ms.verify();
   45.28    _last_termination_times_ms.verify();
   45.29    _last_termination_attempts.verify();
   45.30 @@ -210,6 +216,8 @@
   45.31                                 _last_satb_filtering_times_ms.get(i) +
   45.32                                 _last_update_rs_times_ms.get(i) +
   45.33                                 _last_scan_rs_times_ms.get(i) +
   45.34 +                               _last_strong_code_root_scan_times_ms.get(i) +
   45.35 +                               _last_strong_code_root_mark_times_ms.get(i) +
   45.36                                 _last_obj_copy_times_ms.get(i) +
   45.37                                 _last_termination_times_ms.get(i);
   45.38  
   45.39 @@ -239,6 +247,9 @@
   45.40      // Now subtract the time taken to fix up roots in generated code
   45.41      misc_time_ms += _cur_collection_code_root_fixup_time_ms;
   45.42  
   45.43 +    // Strong code root migration time
   45.44 +    misc_time_ms += _cur_strong_code_root_migration_time_ms;
   45.45 +
   45.46      // Subtract the time taken to clean the card table from the
   45.47      // current value of "other time"
   45.48      misc_time_ms += _cur_clear_ct_time_ms;
   45.49 @@ -257,9 +268,13 @@
   45.50      if (_last_satb_filtering_times_ms.sum() > 0.0) {
   45.51        _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
   45.52      }
   45.53 +    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
   45.54 +      _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
   45.55 +    }
   45.56      _last_update_rs_times_ms.print(2, "Update RS (ms)");
   45.57        _last_update_rs_processed_buffers.print(3, "Processed Buffers");
   45.58      _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
   45.59 +    _last_strong_code_root_scan_times_ms.print(2, "Code Root Scanning (ms)");
   45.60      _last_obj_copy_times_ms.print(2, "Object Copy (ms)");
   45.61      _last_termination_times_ms.print(2, "Termination (ms)");
   45.62      if (G1Log::finest()) {
   45.63 @@ -273,12 +288,17 @@
   45.64      if (_last_satb_filtering_times_ms.sum() > 0.0) {
   45.65        _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
   45.66      }
   45.67 +    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
   45.68 +      _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
   45.69 +    }
   45.70      _last_update_rs_times_ms.print(1, "Update RS (ms)");
   45.71        _last_update_rs_processed_buffers.print(2, "Processed Buffers");
   45.72      _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
   45.73 +    _last_strong_code_root_scan_times_ms.print(1, "Code Root Scanning (ms)");
   45.74      _last_obj_copy_times_ms.print(1, "Object Copy (ms)");
   45.75    }
   45.76    print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
   45.77 +  print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
   45.78    print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
   45.79    double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
   45.80    print_stats(1, "Other", misc_time_ms);
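
The changes to g1GCPhaseTimes.cpp above both print the strong-code-root migration time on its own line and add it to the accounted (misc) total, so it is no longer silently folded into "Other". A tiny standalone sketch of that bookkeeping follows; the numbers are made up and this shows only the arithmetic, not the G1GCPhaseTimes API.

#include <cstdio>

int main() {
  // Made-up sub-phase times for one pause, in milliseconds.
  double pause_time_ms               = 52.0;
  double parallel_time_ms            = 40.0;
  double code_root_fixup_time_ms     = 1.5;
  double code_root_migration_time_ms = 0.8;   // the newly reported sub-phase
  double clear_ct_time_ms            = 2.2;

  // Everything reported under its own heading counts as "accounted" time.
  double accounted_ms = parallel_time_ms
                      + code_root_fixup_time_ms
                      + code_root_migration_time_ms   // added by this change
                      + clear_ct_time_ms;

  // Whatever is left over shows up as "Other" in the pause log. Without the
  // migration term, the 0.8 ms would be silently folded into "Other".
  double other_ms = pause_time_ms - accounted_ms;
  std::printf("Other: %.1f ms\n", other_ms);          // prints 7.5 ms
  return 0;
}
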
    46.1 --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Fri Aug 23 22:12:18 2013 +0100
    46.2 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Fri Aug 30 09:50:49 2013 +0100
    46.3 @@ -119,6 +119,8 @@
    46.4    WorkerDataArray<double> _last_update_rs_times_ms;
    46.5    WorkerDataArray<int>    _last_update_rs_processed_buffers;
    46.6    WorkerDataArray<double> _last_scan_rs_times_ms;
    46.7 +  WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
    46.8 +  WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
    46.9    WorkerDataArray<double> _last_obj_copy_times_ms;
   46.10    WorkerDataArray<double> _last_termination_times_ms;
   46.11    WorkerDataArray<size_t> _last_termination_attempts;
   46.12 @@ -128,6 +130,7 @@
   46.13  
   46.14    double _cur_collection_par_time_ms;
   46.15    double _cur_collection_code_root_fixup_time_ms;
   46.16 +  double _cur_strong_code_root_migration_time_ms;
   46.17  
   46.18    double _cur_clear_ct_time_ms;
   46.19    double _cur_ref_proc_time_ms;
   46.20 @@ -179,6 +182,14 @@
   46.21      _last_scan_rs_times_ms.set(worker_i, ms);
   46.22    }
   46.23  
   46.24 +  void record_strong_code_root_scan_time(uint worker_i, double ms) {
   46.25 +    _last_strong_code_root_scan_times_ms.set(worker_i, ms);
   46.26 +  }
   46.27 +
   46.28 +  void record_strong_code_root_mark_time(uint worker_i, double ms) {
   46.29 +    _last_strong_code_root_mark_times_ms.set(worker_i, ms);
   46.30 +  }
   46.31 +
   46.32    void record_obj_copy_time(uint worker_i, double ms) {
   46.33      _last_obj_copy_times_ms.set(worker_i, ms);
   46.34    }
   46.35 @@ -208,6 +219,10 @@
   46.36      _cur_collection_code_root_fixup_time_ms = ms;
   46.37    }
   46.38  
   46.39 +  void record_strong_code_root_migration_time(double ms) {
   46.40 +    _cur_strong_code_root_migration_time_ms = ms;
   46.41 +  }
   46.42 +
   46.43    void record_ref_proc_time(double ms) {
   46.44      _cur_ref_proc_time_ms = ms;
   46.45    }
   46.46 @@ -294,6 +309,14 @@
   46.47      return _last_scan_rs_times_ms.average();
   46.48    }
   46.49  
   46.50 +  double average_last_strong_code_root_scan_time() {
   46.51 +    return _last_strong_code_root_scan_times_ms.average();
   46.52 +  }
   46.53 +
   46.54 +  double average_last_strong_code_root_mark_time() {
   46.55 +    return _last_strong_code_root_mark_times_ms.average();
   46.56 +  }
   46.57 +
   46.58    double average_last_obj_copy_time() {
   46.59      return _last_obj_copy_times_ms.average();
   46.60    }
    47.1 --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Fri Aug 23 22:12:18 2013 +0100
    47.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Fri Aug 30 09:50:49 2013 +0100
    47.3 @@ -262,6 +262,7 @@
    47.4      old_collection_counters()->update_all();
    47.5      young_collection_counters()->update_all();
    47.6      MetaspaceCounters::update_performance_counters();
    47.7 +    CompressedClassSpaceCounters::update_performance_counters();
    47.8    }
    47.9  }
   47.10  
    48.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Aug 23 22:12:18 2013 +0100
    48.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Aug 30 09:50:49 2013 +0100
    48.3 @@ -104,15 +104,25 @@
    48.4  class ScanRSClosure : public HeapRegionClosure {
    48.5    size_t _cards_done, _cards;
    48.6    G1CollectedHeap* _g1h;
    48.7 +
    48.8    OopsInHeapRegionClosure* _oc;
    48.9 +  CodeBlobToOopClosure* _code_root_cl;
   48.10 +
   48.11    G1BlockOffsetSharedArray* _bot_shared;
   48.12    CardTableModRefBS *_ct_bs;
   48.13 -  int _worker_i;
   48.14 -  int _block_size;
   48.15 -  bool _try_claimed;
   48.16 +
   48.17 +  double _strong_code_root_scan_time_sec;
   48.18 +  int    _worker_i;
   48.19 +  int    _block_size;
   48.20 +  bool   _try_claimed;
   48.21 +
   48.22  public:
   48.23 -  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
   48.24 +  ScanRSClosure(OopsInHeapRegionClosure* oc,
   48.25 +                CodeBlobToOopClosure* code_root_cl,
   48.26 +                int worker_i) :
   48.27      _oc(oc),
   48.28 +    _code_root_cl(code_root_cl),
   48.29 +    _strong_code_root_scan_time_sec(0.0),
   48.30      _cards(0),
   48.31      _cards_done(0),
   48.32      _worker_i(worker_i),
   48.33 @@ -160,6 +170,12 @@
   48.34                             card_start, card_start + G1BlockOffsetSharedArray::N_words);
   48.35    }
   48.36  
   48.37 +  void scan_strong_code_roots(HeapRegion* r) {
   48.38 +    double scan_start = os::elapsedTime();
   48.39 +    r->strong_code_roots_do(_code_root_cl);
   48.40 +    _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
   48.41 +  }
   48.42 +
   48.43    bool doHeapRegion(HeapRegion* r) {
   48.44      assert(r->in_collection_set(), "should only be called on elements of CS.");
   48.45      HeapRegionRemSet* hrrs = r->rem_set();
   48.46 @@ -173,6 +189,7 @@
   48.47      //   _try_claimed || r->claim_iter()
   48.48      // is true: either we're supposed to work on claimed-but-not-complete
   48.49      // regions, or we successfully claimed the region.
   48.50 +
   48.51      HeapRegionRemSetIterator iter(hrrs);
   48.52      size_t card_index;
   48.53  
   48.54 @@ -205,30 +222,43 @@
   48.55        }
   48.56      }
   48.57      if (!_try_claimed) {
   48.58 +      // Scan the strong code root list attached to the current region
   48.59 +      scan_strong_code_roots(r);
   48.60 +
   48.61        hrrs->set_iter_complete();
   48.62      }
   48.63      return false;
   48.64    }
   48.65 +
   48.66 +  double strong_code_root_scan_time_sec() {
   48.67 +    return _strong_code_root_scan_time_sec;
   48.68 +  }
   48.69 +
   48.70    size_t cards_done() { return _cards_done;}
   48.71    size_t cards_looked_up() { return _cards;}
   48.72  };
   48.73  
   48.74 -void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   48.75 +void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
   48.76 +                      CodeBlobToOopClosure* code_root_cl,
   48.77 +                      int worker_i) {
   48.78    double rs_time_start = os::elapsedTime();
   48.79    HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
   48.80  
   48.81 -  ScanRSClosure scanRScl(oc, worker_i);
   48.82 +  ScanRSClosure scanRScl(oc, code_root_cl, worker_i);
   48.83  
   48.84    _g1->collection_set_iterate_from(startRegion, &scanRScl);
   48.85    scanRScl.set_try_claimed();
   48.86    _g1->collection_set_iterate_from(startRegion, &scanRScl);
   48.87  
   48.88 -  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
   48.89 +  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
   48.90 +                            - scanRScl.strong_code_root_scan_time_sec();
   48.91  
   48.92 -  assert( _cards_scanned != NULL, "invariant" );
   48.93 +  assert(_cards_scanned != NULL, "invariant");
   48.94    _cards_scanned[worker_i] = scanRScl.cards_done();
   48.95  
   48.96    _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
   48.97 +  _g1p->phase_times()->record_strong_code_root_scan_time(worker_i,
   48.98 +                                                         scanRScl.strong_code_root_scan_time_sec() * 1000.0);
   48.99  }
  48.100  
  48.101  // Closure used for updating RSets and recording references that
  48.102 @@ -288,7 +318,8 @@
  48.103  }
  48.104  
  48.105  void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
  48.106 -                                             int worker_i) {
  48.107 +                                           CodeBlobToOopClosure* code_root_cl,
  48.108 +                                           int worker_i) {
  48.109  #if CARD_REPEAT_HISTO
  48.110    ct_freq_update_histo_and_reset();
  48.111  #endif
  48.112 @@ -328,7 +359,7 @@
  48.113      _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
  48.114    }
  48.115    if (G1UseParallelRSetScanning || (worker_i == 0)) {
  48.116 -    scanRS(oc, worker_i);
  48.117 +    scanRS(oc, code_root_cl, worker_i);
  48.118    } else {
  48.119      _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
  48.120    }
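
ScanRSClosure above accumulates the time spent in scan_strong_code_roots() separately, and scanRS() subtracts that accumulated time from the overall remembered-set scan time before recording either value, so the "Scan RS" and "Code Root Scanning" lines do not double-count the same wall-clock interval. A minimal standalone C++ sketch of the pattern follows; the timed work is just a sleep placeholder.

#include <chrono>
#include <cstdio>
#include <thread>

static double seconds_since(std::chrono::steady_clock::time_point start) {
  return std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
}

int main() {
  using namespace std::chrono_literals;

  auto rs_start = std::chrono::steady_clock::now();

  std::this_thread::sleep_for(20ms);          // stand-in for iterating the remembered set

  auto code_root_start = std::chrono::steady_clock::now();
  std::this_thread::sleep_for(5ms);           // stand-in for walking the strong code roots
  double code_root_sec = seconds_since(code_root_start);

  // Report the RS scan *excluding* the nested code-root walk, as scanRS() does,
  // so each millisecond is attributed to exactly one log line.
  double scan_rs_sec = seconds_since(rs_start) - code_root_sec;

  std::printf("Scan RS: %.1f ms, Code Root Scanning: %.1f ms\n",
              scan_rs_sec * 1000.0, code_root_sec * 1000.0);
  return 0;
}
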
    49.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Aug 23 22:12:18 2013 +0100
    49.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Aug 30 09:50:49 2013 +0100
    49.3 @@ -81,14 +81,23 @@
    49.4    G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
    49.5    ~G1RemSet();
    49.6  
    49.7 -  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
    49.8 -  // outside the CS (having invoked "blk->set_region" to set the "from"
    49.9 -  // region correctly beforehand.) The "worker_i" param is for the
   49.10 -  // parallel case where the number of the worker thread calling this
   49.11 -  // function can be helpful in partitioning the work to be done. It
   49.12 -  // should be the same as the "i" passed to the calling thread's
   49.13 -  // work(i) function. In the sequential case this param will be ingored.
   49.14 -  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
   49.15 +  // Invoke "blk->do_oop" on all pointers into the collection set
   49.16 +  // from objects in regions outside the collection set (having
   49.17 +  // invoked "blk->set_region" to set the "from" region correctly
   49.18 +  // beforehand.)
   49.19 +  //
   49.20 +  // Invoke code_root_cl->do_code_blob on the unmarked nmethods
   49.21 +  // on the strong code roots list for each region in the
   49.22 +  // collection set.
   49.23 +  //
   49.24 +  // The "worker_i" param is for the parallel case where the id
   49.25 +  // of the worker thread calling this function can be helpful in
   49.26 +  // partitioning the work to be done. It should be the same as
   49.27 +  // the "i" passed to the calling thread's work(i) function.
   49.28 +  // In the sequential case this param will be ignored.
   49.29 +  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
   49.30 +                                   CodeBlobToOopClosure* code_root_cl,
   49.31 +                                   int worker_i);
   49.32  
   49.33    // Prepare for and cleanup after an oops_into_collection_set_do
   49.34    // call.  Must call each of these once before and after (in sequential
   49.35 @@ -98,7 +107,10 @@
   49.36    void prepare_for_oops_into_collection_set_do();
   49.37    void cleanup_after_oops_into_collection_set_do();
   49.38  
   49.39 -  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
   49.40 +  void scanRS(OopsInHeapRegionClosure* oc,
   49.41 +              CodeBlobToOopClosure* code_root_cl,
   49.42 +              int worker_i);
   49.43 +
   49.44    void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
   49.45  
   49.46    CardTableModRefBS* ct_bs() { return _ct_bs; }
    50.1 --- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Fri Aug 23 22:12:18 2013 +0100
    50.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Fri Aug 30 09:50:49 2013 +0100
    50.3 @@ -127,32 +127,55 @@
    50.4  
    50.5  class HRRSStatsIter: public HeapRegionClosure {
    50.6    size_t _occupied;
    50.7 -  size_t _total_mem_sz;
    50.8 -  size_t _max_mem_sz;
    50.9 -  HeapRegion* _max_mem_sz_region;
   50.10 +
   50.11 +  size_t _total_rs_mem_sz;
   50.12 +  size_t _max_rs_mem_sz;
   50.13 +  HeapRegion* _max_rs_mem_sz_region;
   50.14 +
   50.15 +  size_t _total_code_root_mem_sz;
   50.16 +  size_t _max_code_root_mem_sz;
   50.17 +  HeapRegion* _max_code_root_mem_sz_region;
   50.18  public:
   50.19    HRRSStatsIter() :
   50.20      _occupied(0),
   50.21 -    _total_mem_sz(0),
   50.22 -    _max_mem_sz(0),
   50.23 -    _max_mem_sz_region(NULL)
   50.24 +    _total_rs_mem_sz(0),
   50.25 +    _max_rs_mem_sz(0),
   50.26 +    _max_rs_mem_sz_region(NULL),
   50.27 +    _total_code_root_mem_sz(0),
   50.28 +    _max_code_root_mem_sz(0),
   50.29 +    _max_code_root_mem_sz_region(NULL)
   50.30    {}
   50.31  
   50.32    bool doHeapRegion(HeapRegion* r) {
   50.33 -    size_t mem_sz = r->rem_set()->mem_size();
   50.34 -    if (mem_sz > _max_mem_sz) {
   50.35 -      _max_mem_sz = mem_sz;
   50.36 -      _max_mem_sz_region = r;
   50.37 +    HeapRegionRemSet* hrrs = r->rem_set();
   50.38 +
   50.39 +    // HeapRegionRemSet::mem_size() includes the
   50.40 +    // size of the strong code roots
   50.41 +    size_t rs_mem_sz = hrrs->mem_size();
   50.42 +    if (rs_mem_sz > _max_rs_mem_sz) {
   50.43 +      _max_rs_mem_sz = rs_mem_sz;
   50.44 +      _max_rs_mem_sz_region = r;
   50.45      }
   50.46 -    _total_mem_sz += mem_sz;
   50.47 -    size_t occ = r->rem_set()->occupied();
   50.48 +    _total_rs_mem_sz += rs_mem_sz;
   50.49 +
   50.50 +    size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
   50.51 +    if (code_root_mem_sz > _max_code_root_mem_sz) {
   50.52 +      _max_code_root_mem_sz = code_root_mem_sz;
   50.53 +      _max_code_root_mem_sz_region = r;
   50.54 +    }
   50.55 +    _total_code_root_mem_sz += code_root_mem_sz;
   50.56 +
   50.57 +    size_t occ = hrrs->occupied();
   50.58      _occupied += occ;
   50.59      return false;
   50.60    }
   50.61 -  size_t total_mem_sz() { return _total_mem_sz; }
   50.62 -  size_t max_mem_sz() { return _max_mem_sz; }
   50.63 +  size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
   50.64 +  size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
   50.65 +  HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
   50.66 +  size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
   50.67 +  size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
   50.68 +  HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
   50.69    size_t occupied() { return _occupied; }
   50.70 -  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
   50.71  };
   50.72  
   50.73  double calc_percentage(size_t numerator, size_t denominator) {
   50.74 @@ -184,22 +207,33 @@
   50.75  
   50.76    HRRSStatsIter blk;
   50.77    G1CollectedHeap::heap()->heap_region_iterate(&blk);
   50.78 +  // RemSet stats
   50.79    out->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
   50.80                  "  Max = "SIZE_FORMAT"K.",
   50.81 -                blk.total_mem_sz()/K, blk.max_mem_sz()/K);
   50.82 +                blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
   50.83    out->print_cr("  Static structures = "SIZE_FORMAT"K,"
   50.84                  " free_lists = "SIZE_FORMAT"K.",
   50.85                  HeapRegionRemSet::static_mem_size() / K,
   50.86                  HeapRegionRemSet::fl_mem_size() / K);
   50.87    out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
   50.88                  blk.occupied());
   50.89 -  HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
   50.90 -  HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
   50.91 +  HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
   50.92 +  HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
   50.93    out->print_cr("    Max size region = "HR_FORMAT", "
   50.94                  "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
   50.95 -                HR_FORMAT_PARAMS(max_mem_sz_region),
   50.96 -                (rem_set->mem_size() + K - 1)/K,
   50.97 -                (rem_set->occupied() + K - 1)/K);
   50.98 -
   50.99 +                HR_FORMAT_PARAMS(max_rs_mem_sz_region),
  50.100 +                (max_rs_rem_set->mem_size() + K - 1)/K,
  50.101 +                (max_rs_rem_set->occupied() + K - 1)/K);
  50.102    out->print_cr("    Did %d coarsenings.", num_coarsenings());
  50.103 +  // Strong code root stats
  50.104 +  out->print_cr("  Total heap region code-root set sizes = "SIZE_FORMAT"K."
  50.105 +                "  Max = "SIZE_FORMAT"K.",
  50.106 +                blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
  50.107 +  HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
  50.108 +  HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
  50.109 +  out->print_cr("    Max size region = "HR_FORMAT", "
  50.110 +                "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
  50.111 +                HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
  50.112 +                (max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
  50.113 +                (max_code_root_rem_set->strong_code_roots_list_length()));
  50.114  }
    51.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Aug 23 22:12:18 2013 +0100
    51.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Aug 30 09:50:49 2013 +0100
    51.3 @@ -319,7 +319,10 @@
    51.4                                                                              \
    51.5    diagnostic(bool, G1VerifyRSetsDuringFullGC, false,                        \
    51.6               "If true, perform verification of each heap region's "         \
    51.7 -             "remembered set when verifying the heap during a full GC.")
    51.8 +             "remembered set when verifying the heap during a full GC.")    \
    51.9 +                                                                            \
   51.10 +  diagnostic(bool, G1VerifyHeapRegionCodeRoots, false,                      \
   51.11 +             "Verify the code root lists attached to each heap region.")
   51.12  
   51.13  G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
   51.14  
    52.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Aug 23 22:12:18 2013 +0100
    52.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Aug 30 09:50:49 2013 +0100
    52.3 @@ -23,6 +23,7 @@
    52.4   */
    52.5  
    52.6  #include "precompiled.hpp"
    52.7 +#include "code/nmethod.hpp"
    52.8  #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
    52.9  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   52.10  #include "gc_implementation/g1/g1OopClosures.inline.hpp"
   52.11 @@ -50,144 +51,6 @@
   52.12                                                     OopClosure* oc) :
   52.13    _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
   52.14  
   52.15 -class VerifyLiveClosure: public OopClosure {
   52.16 -private:
   52.17 -  G1CollectedHeap* _g1h;
   52.18 -  CardTableModRefBS* _bs;
   52.19 -  oop _containing_obj;
   52.20 -  bool _failures;
   52.21 -  int _n_failures;
   52.22 -  VerifyOption _vo;
   52.23 -public:
   52.24 -  // _vo == UsePrevMarking -> use "prev" marking information,
   52.25 -  // _vo == UseNextMarking -> use "next" marking information,
   52.26 -  // _vo == UseMarkWord    -> use mark word from object header.
   52.27 -  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
   52.28 -    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
   52.29 -    _failures(false), _n_failures(0), _vo(vo)
   52.30 -  {
   52.31 -    BarrierSet* bs = _g1h->barrier_set();
   52.32 -    if (bs->is_a(BarrierSet::CardTableModRef))
   52.33 -      _bs = (CardTableModRefBS*)bs;
   52.34 -  }
   52.35 -
   52.36 -  void set_containing_obj(oop obj) {
   52.37 -    _containing_obj = obj;
   52.38 -  }
   52.39 -
   52.40 -  bool failures() { return _failures; }
   52.41 -  int n_failures() { return _n_failures; }
   52.42 -
   52.43 -  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   52.44 -  virtual void do_oop(      oop* p) { do_oop_work(p); }
   52.45 -
   52.46 -  void print_object(outputStream* out, oop obj) {
   52.47 -#ifdef PRODUCT
   52.48 -    Klass* k = obj->klass();
   52.49 -    const char* class_name = InstanceKlass::cast(k)->external_name();
   52.50 -    out->print_cr("class name %s", class_name);
   52.51 -#else // PRODUCT
   52.52 -    obj->print_on(out);
   52.53 -#endif // PRODUCT
   52.54 -  }
   52.55 -
   52.56 -  template <class T>
   52.57 -  void do_oop_work(T* p) {
   52.58 -    assert(_containing_obj != NULL, "Precondition");
   52.59 -    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
   52.60 -           "Precondition");
   52.61 -    T heap_oop = oopDesc::load_heap_oop(p);
   52.62 -    if (!oopDesc::is_null(heap_oop)) {
   52.63 -      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
   52.64 -      bool failed = false;
   52.65 -      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
   52.66 -        MutexLockerEx x(ParGCRareEvent_lock,
   52.67 -                        Mutex::_no_safepoint_check_flag);
   52.68 -
   52.69 -        if (!_failures) {
   52.70 -          gclog_or_tty->print_cr("");
   52.71 -          gclog_or_tty->print_cr("----------");
   52.72 -        }
   52.73 -        if (!_g1h->is_in_closed_subset(obj)) {
   52.74 -          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
   52.75 -          gclog_or_tty->print_cr("Field "PTR_FORMAT
   52.76 -                                 " of live obj "PTR_FORMAT" in region "
   52.77 -                                 "["PTR_FORMAT", "PTR_FORMAT")",
   52.78 -                                 p, (void*) _containing_obj,
   52.79 -                                 from->bottom(), from->end());
   52.80 -          print_object(gclog_or_tty, _containing_obj);
   52.81 -          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
   52.82 -                                 (void*) obj);
   52.83 -        } else {
   52.84 -          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
   52.85 -          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
   52.86 -          gclog_or_tty->print_cr("Field "PTR_FORMAT
   52.87 -                                 " of live obj "PTR_FORMAT" in region "
   52.88 -                                 "["PTR_FORMAT", "PTR_FORMAT")",
   52.89 -                                 p, (void*) _containing_obj,
   52.90 -                                 from->bottom(), from->end());
   52.91 -          print_object(gclog_or_tty, _containing_obj);
   52.92 -          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
   52.93 -                                 "["PTR_FORMAT", "PTR_FORMAT")",
   52.94 -                                 (void*) obj, to->bottom(), to->end());
   52.95 -          print_object(gclog_or_tty, obj);
   52.96 -        }
   52.97 -        gclog_or_tty->print_cr("----------");
   52.98 -        gclog_or_tty->flush();
   52.99 -        _failures = true;
  52.100 -        failed = true;
  52.101 -        _n_failures++;
  52.102 -      }
  52.103 -
  52.104 -      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
  52.105 -        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
  52.106 -        HeapRegion* to   = _g1h->heap_region_containing(obj);
  52.107 -        if (from != NULL && to != NULL &&
  52.108 -            from != to &&
  52.109 -            !to->isHumongous()) {
  52.110 -          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
  52.111 -          jbyte cv_field = *_bs->byte_for_const(p);
  52.112 -          const jbyte dirty = CardTableModRefBS::dirty_card_val();
  52.113 -
  52.114 -          bool is_bad = !(from->is_young()
  52.115 -                          || to->rem_set()->contains_reference(p)
  52.116 -                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
  52.117 -                              (_containing_obj->is_objArray() ?
  52.118 -                                  cv_field == dirty
  52.119 -                               : cv_obj == dirty || cv_field == dirty));
  52.120 -          if (is_bad) {
  52.121 -            MutexLockerEx x(ParGCRareEvent_lock,
  52.122 -                            Mutex::_no_safepoint_check_flag);
  52.123 -
  52.124 -            if (!_failures) {
  52.125 -              gclog_or_tty->print_cr("");
  52.126 -              gclog_or_tty->print_cr("----------");
  52.127 -            }
  52.128 -            gclog_or_tty->print_cr("Missing rem set entry:");
  52.129 -            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
  52.130 -                                   "of obj "PTR_FORMAT", "
  52.131 -                                   "in region "HR_FORMAT,
  52.132 -                                   p, (void*) _containing_obj,
  52.133 -                                   HR_FORMAT_PARAMS(from));
  52.134 -            _containing_obj->print_on(gclog_or_tty);
  52.135 -            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
  52.136 -                                   "in region "HR_FORMAT,
  52.137 -                                   (void*) obj,
  52.138 -                                   HR_FORMAT_PARAMS(to));
  52.139 -            obj->print_on(gclog_or_tty);
  52.140 -            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
  52.141 -                          cv_obj, cv_field);
  52.142 -            gclog_or_tty->print_cr("----------");
  52.143 -            gclog_or_tty->flush();
  52.144 -            _failures = true;
  52.145 -            if (!failed) _n_failures++;
  52.146 -          }
  52.147 -        }
  52.148 -      }
  52.149 -    }
  52.150 -  }
  52.151 -};
  52.152 -
  52.153  template<class ClosureType>
  52.154  HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
  52.155                                 HeapRegion* hr,
  52.156 @@ -368,7 +231,7 @@
  52.157    if (!par) {
  52.158      // If this is parallel, this will be done later.
  52.159      HeapRegionRemSet* hrrs = rem_set();
  52.160 -    if (hrrs != NULL) hrrs->clear();
  52.161 +    hrrs->clear();
  52.162      _claimed = InitialClaimValue;
  52.163    }
  52.164    zero_marked_bytes();
  52.165 @@ -505,6 +368,7 @@
  52.166      _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
  52.167      _predicted_bytes_to_copy(0)
  52.168  {
  52.169 +  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  52.170    _orig_end = mr.end();
  52.171    // Note that initialize() will set the start of the unmarked area of the
  52.172    // region.
  52.173 @@ -512,8 +376,6 @@
  52.174    set_top(bottom());
  52.175    set_saved_mark();
  52.176  
  52.177 -  _rem_set =  new HeapRegionRemSet(sharedOffsetArray, this);
  52.178 -
  52.179    assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
  52.180  }
  52.181  
  52.182 @@ -733,6 +595,160 @@
  52.183    return NULL;
  52.184  }
  52.185  
  52.186 +// Code roots support
  52.187 +
  52.188 +void HeapRegion::add_strong_code_root(nmethod* nm) {
  52.189 +  HeapRegionRemSet* hrrs = rem_set();
  52.190 +  hrrs->add_strong_code_root(nm);
  52.191 +}
  52.192 +
  52.193 +void HeapRegion::remove_strong_code_root(nmethod* nm) {
  52.194 +  HeapRegionRemSet* hrrs = rem_set();
  52.195 +  hrrs->remove_strong_code_root(nm);
  52.196 +}
  52.197 +
  52.198 +void HeapRegion::migrate_strong_code_roots() {
  52.199 +  assert(in_collection_set(), "only collection set regions");
  52.200 +  assert(!isHumongous(), "not humongous regions");
  52.201 +
  52.202 +  HeapRegionRemSet* hrrs = rem_set();
  52.203 +  hrrs->migrate_strong_code_roots();
  52.204 +}
  52.205 +
  52.206 +void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  52.207 +  HeapRegionRemSet* hrrs = rem_set();
  52.208 +  hrrs->strong_code_roots_do(blk);
  52.209 +}
  52.210 +
  52.211 +class VerifyStrongCodeRootOopClosure: public OopClosure {
  52.212 +  const HeapRegion* _hr;
  52.213 +  nmethod* _nm;
  52.214 +  bool _failures;
  52.215 +  bool _has_oops_in_region;
  52.216 +
  52.217 +  template <class T> void do_oop_work(T* p) {
  52.218 +    T heap_oop = oopDesc::load_heap_oop(p);
  52.219 +    if (!oopDesc::is_null(heap_oop)) {
  52.220 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  52.221 +
  52.222 +      // Note: not all the oops embedded in the nmethod are in the
  52.223 +      // current region. We only look at those which are.
  52.224 +      if (_hr->is_in(obj)) {
  52.225 +        // Object is in the region. Check that it's less than top
  52.226 +        if (_hr->top() <= (HeapWord*)obj) {
  52.227 +          // Object is above top
  52.228 +          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
  52.229 +                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
  52.230 +                                 "top "PTR_FORMAT,
  52.231 +                                 obj, _hr->bottom(), _hr->end(), _hr->top());
  52.232 +          _failures = true;
  52.233 +          return;
  52.234 +        }
  52.235 +        // Nmethod has at least one oop in the current region
  52.236 +        _has_oops_in_region = true;
  52.237 +      }
  52.238 +    }
  52.239 +  }
  52.240 +
  52.241 +public:
  52.242 +  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
  52.243 +    _hr(hr), _failures(false), _has_oops_in_region(false) {}
  52.244 +
  52.245 +  void do_oop(narrowOop* p) { do_oop_work(p); }
  52.246 +  void do_oop(oop* p)       { do_oop_work(p); }
  52.247 +
  52.248 +  bool failures()           { return _failures; }
  52.249 +  bool has_oops_in_region() { return _has_oops_in_region; }
  52.250 +};
  52.251 +
  52.252 +class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  52.253 +  const HeapRegion* _hr;
  52.254 +  bool _failures;
  52.255 +public:
  52.256 +  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
  52.257 +    _hr(hr), _failures(false) {}
  52.258 +
  52.259 +  void do_code_blob(CodeBlob* cb) {
  52.260 +    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
  52.261 +    if (nm != NULL) {
  52.262 +      // Verify that the nmethod is live
  52.263 +      if (!nm->is_alive()) {
  52.264 +        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
  52.265 +                               PTR_FORMAT" in its strong code roots",
  52.266 +                               _hr->bottom(), _hr->end(), nm);
  52.267 +        _failures = true;
  52.268 +      } else {
  52.269 +        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
  52.270 +        nm->oops_do(&oop_cl);
  52.271 +        if (!oop_cl.has_oops_in_region()) {
  52.272 +          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
  52.273 +                                 PTR_FORMAT" in its strong code roots "
  52.274 +                                 "with no pointers into region",
  52.275 +                                 _hr->bottom(), _hr->end(), nm);
  52.276 +          _failures = true;
  52.277 +        } else if (oop_cl.failures()) {
  52.278 +          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
  52.279 +                                 "failures for nmethod "PTR_FORMAT,
  52.280 +                                 _hr->bottom(), _hr->end(), nm);
  52.281 +          _failures = true;
  52.282 +        }
  52.283 +      }
  52.284 +    }
  52.285 +  }
  52.286 +
  52.287 +  bool failures()       { return _failures; }
  52.288 +};
  52.289 +
  52.290 +void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  52.291 +  if (!G1VerifyHeapRegionCodeRoots) {
  52.292 +    // We're not verifying code roots.
  52.293 +    return;
  52.294 +  }
  52.295 +  if (vo == VerifyOption_G1UseMarkWord) {
  52.296 +    // Marking verification during a full GC is performed after class
  52.297 +    // unloading, code cache unloading, etc so the strong code roots
  52.298 +    // attached to each heap region are in an inconsistent state. They won't
  52.299 +    // be consistent until the strong code roots are rebuilt after the
  52.300 +    // actual GC. Skip verifying the strong code roots at this particular
  52.301 +    // time.
  52.302 +    assert(VerifyDuringGC, "only way to get here");
  52.303 +    return;
  52.304 +  }
  52.305 +
  52.306 +  HeapRegionRemSet* hrrs = rem_set();
  52.307 +  int strong_code_roots_length = hrrs->strong_code_roots_list_length();
  52.308 +
  52.309 +  // if this region is empty then there should be no entries
  52.310 +  // on its strong code root list
  52.311 +  if (is_empty()) {
  52.312 +    if (strong_code_roots_length > 0) {
  52.313 +      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
  52.314 +                             "but has "INT32_FORMAT" code root entries",
  52.315 +                             bottom(), end(), strong_code_roots_length);
  52.316 +      *failures = true;
  52.317 +    }
  52.318 +    return;
  52.319 +  }
  52.320 +
  52.321 +  // An H-region should have an empty strong code root list
  52.322 +  if (isHumongous()) {
  52.323 +    if (strong_code_roots_length > 0) {
  52.324 +      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
  52.325 +                             "but has "INT32_FORMAT" code root entries",
  52.326 +                             bottom(), end(), strong_code_roots_length);
  52.327 +      *failures = true;
  52.328 +    }
  52.329 +    return;
  52.330 +  }
  52.331 +
  52.332 +  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  52.333 +  strong_code_roots_do(&cb_cl);
  52.334 +
  52.335 +  if (cb_cl.failures()) {
  52.336 +    *failures = true;
  52.337 +  }
  52.338 +}
  52.339 +
  52.340  void HeapRegion::print() const { print_on(gclog_or_tty); }
  52.341  void HeapRegion::print_on(outputStream* st) const {
  52.342    if (isHumongous()) {
  52.343 @@ -761,10 +777,143 @@
  52.344    G1OffsetTableContigSpace::print_on(st);
  52.345  }
  52.346  
  52.347 -void HeapRegion::verify() const {
  52.348 -  bool dummy = false;
  52.349 -  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
  52.350 -}
  52.351 +class VerifyLiveClosure: public OopClosure {
  52.352 +private:
  52.353 +  G1CollectedHeap* _g1h;
  52.354 +  CardTableModRefBS* _bs;
  52.355 +  oop _containing_obj;
  52.356 +  bool _failures;
  52.357 +  int _n_failures;
  52.358 +  VerifyOption _vo;
  52.359 +public:
  52.360 +  // _vo == UsePrevMarking -> use "prev" marking information,
  52.361 +  // _vo == UseNextMarking -> use "next" marking information,
  52.362 +  // _vo == UseMarkWord    -> use mark word from object header.
  52.363 +  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
  52.364 +    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
  52.365 +    _failures(false), _n_failures(0), _vo(vo)
  52.366 +  {
  52.367 +    BarrierSet* bs = _g1h->barrier_set();
  52.368 +    if (bs->is_a(BarrierSet::CardTableModRef))
  52.369 +      _bs = (CardTableModRefBS*)bs;
  52.370 +  }
  52.371 +
  52.372 +  void set_containing_obj(oop obj) {
  52.373 +    _containing_obj = obj;
  52.374 +  }
  52.375 +
  52.376 +  bool failures() { return _failures; }
  52.377 +  int n_failures() { return _n_failures; }
  52.378 +
  52.379 +  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  52.380 +  virtual void do_oop(      oop* p) { do_oop_work(p); }
  52.381 +
  52.382 +  void print_object(outputStream* out, oop obj) {
  52.383 +#ifdef PRODUCT
  52.384 +    Klass* k = obj->klass();
  52.385 +    const char* class_name = InstanceKlass::cast(k)->external_name();
  52.386 +    out->print_cr("class name %s", class_name);
  52.387 +#else // PRODUCT
  52.388 +    obj->print_on(out);
  52.389 +#endif // PRODUCT
  52.390 +  }
  52.391 +
  52.392 +  template <class T>
  52.393 +  void do_oop_work(T* p) {
  52.394 +    assert(_containing_obj != NULL, "Precondition");
  52.395 +    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
  52.396 +           "Precondition");
  52.397 +    T heap_oop = oopDesc::load_heap_oop(p);
  52.398 +    if (!oopDesc::is_null(heap_oop)) {
  52.399 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  52.400 +      bool failed = false;
  52.401 +      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
  52.402 +        MutexLockerEx x(ParGCRareEvent_lock,
  52.403 +                        Mutex::_no_safepoint_check_flag);
  52.404 +
  52.405 +        if (!_failures) {
  52.406 +          gclog_or_tty->print_cr("");
  52.407 +          gclog_or_tty->print_cr("----------");
  52.408 +        }
  52.409 +        if (!_g1h->is_in_closed_subset(obj)) {
  52.410 +          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
  52.411 +          gclog_or_tty->print_cr("Field "PTR_FORMAT
  52.412 +                                 " of live obj "PTR_FORMAT" in region "
  52.413 +                                 "["PTR_FORMAT", "PTR_FORMAT")",
  52.414 +                                 p, (void*) _containing_obj,
  52.415 +                                 from->bottom(), from->end());
  52.416 +          print_object(gclog_or_tty, _containing_obj);
  52.417 +          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
  52.418 +                                 (void*) obj);
  52.419 +        } else {
  52.420 +          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
  52.421 +          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
  52.422 +          gclog_or_tty->print_cr("Field "PTR_FORMAT
  52.423 +                                 " of live obj "PTR_FORMAT" in region "
  52.424 +                                 "["PTR_FORMAT", "PTR_FORMAT")",
  52.425 +                                 p, (void*) _containing_obj,
  52.426 +                                 from->bottom(), from->end());
  52.427 +          print_object(gclog_or_tty, _containing_obj);
  52.428 +          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
  52.429 +                                 "["PTR_FORMAT", "PTR_FORMAT")",
  52.430 +                                 (void*) obj, to->bottom(), to->end());
  52.431 +          print_object(gclog_or_tty, obj);
  52.432 +        }
  52.433 +        gclog_or_tty->print_cr("----------");
  52.434 +        gclog_or_tty->flush();
  52.435 +        _failures = true;
  52.436 +        failed = true;
  52.437 +        _n_failures++;
  52.438 +      }
  52.439 +
  52.440 +      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
  52.441 +        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
  52.442 +        HeapRegion* to   = _g1h->heap_region_containing(obj);
  52.443 +        if (from != NULL && to != NULL &&
  52.444 +            from != to &&
  52.445 +            !to->isHumongous()) {
  52.446 +          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
  52.447 +          jbyte cv_field = *_bs->byte_for_const(p);
  52.448 +          const jbyte dirty = CardTableModRefBS::dirty_card_val();
  52.449 +
  52.450 +          bool is_bad = !(from->is_young()
  52.451 +                          || to->rem_set()->contains_reference(p)
  52.452 +                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
  52.453 +                              (_containing_obj->is_objArray() ?
  52.454 +                                  cv_field == dirty
  52.455 +                               : cv_obj == dirty || cv_field == dirty));
  52.456 +          if (is_bad) {
  52.457 +            MutexLockerEx x(ParGCRareEvent_lock,
  52.458 +                            Mutex::_no_safepoint_check_flag);
  52.459 +
  52.460 +            if (!_failures) {
  52.461 +              gclog_or_tty->print_cr("");
  52.462 +              gclog_or_tty->print_cr("----------");
  52.463 +            }
  52.464 +            gclog_or_tty->print_cr("Missing rem set entry:");
  52.465 +            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
  52.466 +                                   "of obj "PTR_FORMAT", "
  52.467 +                                   "in region "HR_FORMAT,
  52.468 +                                   p, (void*) _containing_obj,
  52.469 +                                   HR_FORMAT_PARAMS(from));
  52.470 +            _containing_obj->print_on(gclog_or_tty);
  52.471 +            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
  52.472 +                                   "in region "HR_FORMAT,
  52.473 +                                   (void*) obj,
  52.474 +                                   HR_FORMAT_PARAMS(to));
  52.475 +            obj->print_on(gclog_or_tty);
  52.476 +            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
  52.477 +                          cv_obj, cv_field);
  52.478 +            gclog_or_tty->print_cr("----------");
  52.479 +            gclog_or_tty->flush();
  52.480 +            _failures = true;
  52.481 +            if (!failed) _n_failures++;
  52.482 +          }
  52.483 +        }
  52.484 +      }
  52.485 +    }
  52.486 +  }
  52.487 +};
  52.488  
  52.489  // This really ought to be commoned up into OffsetTableContigSpace somehow.
  52.490  // We would need a mechanism to make that code skip dead objects.
  52.491 @@ -904,6 +1053,13 @@
  52.492      *failures = true;
  52.493      return;
  52.494    }
  52.495 +
  52.496 +  verify_strong_code_roots(vo, failures);
  52.497 +}
  52.498 +
  52.499 +void HeapRegion::verify() const {
  52.500 +  bool dummy = false;
  52.501 +  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
  52.502  }
  52.503  
  52.504  // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
    53.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Aug 23 22:12:18 2013 +0100
    53.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Aug 30 09:50:49 2013 +0100
    53.3 @@ -52,6 +52,7 @@
    53.4  class HeapRegionRemSetIterator;
    53.5  class HeapRegion;
    53.6  class HeapRegionSetBase;
    53.7 +class nmethod;
    53.8  
    53.9  #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
   53.10  #define HR_FORMAT_PARAMS(_hr_) \
   53.11 @@ -371,7 +372,8 @@
   53.12      RebuildRSClaimValue        = 5,
   53.13      ParEvacFailureClaimValue   = 6,
   53.14      AggregateCountClaimValue   = 7,
   53.15 -    VerifyCountClaimValue      = 8
   53.16 +    VerifyCountClaimValue      = 8,
   53.17 +    ParMarkRootClaimValue      = 9
   53.18    };
   53.19  
   53.20    inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
   53.21 @@ -796,6 +798,25 @@
   53.22  
   53.23    virtual void reset_after_compaction();
   53.24  
   53.25 +  // Routines for managing a list of code roots (attached to
   53.26 +  // this region's RSet) that point into this heap region.
   53.27 +  void add_strong_code_root(nmethod* nm);
   53.28 +  void remove_strong_code_root(nmethod* nm);
   53.29 +
   53.30 +  // During a collection, migrate the successfully evacuated
   53.31 +  // strong code roots that pointed into this region to the
   53.32 +  // new regions that they now point into. Unsuccessfully
   53.33 +  // evacuated code roots are not migrated.
   53.34 +  void migrate_strong_code_roots();
   53.35 +
   53.36 +  // Applies blk->do_code_blob() to each of the entries in
   53.37 +  // the strong code roots list for this region.
   53.38 +  void strong_code_roots_do(CodeBlobClosure* blk) const;
   53.39 +
   53.40 +  // Verify that the entries on the strong code root list for this
   53.41 +  // region are live and include at least one pointer into this region.
   53.42 +  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
   53.43 +
   53.44    void print() const;
   53.45    void print_on(outputStream* st) const;
   53.46  
    54.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Aug 23 22:12:18 2013 +0100
    54.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Aug 30 09:50:49 2013 +0100
    54.3 @@ -33,6 +33,7 @@
    54.4  #include "oops/oop.inline.hpp"
    54.5  #include "utilities/bitMap.inline.hpp"
    54.6  #include "utilities/globalDefinitions.hpp"
    54.7 +#include "utilities/growableArray.hpp"
    54.8  
    54.9  class PerRegionTable: public CHeapObj<mtGC> {
   54.10    friend class OtherRegionsTable;
   54.11 @@ -849,7 +850,7 @@
   54.12  
   54.13  HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
   54.14                                     HeapRegion* hr)
   54.15 -  : _bosa(bosa), _other_regions(hr) {
   54.16 +  : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
   54.17    reset_for_par_iteration();
   54.18  }
   54.19  
   54.20 @@ -908,6 +909,12 @@
   54.21  }
   54.22  
   54.23  void HeapRegionRemSet::clear() {
   54.24 +  if (_strong_code_roots_list != NULL) {
   54.25 +    delete _strong_code_roots_list;
   54.26 +  }
   54.27 +  _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
   54.28 +                                GrowableArray<nmethod*>(10, 0, NULL, true);
   54.29 +
   54.30    _other_regions.clear();
   54.31    assert(occupied() == 0, "Should be clear.");
   54.32    reset_for_par_iteration();
   54.33 @@ -925,6 +932,121 @@
   54.34    _other_regions.scrub(ctbs, region_bm, card_bm);
   54.35  }
   54.36  
   54.37 +
   54.38 +// Code roots support
   54.39 +
   54.40 +void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
   54.41 +  assert(nm != NULL, "sanity");
   54.42 +  // Search for the code blob from the end of the list to avoid
   54.43 +  // adding duplicate entries as much as possible.
   54.44 +  if (_strong_code_roots_list->find_from_end(nm) < 0) {
   54.45 +    // Code blob isn't already in the list
   54.46 +    _strong_code_roots_list->push(nm);
   54.47 +  }
   54.48 +}
   54.49 +
   54.50 +void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
   54.51 +  assert(nm != NULL, "sanity");
   54.52 +  int idx = _strong_code_roots_list->find(nm);
   54.53 +  if (idx >= 0) {
   54.54 +    _strong_code_roots_list->remove_at(idx);
   54.55 +  }
   54.56 +  // Check that there were no duplicates
   54.57 +  guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
   54.58 +}
   54.59 +
   54.60 +class NMethodMigrationOopClosure : public OopClosure {
   54.61 +  G1CollectedHeap* _g1h;
   54.62 +  HeapRegion* _from;
   54.63 +  nmethod* _nm;
   54.64 +
   54.65 +  uint _num_self_forwarded;
   54.66 +
   54.67 +  template <class T> void do_oop_work(T* p) {
   54.68 +    T heap_oop = oopDesc::load_heap_oop(p);
   54.69 +    if (!oopDesc::is_null(heap_oop)) {
   54.70 +      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
   54.71 +      if (_from->is_in(obj)) {
   54.72 +        // Reference still points into the source region.
   54.73 +        // Since roots are immediately evacuated, this means that
   54.74 +        // we must have self-forwarded the object.
   54.75 +        assert(obj->is_forwarded(),
   54.76 +               err_msg("code roots should be immediately evacuated. "
   54.77 +                       "Ref: "PTR_FORMAT", "
   54.78 +                       "Obj: "PTR_FORMAT", "
   54.79 +                       "Region: "HR_FORMAT,
   54.80 +                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
   54.81 +        assert(obj->forwardee() == obj,
   54.82 +               err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
   54.83 +
   54.84 +        // The object has been self-forwarded.
   54.85 +        // Note: if we are in an initial mark pause, there is
   54.86 +        // no need to mark the object explicitly. It will be marked
   54.87 +        // by the regular evacuation failure handling code.
   54.88 +        _num_self_forwarded++;
   54.89 +      } else {
   54.90 +        // The reference points into a promotion or to-space region
   54.91 +        HeapRegion* to = _g1h->heap_region_containing(obj);
   54.92 +        to->rem_set()->add_strong_code_root(_nm);
   54.93 +      }
   54.94 +    }
   54.95 +  }
   54.96 +
   54.97 +public:
   54.98 +  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
   54.99 +    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
  54.100 +
  54.101 +  void do_oop(narrowOop* p) { do_oop_work(p); }
  54.102 +  void do_oop(oop* p)       { do_oop_work(p); }
  54.103 +
  54.104 +  uint retain() { return _num_self_forwarded > 0; }
  54.105 +};
  54.106 +
  54.107 +void HeapRegionRemSet::migrate_strong_code_roots() {
  54.108 +  assert(hr()->in_collection_set(), "only collection set regions");
  54.109 +  assert(!hr()->isHumongous(), "not humongous regions");
  54.110 +
  54.111 +  ResourceMark rm;
  54.112 +
  54.113 +  // List of code blobs to retain for this region
  54.114 +  GrowableArray<nmethod*> to_be_retained(10);
  54.115 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  54.116 +
  54.117 +  while (_strong_code_roots_list->is_nonempty()) {
  54.118 +    nmethod *nm = _strong_code_roots_list->pop();
  54.119 +    if (nm != NULL) {
  54.120 +      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
  54.121 +      nm->oops_do(&oop_cl);
  54.122 +      if (oop_cl.retain()) {
  54.123 +        to_be_retained.push(nm);
  54.124 +      }
  54.125 +    }
  54.126 +  }
  54.127 +
  54.128 +  // Now push any code roots we need to retain
  54.129 +  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
  54.130 +         "Retained nmethod list must be empty or "
  54.131 +         "evacuation of this region failed");
  54.132 +
  54.133 +  while (to_be_retained.is_nonempty()) {
  54.134 +    nmethod* nm = to_be_retained.pop();
  54.135 +    assert(nm != NULL, "sanity");
  54.136 +    add_strong_code_root(nm);
  54.137 +  }
  54.138 +}
  54.139 +
  54.140 +void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  54.141 +  for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
  54.142 +    nmethod* nm = _strong_code_roots_list->at(i);
  54.143 +    blk->do_code_blob(nm);
  54.144 +  }
  54.145 +}
  54.146 +
  54.147 +size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  54.148 +  return sizeof(GrowableArray<nmethod*>) +
  54.149 +         _strong_code_roots_list->max_length() * sizeof(nmethod*);
  54.150 +}
  54.151 +
  54.152  //-------------------- Iteration --------------------
  54.153  
  54.154  HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
    55.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Aug 23 22:12:18 2013 +0100
    55.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Aug 30 09:50:49 2013 +0100
    55.3 @@ -37,6 +37,7 @@
    55.4  class HeapRegionRemSetIterator;
    55.5  class PerRegionTable;
    55.6  class SparsePRT;
    55.7 +class nmethod;
    55.8  
    55.9  // Essentially a wrapper around SparsePRTCleanupTask. See
   55.10  // sparsePRT.hpp for more details.
   55.11 @@ -191,6 +192,10 @@
   55.12    G1BlockOffsetSharedArray* _bosa;
   55.13    G1BlockOffsetSharedArray* bosa() const { return _bosa; }
   55.14  
   55.15 +  // A list of code blobs (nmethods) whose code contains pointers into
   55.16 +  // the region that owns this RSet.
   55.17 +  GrowableArray<nmethod*>* _strong_code_roots_list;
   55.18 +
   55.19    OtherRegionsTable _other_regions;
   55.20  
   55.21    enum ParIterState { Unclaimed, Claimed, Complete };
   55.22 @@ -282,11 +287,13 @@
   55.23    }
   55.24  
   55.25    // The actual # of bytes this hr_remset takes up.
   55.26 +  // Note: this also includes the strong code root set.
   55.27    size_t mem_size() {
   55.28      return _other_regions.mem_size()
   55.29        // This correction is necessary because the above includes the second
   55.30        // part.
   55.31 -      + sizeof(this) - sizeof(OtherRegionsTable);
   55.32 +      + (sizeof(this) - sizeof(OtherRegionsTable))
   55.33 +      + strong_code_roots_mem_size();
   55.34    }
   55.35  
   55.36    // Returns the memory occupancy of all static data structures associated
   55.37 @@ -304,6 +311,37 @@
   55.38    bool contains_reference(OopOrNarrowOopStar from) const {
   55.39      return _other_regions.contains_reference(from);
   55.40    }
   55.41 +
   55.42 +  // Routines for managing the list of code roots that point into
   55.43 +  // the heap region that owns this RSet.
   55.44 +  void add_strong_code_root(nmethod* nm);
   55.45 +  void remove_strong_code_root(nmethod* nm);
   55.46 +
   55.47 +  // During a collection, migrate the successfully evacuated strong
   55.48 +  // code roots that pointed into the region that owns this RSet
   55.49 +  // to the RSets of the new regions that they now point into.
   55.50 +  // Unsuccessfully evacuated code roots are not migrated.
   55.51 +  void migrate_strong_code_roots();
   55.52 +
   55.53 +  // Applies blk->do_code_blob() to each of the entries in
   55.54 +  // the strong code roots list.
   55.55 +  void strong_code_roots_do(CodeBlobClosure* blk) const;
   55.56 +
   55.57 +  // Returns the number of elements in the strong code roots list
   55.58 +  int strong_code_roots_list_length() {
   55.59 +    return _strong_code_roots_list->length();
   55.60 +  }
   55.61 +
   55.62 +  // Returns true if the strong code roots list contains the given
   55.63 +  // nmethod.
   55.64 +  bool strong_code_roots_list_contains(nmethod* nm) {
   55.65 +    return _strong_code_roots_list->contains(nm);
   55.66 +  }
   55.67 +
   55.68 +  // Returns the amount of memory, in bytes, currently
   55.69 +  // consumed by the strong code roots.
   55.70 +  size_t strong_code_roots_mem_size();
   55.71 +
   55.72    void print() const;
   55.73  
   55.74    // Called during a stop-world phase to perform any deferred cleanups.
    56.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Aug 23 22:12:18 2013 +0100
    56.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Aug 30 09:50:49 2013 +0100
    56.3 @@ -216,6 +216,7 @@
    56.4    young_gen()->update_counters();
    56.5    old_gen()->update_counters();
    56.6    MetaspaceCounters::update_performance_counters();
    56.7 +  CompressedClassSpaceCounters::update_performance_counters();
    56.8  }
    56.9  
   56.10  size_t ParallelScavengeHeap::capacity() const {
    57.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp	Fri Aug 23 22:12:18 2013 +0100
    57.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp	Fri Aug 30 09:50:49 2013 +0100
    57.3 @@ -118,6 +118,14 @@
    57.4    }
    57.5  }
    57.6  
    57.7 +void CollectedHeap::register_nmethod(nmethod* nm) {
    57.8 +  assert_locked_or_safepoint(CodeCache_lock);
    57.9 +}
   57.10 +
   57.11 +void CollectedHeap::unregister_nmethod(nmethod* nm) {
   57.12 +  assert_locked_or_safepoint(CodeCache_lock);
   57.13 +}
   57.14 +
   57.15  void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
   57.16    const GCHeapSummary& heap_summary = create_heap_summary();
   57.17    const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
    58.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Fri Aug 23 22:12:18 2013 +0100
    58.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Fri Aug 30 09:50:49 2013 +0100
    58.3 @@ -49,6 +49,7 @@
    58.4  class Thread;
    58.5  class ThreadClosure;
    58.6  class VirtualSpaceSummary;
    58.7 +class nmethod;
    58.8  
    58.9  class GCMessage : public FormatBuffer<1024> {
   58.10   public:
   58.11 @@ -603,6 +604,11 @@
   58.12    void print_heap_before_gc();
   58.13    void print_heap_after_gc();
   58.14  
   58.15 +  // Register and unregister an nmethod (compiled code) with the heap.
   58.16 +  // Override with the specific mechanism for each specialized heap type.
   58.17 +  virtual void register_nmethod(nmethod* nm);
   58.18 +  virtual void unregister_nmethod(nmethod* nm);
   58.19 +
   58.20    void trace_heap_before_gc(GCTracer* gc_tracer);
   58.21    void trace_heap_after_gc(GCTracer* gc_tracer);
   58.22  
    59.1 --- a/src/share/vm/memory/filemap.cpp	Fri Aug 23 22:12:18 2013 +0100
    59.2 +++ b/src/share/vm/memory/filemap.cpp	Fri Aug 30 09:50:49 2013 +0100
    59.3 @@ -362,15 +362,12 @@
    59.4  ReservedSpace FileMapInfo::reserve_shared_memory() {
    59.5    struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
    59.6    char* requested_addr = si->_base;
    59.7 -  size_t alignment = os::vm_allocation_granularity();
    59.8  
    59.9 -  size_t size = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
   59.10 -                              SharedMiscDataSize + SharedMiscCodeSize,
   59.11 -                              alignment);
   59.12 +  size_t size = FileMapInfo::shared_spaces_size();
   59.13  
   59.14    // Reserve the space first, then map otherwise map will go right over some
   59.15    // other reserved memory (like the code cache).
   59.16 -  ReservedSpace rs(size, alignment, false, requested_addr);
   59.17 +  ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
   59.18    if (!rs.is_reserved()) {
   59.19      fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
   59.20      return rs;
   59.21 @@ -559,3 +556,19 @@
   59.22                          si->_base, si->_base + si->_used);
   59.23    }
   59.24  }
   59.25 +
   59.26 +// Unmap mapped regions of shared space.
   59.27 +void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
   59.28 +  FileMapInfo *map_info = FileMapInfo::current_info();
   59.29 +  if (map_info) {
   59.30 +    map_info->fail_continue(msg);
   59.31 +    for (int i = 0; i < MetaspaceShared::n_regions; i++) {
   59.32 +      if (map_info->_header._space[i]._base != NULL) {
   59.33 +        map_info->unmap_region(i);
   59.34 +        map_info->_header._space[i]._base = NULL;
   59.35 +      }
   59.36 +    }
   59.37 +  } else if (DumpSharedSpaces) {
   59.38 +    fail_stop(msg, NULL);
   59.39 +  }
   59.40 +}
    60.1 --- a/src/share/vm/memory/filemap.hpp	Fri Aug 23 22:12:18 2013 +0100
    60.2 +++ b/src/share/vm/memory/filemap.hpp	Fri Aug 30 09:50:49 2013 +0100
    60.3 @@ -1,5 +1,5 @@
    60.4  /*
    60.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    60.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    60.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    60.8   *
    60.9   * This code is free software; you can redistribute it and/or modify it
   60.10 @@ -150,6 +150,15 @@
   60.11    // Return true if given address is in the mapped shared space.
   60.12    bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
   60.13    void print_shared_spaces() NOT_CDS_RETURN;
   60.14 +
   60.15 +  static size_t shared_spaces_size() {
   60.16 +    return align_size_up(SharedReadOnlySize + SharedReadWriteSize +
   60.17 +                         SharedMiscDataSize + SharedMiscCodeSize,
   60.18 +                         os::vm_allocation_granularity());
   60.19 +  }
   60.20 +
   60.21 +  // Stop CDS sharing and unmap CDS regions.
   60.22 +  static void stop_sharing_and_unmap(const char* msg);
   60.23  };
   60.24  
   60.25  #endif // SHARE_VM_MEMORY_FILEMAP_HPP
    61.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Aug 23 22:12:18 2013 +0100
    61.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Fri Aug 30 09:50:49 2013 +0100
    61.3 @@ -1211,6 +1211,7 @@
    61.4    }
    61.5  
    61.6    MetaspaceCounters::update_performance_counters();
    61.7 +  CompressedClassSpaceCounters::update_performance_counters();
    61.8  
    61.9    always_do_update_barrier = UseConcMarkSweepGC;
   61.10  };
    62.1 --- a/src/share/vm/memory/heap.cpp	Fri Aug 23 22:12:18 2013 +0100
    62.2 +++ b/src/share/vm/memory/heap.cpp	Fri Aug 30 09:50:49 2013 +0100
    62.3 @@ -1,5 +1,5 @@
    62.4  /*
    62.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    62.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    62.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.8   *
    62.9   * This code is free software; you can redistribute it and/or modify it
   62.10 @@ -118,9 +118,12 @@
   62.11    _number_of_committed_segments = size_to_segments(_memory.committed_size());
   62.12    _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   62.13    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
   62.14 +  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
   62.15 +  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
   62.16 +  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
   62.17  
   62.18    // reserve space for _segmap
   62.19 -  if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
   62.20 +  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
   62.21      return false;
   62.22    }
   62.23  
    63.1 --- a/src/share/vm/memory/iterator.cpp	Fri Aug 23 22:12:18 2013 +0100
    63.2 +++ b/src/share/vm/memory/iterator.cpp	Fri Aug 30 09:50:49 2013 +0100
    63.3 @@ -64,7 +64,7 @@
    63.4  }
    63.5  
    63.6  void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
    63.7 -  nm->oops_do(_cl, /*do_strong_roots_only=*/ true);
    63.8 +  nm->oops_do(_cl, /*allow_zombie=*/ false);
    63.9  }
   63.10  
   63.11  void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
    64.1 --- a/src/share/vm/memory/metaspace.cpp	Fri Aug 23 22:12:18 2013 +0100
    64.2 +++ b/src/share/vm/memory/metaspace.cpp	Fri Aug 30 09:50:49 2013 +0100
    64.3 @@ -35,6 +35,7 @@
    64.4  #include "memory/resourceArea.hpp"
    64.5  #include "memory/universe.hpp"
    64.6  #include "runtime/globals.hpp"
    64.7 +#include "runtime/java.hpp"
    64.8  #include "runtime/mutex.hpp"
    64.9  #include "runtime/orderAccess.hpp"
   64.10  #include "services/memTracker.hpp"
   64.11 @@ -54,6 +55,8 @@
   64.12  
   64.13  MetaWord* last_allocated = 0;
   64.14  
   64.15 +size_t Metaspace::_class_metaspace_size;
   64.16 +
   64.17  // Used in declarations in SpaceManager and ChunkManager
   64.18  enum ChunkIndex {
   64.19    ZeroIndex = 0,
   64.20 @@ -261,10 +264,6 @@
   64.21    // count of chunks contained in this VirtualSpace
   64.22    uintx _container_count;
   64.23  
   64.24 -  // Convenience functions for logical bottom and end
   64.25 -  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   64.26 -  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   64.27 -
   64.28    // Convenience functions to access the _virtual_space
   64.29    char* low()  const { return virtual_space()->low(); }
   64.30    char* high() const { return virtual_space()->high(); }
   64.31 @@ -284,6 +283,10 @@
   64.32    VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   64.33    ~VirtualSpaceNode();
   64.34  
   64.35 +  // Convenience functions for logical bottom and end
   64.36 +  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   64.37 +  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   64.38 +
   64.39    // address of next available space in _virtual_space;
   64.40    // Accessors
   64.41    VirtualSpaceNode* next() { return _next; }
   64.42 @@ -1313,7 +1316,8 @@
   64.43  
   64.44    // Class virtual space should always be expanded.  Call GC for the other
   64.45    // metadata virtual space.
   64.46 -  if (vsl == Metaspace::class_space_list()) return true;
   64.47 +  if (Metaspace::using_class_space() &&
   64.48 +      (vsl == Metaspace::class_space_list())) return true;
   64.49  
   64.50    // If this is part of an allocation after a GC, expand
   64.51    // unconditionally.
   64.52 @@ -2257,7 +2261,7 @@
   64.53    size_t raw_word_size = get_raw_word_size(word_size);
   64.54    size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
   64.55    assert(raw_word_size >= min_size,
   64.56 -    err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
   64.57 +         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
   64.58    block_freelists()->return_block(p, raw_word_size);
   64.59  }
   64.60  
   64.61 @@ -2374,7 +2378,7 @@
   64.62    if (result == NULL) {
   64.63      result = grow_and_allocate(word_size);
   64.64    }
   64.65 -  if (result > 0) {
   64.66 +  if (result != 0) {
   64.67      inc_used_metrics(word_size);
   64.68      assert(result != (MetaWord*) chunks_in_use(MediumIndex),
   64.69             "Head of the list is being allocated");
   64.70 @@ -2476,15 +2480,13 @@
   64.71  size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
   64.72  size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
   64.73  
   64.74 +size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
   64.75 +  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
   64.76 +  return list == NULL ? 0 : list->free_bytes();
   64.77 +}
   64.78 +
   64.79  size_t MetaspaceAux::free_bytes() {
   64.80 -  size_t result = 0;
   64.81 -  if (Metaspace::class_space_list() != NULL) {
   64.82 -    result = result + Metaspace::class_space_list()->free_bytes();
   64.83 -  }
   64.84 -  if (Metaspace::space_list() != NULL) {
   64.85 -    result = result + Metaspace::space_list()->free_bytes();
   64.86 -  }
   64.87 -  return result;
   64.88 +  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
   64.89  }
   64.90  
   64.91  void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
   64.92 @@ -2549,6 +2551,9 @@
   64.93  }
   64.94  
   64.95  size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
   64.96 +  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
   64.97 +    return 0;
   64.98 +  }
   64.99    // Don't count the space in the freelists.  That space will be
  64.100    // added to the capacity calculation as needed.
  64.101    size_t capacity = 0;
  64.102 @@ -2563,18 +2568,18 @@
  64.103  }
  64.104  
  64.105  size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
  64.106 -  size_t reserved = (mdtype == Metaspace::ClassType) ?
  64.107 -                       Metaspace::class_space_list()->virtual_space_total() :
  64.108 -                       Metaspace::space_list()->virtual_space_total();
  64.109 -  return reserved * BytesPerWord;
  64.110 +  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  64.111 +  return list == NULL ? 0 : list->virtual_space_total();
  64.112  }
  64.113  
  64.114  size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
  64.115  
  64.116  size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
  64.117 -  ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
  64.118 -                            Metaspace::class_space_list()->chunk_manager() :
  64.119 -                            Metaspace::space_list()->chunk_manager();
  64.120 +  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  64.121 +  if (list == NULL) {
  64.122 +    return 0;
  64.123 +  }
  64.124 +  ChunkManager* chunk = list->chunk_manager();
  64.125    chunk->slow_verify();
  64.126    return chunk->free_chunks_total();
  64.127  }
  64.128 @@ -2615,7 +2620,6 @@
  64.129  
  64.130  // This is printed when PrintGCDetails
  64.131  void MetaspaceAux::print_on(outputStream* out) {
  64.132 -  Metaspace::MetadataType ct = Metaspace::ClassType;
  64.133    Metaspace::MetadataType nct = Metaspace::NonClassType;
  64.134  
  64.135    out->print_cr(" Metaspace total "
  64.136 @@ -2629,12 +2633,15 @@
  64.137                  allocated_capacity_bytes(nct)/K,
  64.138                  allocated_used_bytes(nct)/K,
  64.139                  reserved_in_bytes(nct)/K);
  64.140 -  out->print_cr("  class space    "
  64.141 -                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  64.142 -                " reserved " SIZE_FORMAT "K",
  64.143 -                allocated_capacity_bytes(ct)/K,
  64.144 -                allocated_used_bytes(ct)/K,
  64.145 -                reserved_in_bytes(ct)/K);
  64.146 +  if (Metaspace::using_class_space()) {
  64.147 +    Metaspace::MetadataType ct = Metaspace::ClassType;
  64.148 +    out->print_cr("  class space    "
  64.149 +                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  64.150 +                  " reserved " SIZE_FORMAT "K",
  64.151 +                  allocated_capacity_bytes(ct)/K,
  64.152 +                  allocated_used_bytes(ct)/K,
  64.153 +                  reserved_in_bytes(ct)/K);
  64.154 +  }
  64.155  }
  64.156  
  64.157  // Print information for class space and data space separately.
  64.158 @@ -2659,13 +2666,37 @@
  64.159    assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
  64.160  }
  64.161  
  64.162 -// Print total fragmentation for class and data metaspaces separately
  64.163 +// Print total fragmentation for class metaspaces
  64.164 +void MetaspaceAux::print_class_waste(outputStream* out) {
  64.165 +  assert(Metaspace::using_class_space(), "class metaspace not used");
  64.166 +  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  64.167 +  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  64.168 +  ClassLoaderDataGraphMetaspaceIterator iter;
  64.169 +  while (iter.repeat()) {
  64.170 +    Metaspace* msp = iter.get_next();
  64.171 +    if (msp != NULL) {
  64.172 +      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  64.173 +      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  64.174 +      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  64.175 +      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
  64.176 +      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  64.177 +      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
  64.178 +      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  64.179 +    }
  64.180 +  }
  64.181 +  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  64.182 +                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  64.183 +                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  64.184 +                "large count " SIZE_FORMAT,
  64.185 +                cls_specialized_count, cls_specialized_waste,
  64.186 +                cls_small_count, cls_small_waste,
  64.187 +                cls_medium_count, cls_medium_waste, cls_humongous_count);
  64.188 +}
  64.189 +
  64.190 +// Print total fragmentation for data and class metaspaces separately
  64.191  void MetaspaceAux::print_waste(outputStream* out) {
  64.192 -
  64.193    size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
  64.194    size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
  64.195 -  size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
  64.196 -  size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
  64.197  
  64.198    ClassLoaderDataGraphMetaspaceIterator iter;
  64.199    while (iter.repeat()) {
  64.200 @@ -2678,14 +2709,6 @@
  64.201        medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  64.202        medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
  64.203        humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  64.204 -
  64.205 -      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
  64.206 -      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
  64.207 -      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
  64.208 -      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
  64.209 -      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
  64.210 -      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
  64.211 -      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
  64.212      }
  64.213    }
  64.214    out->print_cr("Total fragmentation waste (words) doesn't count free space");
  64.215 @@ -2695,13 +2718,9 @@
  64.216                          "large count " SIZE_FORMAT,
  64.217               specialized_count, specialized_waste, small_count,
  64.218               small_waste, medium_count, medium_waste, humongous_count);
  64.219 -  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
  64.220 -                           SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
  64.221 -                           SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
  64.222 -                           "large count " SIZE_FORMAT,
  64.223 -             cls_specialized_count, cls_specialized_waste,
  64.224 -             cls_small_count, cls_small_waste,
  64.225 -             cls_medium_count, cls_medium_waste, cls_humongous_count);
  64.226 +  if (Metaspace::using_class_space()) {
  64.227 +    print_class_waste(out);
  64.228 +  }
  64.229  }
  64.230  
  64.231  // Dump global metaspace things from the end of ClassLoaderDataGraph
  64.232 @@ -2714,7 +2733,9 @@
  64.233  
  64.234  void MetaspaceAux::verify_free_chunks() {
  64.235    Metaspace::space_list()->chunk_manager()->verify();
  64.236 -  Metaspace::class_space_list()->chunk_manager()->verify();
  64.237 +  if (Metaspace::using_class_space()) {
  64.238 +    Metaspace::class_space_list()->chunk_manager()->verify();
  64.239 +  }
  64.240  }
  64.241  
  64.242  void MetaspaceAux::verify_capacity() {
  64.243 @@ -2776,7 +2797,9 @@
  64.244  
  64.245  Metaspace::~Metaspace() {
  64.246    delete _vsm;
  64.247 -  delete _class_vsm;
  64.248 +  if (using_class_space()) {
  64.249 +    delete _class_vsm;
  64.250 +  }
  64.251  }
  64.252  
  64.253  VirtualSpaceList* Metaspace::_space_list = NULL;
  64.254 @@ -2784,9 +2807,123 @@
  64.255  
  64.256  #define VIRTUALSPACEMULTIPLIER 2
  64.257  
  64.258 +#ifdef _LP64
  64.259 +void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  64.260 +  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  64.261 +  // narrow_klass_base is the lower of the metaspace base and the cds base
  64.262 +  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  64.263 +  // between the lower base and higher address.
  64.264 +  address lower_base;
  64.265 +  address higher_address;
  64.266 +  if (UseSharedSpaces) {
  64.267 +    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
  64.268 +                          (address)(metaspace_base + class_metaspace_size()));
  64.269 +    lower_base = MIN2(metaspace_base, cds_base);
  64.270 +  } else {
  64.271 +    higher_address = metaspace_base + class_metaspace_size();
  64.272 +    lower_base = metaspace_base;
  64.273 +  }
  64.274 +  Universe::set_narrow_klass_base(lower_base);
  64.275 +  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
  64.276 +    Universe::set_narrow_klass_shift(0);
  64.277 +  } else {
  64.278 +    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
  64.279 +    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  64.280 +  }
  64.281 +}
  64.282 +
  64.283 +// Return TRUE if the specified metaspace_base and cds_base are close enough
  64.284 +// to work with compressed klass pointers.
  64.285 +bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  64.286 +  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  64.287 +  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  64.288 +  address lower_base = MIN2((address)metaspace_base, cds_base);
  64.289 +  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
  64.290 +                                (address)(metaspace_base + class_metaspace_size()));
  64.291 +  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
  64.292 +}
  64.293 +
  64.294 +// Try to allocate the metaspace at the requested addr.
  64.295 +void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  64.296 +  assert(using_class_space(), "called improperly");
  64.297 +  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  64.298 +  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
  64.299 +         "Metaspace size is too big");
  64.300 +
  64.301 +  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
  64.302 +                                             os::vm_allocation_granularity(),
  64.303 +                                             false, requested_addr, 0);
  64.304 +  if (!metaspace_rs.is_reserved()) {
  64.305 +    if (UseSharedSpaces) {
  64.306 +      // Keep trying to allocate the metaspace, increasing the requested_addr
  64.307 +      // by 1GB each time, until we reach an address that will no longer allow
  64.308 +      // use of CDS with compressed klass pointers.
  64.309 +      char *addr = requested_addr;
  64.310 +      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
  64.311 +             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
  64.312 +        addr = addr + 1*G;
  64.313 +        metaspace_rs = ReservedSpace(class_metaspace_size(),
  64.314 +                                     os::vm_allocation_granularity(), false, addr, 0);
  64.315 +      }
  64.316 +    }
  64.317 +
  64.318 +    // If the allocation was not successful, try to allocate the space anywhere.
  64.319 +    // If that also fails, exit with an out-of-memory error.  At this point we
  64.320 +    // cannot try allocating the metaspace as if UseCompressedKlassPointers were
  64.321 +    // off, because too much initialization that depends on it has already
  64.322 +    // happened.  So UseCompressedKlassPointers cannot be turned off at this point.
  64.323 +    if (!metaspace_rs.is_reserved()) {
  64.324 +      metaspace_rs = ReservedSpace(class_metaspace_size(),
  64.325 +                                   os::vm_allocation_granularity(), false);
  64.326 +      if (!metaspace_rs.is_reserved()) {
  64.327 +        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
  64.328 +                                              class_metaspace_size()));
  64.329 +      }
  64.330 +    }
  64.331 +  }
  64.332 +
  64.333 +  // If we got here, the metaspace was successfully allocated.
  64.334 +  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
  64.335 +
  64.336 +  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  64.337 +  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
  64.338 +    FileMapInfo::stop_sharing_and_unmap(
  64.339 +        "Could not allocate metaspace at a compatible address");
  64.340 +  }
  64.341 +
  64.342 +  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
  64.343 +                                  UseSharedSpaces ? (address)cds_base : 0);
  64.344 +
  64.345 +  initialize_class_space(metaspace_rs);
  64.346 +
  64.347 +  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
  64.348 +    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
  64.349 +                            Universe::narrow_klass_base(), Universe::narrow_klass_shift());
  64.350 +    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
  64.351 +                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
  64.352 +  }
  64.353 +}
  64.354 +
  64.355 +// For UseCompressedKlassPointers the class space is reserved above the top of
  64.356 +// the Java heap.  The argument passed in is at the base of the compressed space.
  64.357 +void Metaspace::initialize_class_space(ReservedSpace rs) {
  64.358 +  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
  64.359 +  assert(rs.size() >= ClassMetaspaceSize,
  64.360 +         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
  64.361 +  assert(using_class_space(), "Must be using class space");
  64.362 +  _class_space_list = new VirtualSpaceList(rs);
  64.363 +}
  64.364 +
  64.365 +#endif
  64.366 +
  64.367  void Metaspace::global_initialize() {
  64.368    // Initialize the alignment for shared spaces.
  64.369    int max_alignment = os::vm_page_size();
  64.370 +  size_t cds_total = 0;
  64.371 +
  64.372 +  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
  64.373 +                                         os::vm_allocation_granularity()));
  64.374 +
  64.375    MetaspaceShared::set_max_alignment(max_alignment);
  64.376  
  64.377    if (DumpSharedSpaces) {
  64.378 @@ -2798,15 +2935,31 @@
  64.379      // Initialize with the sum of the shared space sizes.  The read-only
  64.380      // and read write metaspace chunks will be allocated out of this and the
  64.381      // remainder is the misc code and data chunks.
  64.382 -    size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
  64.383 -                                 SharedMiscDataSize + SharedMiscCodeSize,
  64.384 -                                 os::vm_allocation_granularity());
  64.385 -    size_t word_size = total/wordSize;
  64.386 -    _space_list = new VirtualSpaceList(word_size);
  64.387 +    cds_total = FileMapInfo::shared_spaces_size();
  64.388 +    _space_list = new VirtualSpaceList(cds_total/wordSize);
  64.389 +
  64.390 +#ifdef _LP64
  64.391 +    // Set the compressed klass pointer base so that decoding of these pointers works
  64.392 +    // properly when creating the shared archive.
  64.393 +    assert(UseCompressedOops && UseCompressedKlassPointers,
  64.394 +      "UseCompressedOops and UseCompressedKlassPointers must be set");
  64.395 +    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
  64.396 +    if (TraceMetavirtualspaceAllocation && Verbose) {
  64.397 +      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
  64.398 +                             _space_list->current_virtual_space()->bottom());
  64.399 +    }
  64.400 +
  64.401 +    // Set the shift to zero.
  64.402 +    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
  64.403 +           "CDS region is too large");
  64.404 +    Universe::set_narrow_klass_shift(0);
  64.405 +#endif
  64.406 +
  64.407    } else {
  64.408      // If using shared space, open the file that contains the shared space
  64.409      // and map in the memory before initializing the rest of metaspace (so
  64.410      // the addresses don't conflict)
  64.411 +    address cds_address = NULL;
  64.412      if (UseSharedSpaces) {
  64.413        FileMapInfo* mapinfo = new FileMapInfo();
  64.414        memset(mapinfo, 0, sizeof(FileMapInfo));
  64.415 @@ -2821,8 +2974,22 @@
  64.416          assert(!mapinfo->is_open() && !UseSharedSpaces,
  64.417                 "archive file not closed or shared spaces not disabled.");
  64.418        }
  64.419 +      cds_total = FileMapInfo::shared_spaces_size();
  64.420 +      cds_address = (address)mapinfo->region_base(0);
  64.421      }
  64.422  
  64.423 +#ifdef _LP64
  64.424 +    // If UseCompressedKlassPointers is set then allocate the metaspace area
  64.425 +    // above the heap and above the CDS area (if it exists).
  64.426 +    if (using_class_space()) {
  64.427 +      if (UseSharedSpaces) {
  64.428 +        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
  64.429 +      } else {
  64.430 +        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
  64.431 +      }
  64.432 +    }
  64.433 +#endif
  64.434 +
  64.435      // Initialize these before initializing the VirtualSpaceList
  64.436      _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  64.437      _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  64.438 @@ -2840,39 +3007,28 @@
  64.439    }
  64.440  }
  64.441  
  64.442 -// For UseCompressedKlassPointers the class space is reserved as a piece of the
  64.443 -// Java heap because the compression algorithm is the same for each.  The
  64.444 -// argument passed in is at the top of the compressed space
  64.445 -void Metaspace::initialize_class_space(ReservedSpace rs) {
  64.446 -  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  64.447 -  assert(rs.size() >= ClassMetaspaceSize,
  64.448 -         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
  64.449 -  _class_space_list = new VirtualSpaceList(rs);
  64.450 -}
  64.451 -
  64.452 -void Metaspace::initialize(Mutex* lock,
  64.453 -                           MetaspaceType type) {
  64.454 +void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  64.455  
  64.456    assert(space_list() != NULL,
  64.457      "Metadata VirtualSpaceList has not been initialized");
  64.458  
  64.459 -  _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
  64.460 +  _vsm = new SpaceManager(NonClassType, lock, space_list());
  64.461    if (_vsm == NULL) {
  64.462      return;
  64.463    }
  64.464    size_t word_size;
  64.465    size_t class_word_size;
  64.466 -  vsm()->get_initial_chunk_sizes(type,
  64.467 -                                 &word_size,
  64.468 -                                 &class_word_size);
  64.469 -
  64.470 -  assert(class_space_list() != NULL,
  64.471 -    "Class VirtualSpaceList has not been initialized");
  64.472 -
  64.473 -  // Allocate SpaceManager for classes.
  64.474 -  _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
  64.475 -  if (_class_vsm == NULL) {
  64.476 -    return;
  64.477 +  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
  64.478 +
  64.479 +  if (using_class_space()) {
  64.480 +    assert(class_space_list() != NULL,
  64.481 +      "Class VirtualSpaceList has not been initialized");
  64.482 +
  64.483 +    // Allocate SpaceManager for classes.
  64.484 +    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
  64.485 +    if (_class_vsm == NULL) {
  64.486 +      return;
  64.487 +    }
  64.488    }
  64.489  
  64.490    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  64.491 @@ -2888,11 +3044,13 @@
  64.492    }
  64.493  
  64.494    // Allocate chunk for class metadata objects
  64.495 -  Metachunk* class_chunk =
  64.496 -     class_space_list()->get_initialization_chunk(class_word_size,
  64.497 -                                                  class_vsm()->medium_chunk_bunch());
  64.498 -  if (class_chunk != NULL) {
  64.499 -    class_vsm()->add_chunk(class_chunk, true);
  64.500 +  if (using_class_space()) {
  64.501 +    Metachunk* class_chunk =
  64.502 +       class_space_list()->get_initialization_chunk(class_word_size,
  64.503 +                                                    class_vsm()->medium_chunk_bunch());
  64.504 +    if (class_chunk != NULL) {
  64.505 +      class_vsm()->add_chunk(class_chunk, true);
  64.506 +    }
  64.507    }
  64.508  
  64.509    _alloc_record_head = NULL;
  64.510 @@ -2906,7 +3064,8 @@
  64.511  
  64.512  MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  64.513    // DumpSharedSpaces doesn't use class metadata area (yet)
  64.514 -  if (mdtype == ClassType && !DumpSharedSpaces) {
  64.515 +  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
  64.516 +  if (mdtype == ClassType && using_class_space()) {
  64.517      return  class_vsm()->allocate(word_size);
  64.518    } else {
  64.519      return  vsm()->allocate(word_size);
  64.520 @@ -2937,14 +3096,19 @@
  64.521  }
  64.522  
  64.523  size_t Metaspace::used_words_slow(MetadataType mdtype) const {
  64.524 -  // return vsm()->allocated_used_words();
  64.525 -  return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
  64.526 -                               vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  64.527 +  if (mdtype == ClassType) {
  64.528 +    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
  64.529 +  } else {
  64.530 +    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
  64.531 +  }
  64.532  }
  64.533  
  64.534  size_t Metaspace::free_words(MetadataType mdtype) const {
  64.535 -  return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
  64.536 -                               vsm()->sum_free_in_chunks_in_use();
  64.537 +  if (mdtype == ClassType) {
  64.538 +    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  64.539 +  } else {
  64.540 +    return vsm()->sum_free_in_chunks_in_use();
  64.541 +  }
  64.542  }
  64.543  
  64.544  // Space capacity in the Metaspace.  It includes
  64.545 @@ -2953,8 +3117,11 @@
  64.546  // in the space available in the dictionary which
  64.547  // is already counted in some chunk.
  64.548  size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
  64.549 -  return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
  64.550 -                               vsm()->sum_capacity_in_chunks_in_use();
  64.551 +  if (mdtype == ClassType) {
  64.552 +    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
  64.553 +  } else {
  64.554 +    return vsm()->sum_capacity_in_chunks_in_use();
  64.555 +  }
  64.556  }
  64.557  
  64.558  size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
  64.559 @@ -2977,8 +3144,8 @@
  64.560  #endif
  64.561        return;
  64.562      }
  64.563 -    if (is_class) {
  64.564 -       class_vsm()->deallocate(ptr, word_size);
  64.565 +    if (is_class && using_class_space()) {
  64.566 +      class_vsm()->deallocate(ptr, word_size);
  64.567      } else {
  64.568        vsm()->deallocate(ptr, word_size);
  64.569      }
  64.570 @@ -2992,7 +3159,7 @@
  64.571  #endif
  64.572        return;
  64.573      }
  64.574 -    if (is_class) {
  64.575 +    if (is_class && using_class_space()) {
  64.576        class_vsm()->deallocate(ptr, word_size);
  64.577      } else {
  64.578        vsm()->deallocate(ptr, word_size);
  64.579 @@ -3101,14 +3268,18 @@
  64.580    MutexLockerEx cl(SpaceManager::expand_lock(),
  64.581                     Mutex::_no_safepoint_check_flag);
  64.582    space_list()->purge();
  64.583 -  class_space_list()->purge();
  64.584 +  if (using_class_space()) {
  64.585 +    class_space_list()->purge();
  64.586 +  }
  64.587  }
  64.588  
  64.589  void Metaspace::print_on(outputStream* out) const {
  64.590    // Print both class virtual space counts and metaspace.
  64.591    if (Verbose) {
  64.592 -      vsm()->print_on(out);
  64.593 +    vsm()->print_on(out);
  64.594 +    if (using_class_space()) {
  64.595        class_vsm()->print_on(out);
  64.596 +    }
  64.597    }
  64.598  }
  64.599  
  64.600 @@ -3122,17 +3293,21 @@
  64.601    // be needed.  Note, locking this can cause inversion problems with the
  64.602    // caller in MetaspaceObj::is_metadata() function.
  64.603    return space_list()->contains(ptr) ||
  64.604 -         class_space_list()->contains(ptr);
  64.605 +         (using_class_space() && class_space_list()->contains(ptr));
  64.606  }
  64.607  
  64.608  void Metaspace::verify() {
  64.609    vsm()->verify();
  64.610 -  class_vsm()->verify();
  64.611 +  if (using_class_space()) {
  64.612 +    class_vsm()->verify();
  64.613 +  }
  64.614  }
  64.615  
  64.616  void Metaspace::dump(outputStream* const out) const {
  64.617    out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
  64.618    vsm()->dump(out);
  64.619 -  out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
  64.620 -  class_vsm()->dump(out);
  64.621 +  if (using_class_space()) {
  64.622 +    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
  64.623 +    class_vsm()->dump(out);
  64.624 +  }
  64.625  }
    65.1 --- a/src/share/vm/memory/metaspace.hpp	Fri Aug 23 22:12:18 2013 +0100
    65.2 +++ b/src/share/vm/memory/metaspace.hpp	Fri Aug 30 09:50:49 2013 +0100
    65.3 @@ -105,6 +105,16 @@
    65.4    // Align up the word size to the allocation word size
    65.5    static size_t align_word_size_up(size_t);
    65.6  
    65.7 +  // Aligned size of the class metaspace.
    65.8 +  static size_t _class_metaspace_size;
    65.9 +
   65.10 +  static size_t class_metaspace_size() {
   65.11 +    return _class_metaspace_size;
   65.12 +  }
   65.13 +  static void set_class_metaspace_size(size_t metaspace_size) {
   65.14 +    _class_metaspace_size = metaspace_size;
   65.15 +  }
   65.16 +
   65.17    static size_t _first_chunk_word_size;
   65.18    static size_t _first_class_chunk_word_size;
   65.19  
   65.20 @@ -126,11 +136,26 @@
   65.21  
   65.22    static VirtualSpaceList* space_list()       { return _space_list; }
   65.23    static VirtualSpaceList* class_space_list() { return _class_space_list; }
   65.24 +  static VirtualSpaceList* get_space_list(MetadataType mdtype) {
   65.25 +    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
   65.26 +    return mdtype == ClassType ? class_space_list() : space_list();
   65.27 +  }
   65.28  
   65.29    // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   65.30    // maintain a single list for now.
   65.31    void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
   65.32  
   65.33 +#ifdef _LP64
   65.34 +  static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
   65.35 +
   65.36 +  // Returns true if CDS can be used with the metaspace allocated at the specified address.
   65.37 +  static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
   65.38 +
   65.39 +  static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
   65.40 +
   65.41 +  static void initialize_class_space(ReservedSpace rs);
   65.42 +#endif
   65.43 +
   65.44    class AllocRecord : public CHeapObj<mtClass> {
   65.45    public:
   65.46      AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
   65.47 @@ -151,7 +176,6 @@
   65.48  
   65.49    // Initialize globals for Metaspace
   65.50    static void global_initialize();
   65.51 -  static void initialize_class_space(ReservedSpace rs);
   65.52  
   65.53    static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   65.54    static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
   65.55 @@ -172,8 +196,6 @@
   65.56    MetaWord* expand_and_allocate(size_t size,
   65.57                                  MetadataType mdtype);
   65.58  
   65.59 -  static bool is_initialized() { return _class_space_list != NULL; }
   65.60 -
   65.61    static bool contains(const void *ptr);
   65.62    void dump(outputStream* const out) const;
   65.63  
   65.64 @@ -190,11 +212,16 @@
   65.65    };
   65.66  
   65.67    void iterate(AllocRecordClosure *closure);
   65.68 +
   65.69 +  // Returns true only if UseCompressedKlassPointers is true and DumpSharedSpaces is false.
   65.70 +  static bool using_class_space() {
   65.71 +    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
   65.72 +  }
   65.73 +
   65.74  };
   65.75  
   65.76  class MetaspaceAux : AllStatic {
   65.77    static size_t free_chunks_total(Metaspace::MetadataType mdtype);
   65.78 -  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
   65.79  
   65.80   public:
   65.81    // Statistics for class space and data space in metaspace.
   65.82 @@ -238,13 +265,15 @@
   65.83    // Used by MetaspaceCounters
   65.84    static size_t free_chunks_total();
   65.85    static size_t free_chunks_total_in_bytes();
   65.86 +  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
   65.87  
   65.88    static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
   65.89      return _allocated_capacity_words[mdtype];
   65.90    }
   65.91    static size_t allocated_capacity_words() {
   65.92 -    return _allocated_capacity_words[Metaspace::ClassType] +
   65.93 -           _allocated_capacity_words[Metaspace::NonClassType];
   65.94 +    return _allocated_capacity_words[Metaspace::NonClassType] +
   65.95 +           (Metaspace::using_class_space() ?
   65.96 +           _allocated_capacity_words[Metaspace::ClassType] : 0);
   65.97    }
   65.98    static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
   65.99      return allocated_capacity_words(mdtype) * BytesPerWord;
  65.100 @@ -257,8 +286,9 @@
  65.101      return _allocated_used_words[mdtype];
  65.102    }
  65.103    static size_t allocated_used_words() {
  65.104 -    return _allocated_used_words[Metaspace::ClassType] +
  65.105 -           _allocated_used_words[Metaspace::NonClassType];
  65.106 +    return _allocated_used_words[Metaspace::NonClassType] +
  65.107 +           (Metaspace::using_class_space() ?
  65.108 +           _allocated_used_words[Metaspace::ClassType] : 0);
  65.109    }
  65.110    static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
  65.111      return allocated_used_words(mdtype) * BytesPerWord;
  65.112 @@ -268,6 +298,7 @@
  65.113    }
  65.114  
  65.115    static size_t free_bytes();
  65.116 +  static size_t free_bytes(Metaspace::MetadataType mdtype);
  65.117  
  65.118    // Total capacity in all Metaspaces
  65.119    static size_t capacity_bytes_slow() {
  65.120 @@ -300,6 +331,7 @@
  65.121    static void print_on(outputStream * out);
  65.122    static void print_on(outputStream * out, Metaspace::MetadataType mdtype);
  65.123  
  65.124 +  static void print_class_waste(outputStream* out);
  65.125    static void print_waste(outputStream* out);
  65.126    static void dump(outputStream* out);
  65.127    static void verify_free_chunks();
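
The header changes add a get_space_list(MetadataType) dispatcher and make the MetaspaceAux totals count the ClassType slot only when a separate class space is in use. A standalone sketch of that accounting scheme, with MetadataTypeCount serving as the array length rather than a valid index (names here are illustrative, not the HotSpot API):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    enum MetadataType { ClassType = 0, NonClassType = 1, MetadataTypeCount = 2 };

    static size_t g_capacity_words[MetadataTypeCount] = { 0, 0 };
    static bool   g_using_class_space = true;  // assumption: 64-bit, compressed klass pointers on

    static void record_allocation(MetadataType mdtype, size_t words) {
      assert(mdtype != MetadataTypeCount && "MetadataTypeCount can't be used as mdtype");
      g_capacity_words[mdtype] += words;
    }

    // Total capacity: always count the non-class slot; count the class slot
    // only when a separate class space exists.
    static size_t allocated_capacity_words() {
      return g_capacity_words[NonClassType] +
             (g_using_class_space ? g_capacity_words[ClassType] : 0);
    }

    int main() {
      record_allocation(NonClassType, 128);
      record_allocation(ClassType, 32);
      std::printf("total = %zu words\n", allocated_capacity_words());  // 160
      g_using_class_space = false;
      std::printf("total = %zu words\n", allocated_capacity_words());  // 128
      return 0;
    }
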
    66.1 --- a/src/share/vm/memory/metaspaceCounters.cpp	Fri Aug 23 22:12:18 2013 +0100
    66.2 +++ b/src/share/vm/memory/metaspaceCounters.cpp	Fri Aug 30 09:50:49 2013 +0100
    66.3 @@ -25,11 +25,47 @@
    66.4  #include "precompiled.hpp"
    66.5  #include "memory/metaspaceCounters.hpp"
    66.6  #include "memory/resourceArea.hpp"
    66.7 +#include "runtime/globals.hpp"
    66.8 +#include "runtime/perfData.hpp"
    66.9  #include "utilities/exceptions.hpp"
   66.10  
   66.11 -MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
   66.12 +class MetaspacePerfCounters: public CHeapObj<mtInternal> {
   66.13 +  friend class VMStructs;
   66.14 +  PerfVariable*      _capacity;
   66.15 +  PerfVariable*      _used;
   66.16 +  PerfVariable*      _max_capacity;
   66.17  
   66.18 -size_t MetaspaceCounters::calc_total_capacity() {
   66.19 +  PerfVariable* create_variable(const char *ns, const char *name, size_t value, TRAPS) {
   66.20 +    const char *path = PerfDataManager::counter_name(ns, name);
   66.21 +    return PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
   66.22 +  }
   66.23 +
   66.24 +  void create_constant(const char *ns, const char *name, size_t value, TRAPS) {
   66.25 +    const char *path = PerfDataManager::counter_name(ns, name);
   66.26 +    PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
   66.27 +  }
   66.28 +
   66.29 + public:
   66.30 +  MetaspacePerfCounters(const char* ns, size_t min_capacity, size_t curr_capacity, size_t max_capacity, size_t used) {
   66.31 +    EXCEPTION_MARK;
   66.32 +    ResourceMark rm;
   66.33 +
   66.34 +    create_constant(ns, "minCapacity", min_capacity, THREAD);
   66.35 +    _capacity = create_variable(ns, "capacity", curr_capacity, THREAD);
   66.36 +    _max_capacity = create_variable(ns, "maxCapacity", max_capacity, THREAD);
   66.37 +    _used = create_variable(ns, "used", used, THREAD);
   66.38 +  }
   66.39 +
   66.40 +  void update(size_t capacity, size_t max_capacity, size_t used) {
   66.41 +    _capacity->set_value(capacity);
   66.42 +    _max_capacity->set_value(max_capacity);
   66.43 +    _used->set_value(used);
   66.44 +  }
   66.45 +};
   66.46 +
   66.47 +MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
   66.48 +
   66.49 +size_t MetaspaceCounters::calculate_capacity() {
   66.50    // The total capacity is the sum of
   66.51    //   1) capacity of Metachunks in use by all Metaspaces
   66.52    //   2) unused space at the end of each Metachunk
   66.53 @@ -39,95 +75,65 @@
   66.54    return total_capacity;
   66.55  }
   66.56  
   66.57 -MetaspaceCounters::MetaspaceCounters() :
   66.58 -    _capacity(NULL),
   66.59 -    _used(NULL),
   66.60 -    _max_capacity(NULL) {
   66.61 +void MetaspaceCounters::initialize_performance_counters() {
   66.62    if (UsePerfData) {
   66.63 +    assert(_perf_counters == NULL, "Should only be initialized once");
   66.64 +
   66.65      size_t min_capacity = MetaspaceAux::min_chunk_size();
   66.66 +    size_t capacity = calculate_capacity();
   66.67      size_t max_capacity = MetaspaceAux::reserved_in_bytes();
   66.68 -    size_t curr_capacity = calc_total_capacity();
   66.69      size_t used = MetaspaceAux::allocated_used_bytes();
   66.70  
   66.71 -    initialize(min_capacity, max_capacity, curr_capacity, used);
   66.72 -  }
   66.73 -}
   66.74 -
   66.75 -static PerfVariable* create_ms_variable(const char *ns,
   66.76 -                                        const char *name,
   66.77 -                                        size_t value,
   66.78 -                                        TRAPS) {
   66.79 -  const char *path = PerfDataManager::counter_name(ns, name);
   66.80 -  PerfVariable *result =
   66.81 -      PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value,
   66.82 -                                       CHECK_NULL);
   66.83 -  return result;
   66.84 -}
   66.85 -
   66.86 -static void create_ms_constant(const char *ns,
   66.87 -                               const char *name,
   66.88 -                               size_t value,
   66.89 -                               TRAPS) {
   66.90 -  const char *path = PerfDataManager::counter_name(ns, name);
   66.91 -  PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, CHECK);
   66.92 -}
   66.93 -
   66.94 -void MetaspaceCounters::initialize(size_t min_capacity,
   66.95 -                                   size_t max_capacity,
   66.96 -                                   size_t curr_capacity,
   66.97 -                                   size_t used) {
   66.98 -
   66.99 -  if (UsePerfData) {
  66.100 -    EXCEPTION_MARK;
  66.101 -    ResourceMark rm;
  66.102 -
  66.103 -    const char *ms = "metaspace";
  66.104 -
  66.105 -    create_ms_constant(ms, "minCapacity", min_capacity, CHECK);
  66.106 -    _max_capacity = create_ms_variable(ms, "maxCapacity", max_capacity, CHECK);
  66.107 -    _capacity = create_ms_variable(ms, "capacity", curr_capacity, CHECK);
  66.108 -    _used = create_ms_variable(ms, "used", used, CHECK);
  66.109 -  }
  66.110 -}
  66.111 -
  66.112 -void MetaspaceCounters::update_capacity() {
  66.113 -  assert(UsePerfData, "Should not be called unless being used");
  66.114 -  size_t total_capacity = calc_total_capacity();
  66.115 -  _capacity->set_value(total_capacity);
  66.116 -}
  66.117 -
  66.118 -void MetaspaceCounters::update_used() {
  66.119 -  assert(UsePerfData, "Should not be called unless being used");
  66.120 -  size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
  66.121 -  _used->set_value(used_in_bytes);
  66.122 -}
  66.123 -
  66.124 -void MetaspaceCounters::update_max_capacity() {
  66.125 -  assert(UsePerfData, "Should not be called unless being used");
  66.126 -  assert(_max_capacity != NULL, "Should be initialized");
  66.127 -  size_t reserved_in_bytes = MetaspaceAux::reserved_in_bytes();
  66.128 -  _max_capacity->set_value(reserved_in_bytes);
  66.129 -}
  66.130 -
  66.131 -void MetaspaceCounters::update_all() {
  66.132 -  if (UsePerfData) {
  66.133 -    update_used();
  66.134 -    update_capacity();
  66.135 -    update_max_capacity();
  66.136 -  }
  66.137 -}
  66.138 -
  66.139 -void MetaspaceCounters::initialize_performance_counters() {
  66.140 -  if (UsePerfData) {
  66.141 -    assert(_metaspace_counters == NULL, "Should only be initialized once");
  66.142 -    _metaspace_counters = new MetaspaceCounters();
  66.143 +    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
  66.144    }
  66.145  }
  66.146  
  66.147  void MetaspaceCounters::update_performance_counters() {
  66.148    if (UsePerfData) {
  66.149 -    assert(_metaspace_counters != NULL, "Should be initialized");
  66.150 -    _metaspace_counters->update_all();
  66.151 +    assert(_perf_counters != NULL, "Should be initialized");
  66.152 +
  66.153 +    size_t capacity = calculate_capacity();
  66.154 +    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
  66.155 +    size_t used = MetaspaceAux::allocated_used_bytes();
  66.156 +
  66.157 +    _perf_counters->update(capacity, max_capacity, used);
  66.158    }
  66.159  }
  66.160  
  66.161 +MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
  66.162 +
  66.163 +size_t CompressedClassSpaceCounters::calculate_capacity() {
  66.164 +  return MetaspaceAux::allocated_capacity_bytes(_class_type) +
  66.165 +         MetaspaceAux::free_bytes(_class_type) +
  66.166 +         MetaspaceAux::free_chunks_total_in_bytes(_class_type);
  66.167 +}
  66.168 +
  66.169 +void CompressedClassSpaceCounters::update_performance_counters() {
  66.170 +  if (UsePerfData && UseCompressedKlassPointers) {
  66.171 +    assert(_perf_counters != NULL, "Should be initialized");
  66.172 +
  66.173 +    size_t capacity = calculate_capacity();
  66.174 +    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
  66.175 +    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
  66.176 +
  66.177 +    _perf_counters->update(capacity, max_capacity, used);
  66.178 +  }
  66.179 +}
  66.180 +
  66.181 +void CompressedClassSpaceCounters::initialize_performance_counters() {
  66.182 +  if (UsePerfData) {
  66.183 +    assert(_perf_counters == NULL, "Should only be initialized once");
  66.184 +    const char* ns = "compressedclassspace";
  66.185 +
  66.186 +    if (UseCompressedKlassPointers) {
  66.187 +      size_t min_capacity = MetaspaceAux::min_chunk_size();
  66.188 +      size_t capacity = calculate_capacity();
  66.189 +      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
  66.190 +      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
  66.191 +
  66.192 +      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
  66.193 +    } else {
  66.194 +      _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
  66.195 +    }
  66.196 +  }
  66.197 +}
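
MetaspacePerfCounters groups one constant (minCapacity) and three variables (capacity, maxCapacity, used) under a namespace and refreshes the variables through a single update() call. The sketch below mirrors that shape with plain containers instead of the real PerfDataManager interface, which is only assumed in outline here:

    #include <cstdio>
    #include <map>
    #include <string>

    // Illustrative stand-in for a monitored byte-valued variable.
    struct PerfVariable {
      long long value = 0;
      void set_value(long long v) { value = v; }
    };

    class ToyPerfCounters {
      std::map<std::string, long long> _constants;
      PerfVariable _capacity, _max_capacity, _used;
      std::string _ns;

      std::string path(const char* name) const { return _ns + "." + name; }

     public:
      ToyPerfCounters(const char* ns, long long min_capacity,
                      long long capacity, long long max_capacity, long long used)
          : _ns(ns) {
        _constants[path("minCapacity")] = min_capacity;  // constant: set once, never updated
        _capacity.set_value(capacity);
        _max_capacity.set_value(max_capacity);
        _used.set_value(used);
      }

      // The variables are refreshed together, e.g. after a GC.
      void update(long long capacity, long long max_capacity, long long used) {
        _capacity.set_value(capacity);
        _max_capacity.set_value(max_capacity);
        _used.set_value(used);
      }

      void print() const {
        std::printf("%s: capacity=%lld max=%lld used=%lld\n",
                    _ns.c_str(), _capacity.value, _max_capacity.value, _used.value);
      }
    };

    int main() {
      ToyPerfCounters metaspace("metaspace", 4096, 1 << 20, 64 << 20, 1 << 19);
      metaspace.update(2 << 20, 64 << 20, 1 << 20);
      metaspace.print();
      return 0;
    }
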
    67.1 --- a/src/share/vm/memory/metaspaceCounters.hpp	Fri Aug 23 22:12:18 2013 +0100
    67.2 +++ b/src/share/vm/memory/metaspaceCounters.hpp	Fri Aug 30 09:50:49 2013 +0100
    67.3 @@ -25,31 +25,27 @@
    67.4  #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
    67.5  #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
    67.6  
    67.7 -#include "runtime/perfData.hpp"
    67.8 +#include "memory/metaspace.hpp"
    67.9  
   67.10 -class MetaspaceCounters: public CHeapObj<mtClass> {
   67.11 -  friend class VMStructs;
   67.12 -  PerfVariable*      _capacity;
   67.13 -  PerfVariable*      _used;
   67.14 -  PerfVariable*      _max_capacity;
   67.15 -  static MetaspaceCounters* _metaspace_counters;
   67.16 -  void initialize(size_t min_capacity,
   67.17 -                  size_t max_capacity,
   67.18 -                  size_t curr_capacity,
   67.19 -                  size_t used);
   67.20 -  size_t calc_total_capacity();
   67.21 +class MetaspacePerfCounters;
   67.22 +
   67.23 +class MetaspaceCounters: public AllStatic {
   67.24 +  static MetaspacePerfCounters* _perf_counters;
   67.25 +  static size_t calculate_capacity();
   67.26 +
   67.27   public:
   67.28 -  MetaspaceCounters();
   67.29 -  ~MetaspaceCounters();
   67.30 -
   67.31 -  void update_capacity();
   67.32 -  void update_used();
   67.33 -  void update_max_capacity();
   67.34 -
   67.35 -  void update_all();
   67.36 -
   67.37    static void initialize_performance_counters();
   67.38    static void update_performance_counters();
   67.39 +};
   67.40  
   67.41 +class CompressedClassSpaceCounters: public AllStatic {
   67.42 +  static MetaspacePerfCounters* _perf_counters;
   67.43 +  static size_t calculate_capacity();
   67.44 +  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
   67.45 +
   67.46 + public:
   67.47 +  static void initialize_performance_counters();
   67.48 +  static void update_performance_counters();
   67.49  };
   67.50 +
   67.51  #endif // SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
    68.1 --- a/src/share/vm/memory/metaspaceShared.cpp	Fri Aug 23 22:12:18 2013 +0100
    68.2 +++ b/src/share/vm/memory/metaspaceShared.cpp	Fri Aug 30 09:50:49 2013 +0100
    68.3 @@ -52,7 +52,6 @@
    68.4    int tag = 0;
    68.5    soc->do_tag(--tag);
    68.6  
    68.7 -  assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
    68.8    // Verify the sizes of various metadata in the system.
    68.9    soc->do_tag(sizeof(Method));
   68.10    soc->do_tag(sizeof(ConstMethod));
    69.1 --- a/src/share/vm/memory/universe.cpp	Fri Aug 23 22:12:18 2013 +0100
    69.2 +++ b/src/share/vm/memory/universe.cpp	Fri Aug 30 09:50:49 2013 +0100
    69.3 @@ -145,8 +145,6 @@
    69.4  NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
    69.5  address Universe::_narrow_ptrs_base;
    69.6  
    69.7 -size_t          Universe::_class_metaspace_size;
    69.8 -
    69.9  void Universe::basic_type_classes_do(void f(Klass*)) {
   69.10    f(boolArrayKlassObj());
   69.11    f(byteArrayKlassObj());
   69.12 @@ -641,6 +639,8 @@
   69.13      return status;
   69.14    }
   69.15  
   69.16 +  Metaspace::global_initialize();
   69.17 +
   69.18    // Create memory for metadata.  Must be after initializing heap for
   69.19    // DumpSharedSpaces.
   69.20    ClassLoaderData::init_null_class_loader_data();
   69.21 @@ -693,13 +693,9 @@
   69.22      if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
   69.23        base = HeapBaseMinAddress;
   69.24  
   69.25 -    // If the total size and the metaspace size are small enough to allow
   69.26 -    // UnscaledNarrowOop then just use UnscaledNarrowOop.
   69.27 -    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
   69.28 -        (!UseCompressedKlassPointers ||
   69.29 -          (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
   69.30 -      // We don't need to check the metaspace size here because it is always smaller
   69.31 -      // than total_size.
   69.32 +    // If the total size is small enough to allow UnscaledNarrowOop then
   69.33 +    // just use UnscaledNarrowOop.
   69.34 +    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
   69.35        if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
   69.36            (Universe::narrow_oop_shift() == 0)) {
   69.37          // Use 32-bits oops without encoding and
   69.38 @@ -716,13 +712,6 @@
   69.39            base = (OopEncodingHeapMax - heap_size);
   69.40          }
   69.41        }
   69.42 -
   69.43 -    // See if ZeroBaseNarrowOop encoding will work for a heap based at
   69.44 -    // (KlassEncodingMetaspaceMax - class_metaspace_size()).
   69.45 -    } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
   69.46 -        (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
   69.47 -        (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
   69.48 -      base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
   69.49      } else {
   69.50        // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
   69.51        // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
   69.52 @@ -732,8 +721,7 @@
   69.53      // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
   69.54      // used in ReservedHeapSpace() constructors.
   69.55      // The final values will be set in initialize_heap() below.
   69.56 -    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
   69.57 -        (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
   69.58 +    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
   69.59        // Use zero based compressed oops
   69.60        Universe::set_narrow_oop_base(NULL);
   69.61        // Don't need guard page for implicit checks in indexed
   69.62 @@ -816,9 +804,7 @@
   69.63        tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
   69.64                   Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
   69.65      }
   69.66 -    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
   69.67 -        (UseCompressedKlassPointers &&
   69.68 -        ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
   69.69 +    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
   69.70        // Can't reserve heap below 32Gb.
   69.71        // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
   69.72        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
   69.73 @@ -849,20 +835,16 @@
   69.74          }
   69.75        }
   69.76      }
   69.77 +
   69.78      if (verbose) {
   69.79        tty->cr();
   69.80        tty->cr();
   69.81      }
   69.82 -    if (UseCompressedKlassPointers) {
   69.83 -      Universe::set_narrow_klass_base(Universe::narrow_oop_base());
   69.84 -      Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
   69.85 -    }
   69.86      Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
   69.87    }
   69.88 -  // Universe::narrow_oop_base() is one page below the metaspace
   69.89 -  // base. The actual metaspace base depends on alignment constraints
   69.90 -  // so we don't know its exact location here.
   69.91 -  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
   69.92 +  // Universe::narrow_oop_base() is one page below the heap.
   69.93 +  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
   69.94 +         os::vm_page_size()) ||
   69.95           Universe::narrow_oop_base() == NULL, "invalid value");
   69.96    assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
   69.97           Universe::narrow_oop_shift() == 0, "invalid value");
   69.98 @@ -882,12 +864,7 @@
   69.99  
  69.100  // Reserve the Java heap, which is now the same for all GCs.
  69.101  ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  69.102 -  // Add in the class metaspace area so the classes in the headers can
  69.103 -  // be compressed the same as instances.
  69.104 -  // Need to round class space size up because it's below the heap and
  69.105 -  // the actual alignment depends on its size.
  69.106 -  Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
  69.107 -  size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
  69.108 +  size_t total_reserved = align_size_up(heap_size, alignment);
  69.109    assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
  69.110        "heap size is too big for compressed oops");
  69.111    char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  69.112 @@ -923,28 +900,17 @@
  69.113      return total_rs;
  69.114    }
  69.115  
  69.116 -  // Split the reserved space into main Java heap and a space for
  69.117 -  // classes so that they can be compressed using the same algorithm
  69.118 -  // as compressed oops. If compress oops and compress klass ptrs are
  69.119 -  // used we need the meta space first: if the alignment used for
  69.120 -  // compressed oops is greater than the one used for compressed klass
  69.121 -  // ptrs, a metadata space on top of the heap could become
  69.122 -  // unreachable.
  69.123 -  ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
  69.124 -  ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
  69.125 -  Metaspace::initialize_class_space(class_rs);
  69.126 -
  69.127    if (UseCompressedOops) {
  69.128      // Universe::initialize_heap() will reset this to NULL if unscaled
  69.129      // or zero-based narrow oops are actually used.
  69.130      address base = (address)(total_rs.base() - os::vm_page_size());
  69.131      Universe::set_narrow_oop_base(base);
  69.132    }
  69.133 -  return heap_rs;
  69.134 +  return total_rs;
  69.135  }
  69.136  
  69.137  
  69.138 -// It's the caller's repsonsibility to ensure glitch-freedom
  69.139 +// It's the caller's responsibility to ensure glitch-freedom
  69.140  // (if required).
  69.141  void Universe::update_heap_info_at_gc() {
  69.142    _heap_capacity_at_last_gc = heap()->capacity();
  69.143 @@ -1135,6 +1101,8 @@
  69.144  
  69.145    // Initialize performance counters for metaspaces
  69.146    MetaspaceCounters::initialize_performance_counters();
  69.147 +  CompressedClassSpaceCounters::initialize_performance_counters();
  69.148 +
  69.149    MemoryService::add_metaspace_memory_pools();
  69.150  
  69.151    GC_locker::unlock();  // allow gc after bootstrapping
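
With the class metaspace no longer carved out below the heap, preferred_heap_base() only has to weigh the heap itself against the compressed-oop encoding limits: unscaled oops when the reservation fits below 4 GB, zero-based oops when it fits below OopEncodingHeapMax (32 GB with 8-byte object alignment), and heap-based oops otherwise. A simplified, self-contained sketch of that decision, ignoring HeapBaseMinAddress and explicit mode requests, with the limits hard-coded for illustration:

    #include <cstdint>
    #include <cstdio>

    // Illustrative limits for 8-byte object alignment (shift = 3).
    static const uint64_t NarrowOopHeapMax   = UINT64_C(1) << 32;        // 4 GB, shift 0
    static const uint64_t OopEncodingHeapMax = UINT64_C(1) << (32 + 3);  // 32 GB, shift 3

    enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };

    static NarrowOopMode choose_mode(uint64_t heap_size) {
      if (heap_size <= NarrowOopHeapMax) {
        return UnscaledNarrowOop;      // 32-bit oops, no base, no shift
      } else if (heap_size <= OopEncodingHeapMax) {
        return ZeroBasedNarrowOop;     // no base, shift by the object-alignment log
      } else {
        return HeapBasedNarrowOop;     // base + shift required
      }
    }

    int main() {
      const uint64_t GB = UINT64_C(1) << 30;
      std::printf("3 GB  -> %d\n", choose_mode(3 * GB));   // UnscaledNarrowOop
      std::printf("20 GB -> %d\n", choose_mode(20 * GB));  // ZeroBasedNarrowOop
      std::printf("40 GB -> %d\n", choose_mode(40 * GB));  // HeapBasedNarrowOop
      return 0;
    }
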
    70.1 --- a/src/share/vm/memory/universe.hpp	Fri Aug 23 22:12:18 2013 +0100
    70.2 +++ b/src/share/vm/memory/universe.hpp	Fri Aug 30 09:50:49 2013 +0100
    70.3 @@ -75,10 +75,10 @@
    70.4  };
    70.5  
    70.6  
    70.7 -// For UseCompressedOops and UseCompressedKlassPointers.
    70.8 +// For UseCompressedOops.
    70.9  struct NarrowPtrStruct {
   70.10 -  // Base address for oop/klass-within-java-object materialization.
   70.11 -  // NULL if using wide oops/klasses or zero based narrow oops/klasses.
   70.12 +  // Base address for oop-within-java-object materialization.
   70.13 +  // NULL if using wide oops or zero based narrow oops.
   70.14    address _base;
   70.15    // Number of shift bits for encoding/decoding narrow ptrs.
   70.16    // 0 if using wide ptrs or zero based unscaled narrow ptrs,
   70.17 @@ -106,6 +106,7 @@
   70.18    friend class SystemDictionary;
   70.19    friend class VMStructs;
   70.20    friend class VM_PopulateDumpSharedSpace;
   70.21 +  friend class Metaspace;
   70.22  
   70.23    friend jint  universe_init();
   70.24    friend void  universe2_init();
   70.25 @@ -184,9 +185,6 @@
   70.26    static struct NarrowPtrStruct _narrow_klass;
   70.27    static address _narrow_ptrs_base;
   70.28  
   70.29 -  // Aligned size of the metaspace.
   70.30 -  static size_t _class_metaspace_size;
   70.31 -
   70.32    // array of dummy objects used with +FullGCAlot
   70.33    debug_only(static objArrayOop _fullgc_alot_dummy_array;)
   70.34    // index of next entry to clear
   70.35 @@ -238,15 +236,6 @@
   70.36      assert(UseCompressedOops, "no compressed ptrs?");
   70.37      _narrow_oop._use_implicit_null_checks   = use;
   70.38    }
   70.39 -  static bool     reserve_metaspace_helper(bool with_base = false);
   70.40 -  static ReservedHeapSpace reserve_heap_metaspace(size_t heap_size, size_t alignment, bool& contiguous);
   70.41 -
   70.42 -  static size_t  class_metaspace_size() {
   70.43 -    return _class_metaspace_size;
   70.44 -  }
   70.45 -  static void    set_class_metaspace_size(size_t metaspace_size) {
   70.46 -    _class_metaspace_size = metaspace_size;
   70.47 -  }
   70.48  
   70.49    // Debugging
   70.50    static int _verify_count;                           // number of verifies done
    71.1 --- a/src/share/vm/oops/instanceKlass.cpp	Fri Aug 23 22:12:18 2013 +0100
    71.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Fri Aug 30 09:50:49 2013 +0100
    71.3 @@ -269,7 +269,7 @@
    71.4    set_fields(NULL, 0);
    71.5    set_constants(NULL);
    71.6    set_class_loader_data(NULL);
    71.7 -  set_source_file_name(NULL);
    71.8 +  set_source_file_name_index(0);
    71.9    set_source_debug_extension(NULL, 0);
   71.10    set_array_name(NULL);
   71.11    set_inner_classes(NULL);
   71.12 @@ -284,7 +284,7 @@
   71.13    set_osr_nmethods_head(NULL);
   71.14    set_breakpoints(NULL);
   71.15    init_previous_versions();
   71.16 -  set_generic_signature(NULL);
   71.17 +  set_generic_signature_index(0);
   71.18    release_set_methods_jmethod_ids(NULL);
   71.19    release_set_methods_cached_itable_indices(NULL);
   71.20    set_annotations(NULL);
   71.21 @@ -2368,18 +2368,12 @@
   71.22    // unreference array name derived from this class name (arrays of an unloaded
   71.23    // class can't be referenced anymore).
   71.24    if (_array_name != NULL)  _array_name->decrement_refcount();
   71.25 -  if (_source_file_name != NULL) _source_file_name->decrement_refcount();
   71.26    if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass);
   71.27  
   71.28    assert(_total_instanceKlass_count >= 1, "Sanity check");
   71.29    Atomic::dec(&_total_instanceKlass_count);
   71.30  }
   71.31  
   71.32 -void InstanceKlass::set_source_file_name(Symbol* n) {
   71.33 -  _source_file_name = n;
   71.34 -  if (_source_file_name != NULL) _source_file_name->increment_refcount();
   71.35 -}
   71.36 -
   71.37  void InstanceKlass::set_source_debug_extension(char* array, int length) {
   71.38    if (array == NULL) {
   71.39      _source_debug_extension = NULL;
    72.1 --- a/src/share/vm/oops/instanceKlass.hpp	Fri Aug 23 22:12:18 2013 +0100
    72.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Fri Aug 30 09:50:49 2013 +0100
    72.3 @@ -201,14 +201,10 @@
    72.4    // number_of_inner_classes * 4 + enclosing_method_attribute_size.
    72.5    Array<jushort>* _inner_classes;
    72.6  
    72.7 -  // Name of source file containing this klass, NULL if not specified.
    72.8 -  Symbol*         _source_file_name;
    72.9    // the source debug extension for this klass, NULL if not specified.
   72.10    // Specified as UTF-8 string without terminating zero byte in the classfile,
   72.11    // it is stored in the instanceklass as a NULL-terminated UTF-8 string
   72.12    char*           _source_debug_extension;
   72.13 -  // Generic signature, or null if none.
   72.14 -  Symbol*         _generic_signature;
   72.15    // Array name derived from this class which needs unreferencing
   72.16    // if this class is unloaded.
   72.17    Symbol*         _array_name;
   72.18 @@ -217,6 +213,12 @@
   72.19    // (including inherited fields but after header_size()).
   72.20    int             _nonstatic_field_size;
   72.21    int             _static_field_size;    // number words used by static fields (oop and non-oop) in this klass
   72.22 +  // Constant pool index to the utf8 entry of the generic signature,
   72.23 +  // or 0 if none.
   72.24 +  u2              _generic_signature_index;
   72.25 +  // Constant pool index to the utf8 entry for the name of the source file
   72.26 +  // containing this klass, or 0 if not specified.
   72.27 +  u2              _source_file_name_index;
   72.28    u2              _static_oop_field_count;// number of static oop fields in this klass
   72.29    u2              _java_fields_count;    // The number of declared Java fields
   72.30    int             _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks
   72.31 @@ -570,8 +572,16 @@
   72.32    }
   72.33  
   72.34    // source file name
   72.35 -  Symbol* source_file_name() const         { return _source_file_name; }
   72.36 -  void set_source_file_name(Symbol* n);
   72.37 +  Symbol* source_file_name() const               {
   72.38 +    return (_source_file_name_index == 0) ?
   72.39 +      (Symbol*)NULL : _constants->symbol_at(_source_file_name_index);
   72.40 +  }
   72.41 +  u2 source_file_name_index() const              {
   72.42 +    return _source_file_name_index;
   72.43 +  }
   72.44 +  void set_source_file_name_index(u2 sourcefile_index) {
   72.45 +    _source_file_name_index = sourcefile_index;
   72.46 +  }
   72.47  
   72.48    // minor and major version numbers of class file
   72.49    u2 minor_version() const                 { return _minor_version; }
   72.50 @@ -648,8 +658,16 @@
   72.51    void set_initial_method_idnum(u2 value)             { _idnum_allocated_count = value; }
   72.52  
   72.53    // generics support
   72.54 -  Symbol* generic_signature() const                   { return _generic_signature; }
   72.55 -  void set_generic_signature(Symbol* sig)             { _generic_signature = sig; }
   72.56 +  Symbol* generic_signature() const                   {
   72.57 +    return (_generic_signature_index == 0) ?
   72.58 +      (Symbol*)NULL : _constants->symbol_at(_generic_signature_index);
   72.59 +  }
   72.60 +  u2 generic_signature_index() const                  {
   72.61 +    return _generic_signature_index;
   72.62 +  }
   72.63 +  void set_generic_signature_index(u2 sig_index)      {
   72.64 +    _generic_signature_index = sig_index;
   72.65 +  }
   72.66  
   72.67    u2 enclosing_method_data(int offset);
   72.68    u2 enclosing_method_class_index() {
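
Storing a u2 constant-pool index instead of a Symbol* removes the refcount bookkeeping from the release path and shrinks InstanceKlass: the Symbol is fetched lazily from the constant pool, with index 0 meaning "not specified". A standalone sketch of that lookup pattern, using a plain vector in place of HotSpot's ConstantPool:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    typedef uint16_t u2;

    // Illustrative stand-in for a constant pool of UTF-8 entries; slot 0 is unused.
    struct ToyConstantPool {
      std::vector<std::string> utf8;
      const std::string* symbol_at(u2 index) const { return &utf8.at(index); }
    };

    class ToyKlass {
      const ToyConstantPool* _constants;
      u2 _source_file_name_index;   // 0 means "not specified"
     public:
      explicit ToyKlass(const ToyConstantPool* cp)
          : _constants(cp), _source_file_name_index(0) {}

      void set_source_file_name_index(u2 index) { _source_file_name_index = index; }

      // Resolve the name lazily; no refcounting needed on the klass side.
      const std::string* source_file_name() const {
        return (_source_file_name_index == 0)
            ? nullptr
            : _constants->symbol_at(_source_file_name_index);
      }
    };

    int main() {
      ToyConstantPool cp;
      cp.utf8 = { "", "Foo.java" };   // index 1 holds the source file name
      ToyKlass k(&cp);
      std::printf("before: %s\n", k.source_file_name() ? k.source_file_name()->c_str() : "(none)");
      k.set_source_file_name_index(1);
      std::printf("after:  %s\n", k.source_file_name()->c_str());
      return 0;
    }
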
    73.1 --- a/src/share/vm/oops/klass.hpp	Fri Aug 23 22:12:18 2013 +0100
    73.2 +++ b/src/share/vm/oops/klass.hpp	Fri Aug 30 09:50:49 2013 +0100
    73.3 @@ -352,7 +352,8 @@
    73.4    static int layout_helper_log2_element_size(jint lh) {
    73.5      assert(lh < (jint)_lh_neutral_value, "must be array");
    73.6      int l2esz = (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask;
    73.7 -    assert(l2esz <= LogBitsPerLong, "sanity");
    73.8 +    assert(l2esz <= LogBitsPerLong,
    73.9 +        err_msg("sanity. l2esz: 0x%x for lh: 0x%x", (uint)l2esz, (uint)lh));
   73.10      return l2esz;
   73.11    }
   73.12    static jint array_layout_helper(jint tag, int hsize, BasicType etype, int log2_esize) {
   73.13 @@ -703,6 +704,16 @@
   73.14  
   73.15    virtual void oop_verify_on(oop obj, outputStream* st);
   73.16  
   73.17 +  static bool is_null(narrowKlass obj);
   73.18 +  static bool is_null(Klass* obj);
   73.19 +
   73.20 +  // Encoding and decoding of the klass pointer stored in object headers.
   73.21 +  static narrowKlass encode_klass_not_null(Klass* v);
   73.22 +  static narrowKlass encode_klass(Klass* v);
   73.23 +
   73.24 +  static Klass* decode_klass_not_null(narrowKlass v);
   73.25 +  static Klass* decode_klass(narrowKlass v);
   73.26 +
   73.27   private:
   73.28    // barriers used by klass_oop_store
   73.29    void klass_update_barrier_set(oop v);
    74.1 --- a/src/share/vm/oops/klass.inline.hpp	Fri Aug 23 22:12:18 2013 +0100
    74.2 +++ b/src/share/vm/oops/klass.inline.hpp	Fri Aug 30 09:50:49 2013 +0100
    74.3 @@ -1,5 +1,5 @@
    74.4  /*
    74.5 - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    74.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
    74.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.8   *
    74.9   * This code is free software; you can redistribute it and/or modify it
   74.10 @@ -25,6 +25,7 @@
   74.11  #ifndef SHARE_VM_OOPS_KLASS_INLINE_HPP
   74.12  #define SHARE_VM_OOPS_KLASS_INLINE_HPP
   74.13  
   74.14 +#include "memory/universe.hpp"
   74.15  #include "oops/klass.hpp"
   74.16  #include "oops/markOop.hpp"
   74.17  
   74.18 @@ -33,4 +34,41 @@
   74.19    _prototype_header = header;
   74.20  }
   74.21  
   74.22 +inline bool Klass::is_null(Klass* obj)  { return obj == NULL; }
   74.23 +inline bool Klass::is_null(narrowKlass obj) { return obj == 0; }
   74.24 +
   74.25 +// Encoding and decoding for klass field.
   74.26 +
   74.27 +inline bool check_klass_alignment(Klass* obj) {
   74.28 +  return (intptr_t)obj % KlassAlignmentInBytes == 0;
   74.29 +}
   74.30 +
   74.31 +inline narrowKlass Klass::encode_klass_not_null(Klass* v) {
   74.32 +  assert(!is_null(v), "klass value can never be zero");
   74.33 +  assert(check_klass_alignment(v), "Address not aligned");
   74.34 +  int    shift = Universe::narrow_klass_shift();
   74.35 +  uint64_t pd = (uint64_t)(pointer_delta((void*)v, Universe::narrow_klass_base(), 1));
   74.36 +  assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
   74.37 +  uint64_t result = pd >> shift;
   74.38 +  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
   74.39 +  assert(decode_klass(result) == v, "reversibility");
   74.40 +  return (narrowKlass)result;
   74.41 +}
   74.42 +
   74.43 +inline narrowKlass Klass::encode_klass(Klass* v) {
   74.44 +  return is_null(v) ? (narrowKlass)0 : encode_klass_not_null(v);
   74.45 +}
   74.46 +
   74.47 +inline Klass* Klass::decode_klass_not_null(narrowKlass v) {
   74.48 +  assert(!is_null(v), "narrow klass value can never be zero");
   74.49 +  int    shift = Universe::narrow_klass_shift();
   74.50 +  Klass* result = (Klass*)(void*)((uintptr_t)Universe::narrow_klass_base() + ((uintptr_t)v << shift));
   74.51 +  assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
   74.52 +  return result;
   74.53 +}
   74.54 +
   74.55 +inline Klass* Klass::decode_klass(narrowKlass v) {
   74.56 +  return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
   74.57 +}
   74.58 +
   74.59  #endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
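
The encode/decode pair that moved into Klass uses the usual base-plus-shift compression: a Klass* becomes a 32-bit offset from narrow_klass_base(), scaled down by narrow_klass_shift(). The sketch below reproduces that arithmetic with a base and shift hard-coded for illustration (in HotSpot both come from Universe and depend on where the class space was reserved):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowKlass;

    // Assumed example parameters: an 8-byte-aligned class space at a fixed base.
    static const uintptr_t kBase  = 0x40000000;  // hypothetical reservation base
    static const int       kShift = 3;           // log2 of the 8-byte klass alignment

    static narrowKlass encode_klass_not_null(uintptr_t klass_addr) {
      assert(klass_addr != 0 && "klass value can never be zero");
      assert(klass_addr % (1u << kShift) == 0 && "address not aligned");
      uint64_t delta  = (uint64_t)(klass_addr - kBase);
      uint64_t result = delta >> kShift;
      assert((result >> 32) == 0 && "narrow klass pointer overflow");
      return (narrowKlass)result;
    }

    static uintptr_t decode_klass_not_null(narrowKlass v) {
      assert(v != 0 && "narrow klass value can never be zero");
      return kBase + ((uintptr_t)v << kShift);
    }

    int main() {
      uintptr_t addr = kBase + 0x12340;            // some aligned "Klass*" inside the space
      narrowKlass nk = encode_klass_not_null(addr);
      assert(decode_klass_not_null(nk) == addr);   // reversibility, as in the HotSpot asserts
      std::printf("narrow value: 0x%x\n", nk);
      return 0;
    }
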
    75.1 --- a/src/share/vm/oops/method.cpp	Fri Aug 23 22:12:18 2013 +0100
    75.2 +++ b/src/share/vm/oops/method.cpp	Fri Aug 30 09:50:49 2013 +0100
    75.3 @@ -747,6 +747,7 @@
    75.4        set_not_c2_compilable();
    75.5    }
    75.6    CompilationPolicy::policy()->disable_compilation(this);
    75.7 +  assert(!CompilationPolicy::can_be_compiled(this, comp_level), "sanity check");
    75.8  }
    75.9  
   75.10  bool Method::is_not_osr_compilable(int comp_level) const {
   75.11 @@ -773,6 +774,7 @@
   75.12        set_not_c2_osr_compilable();
   75.13    }
   75.14    CompilationPolicy::policy()->disable_compilation(this);
   75.15 +  assert(!CompilationPolicy::can_be_osr_compiled(this, comp_level), "sanity check");
   75.16  }
   75.17  
   75.18  // Revert to using the interpreter and clear out the nmethod
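
The two added asserts check a postcondition: immediately after a method is marked not (OSR-)compilable at a level, the corresponding can_be_*compiled query must report false. A minimal sketch of that pattern, with an invented CompilationState class standing in for Method and CompilationPolicy:

    #include <cassert>

    // Invented stand-in: tracks per-level compilability of a single method.
    class CompilationState {
      bool _not_c1_compilable = false;
      bool _not_c2_compilable = false;
     public:
      bool can_be_compiled(int comp_level) const {
        return comp_level <= 3 ? !_not_c1_compilable : !_not_c2_compilable;
      }
      void set_not_compilable(int comp_level) {
        if (comp_level <= 3) {
          _not_c1_compilable = true;
        } else {
          _not_c2_compilable = true;
        }
        // Postcondition: the query must now agree with the state just set.
        assert(!can_be_compiled(comp_level) && "sanity check");
      }
    };

    int main() {
      CompilationState m;
      m.set_not_compilable(4);        // disable the highest tier
      assert(m.can_be_compiled(1));   // lower tiers unaffected
      return 0;
    }
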
    76.1 --- a/src/share/vm/oops/oop.hpp	Fri Aug 23 22:12:18 2013 +0100
    76.2 +++ b/src/share/vm/oops/oop.hpp	Fri Aug 30 09:50:49 2013 +0100
    76.3 @@ -1,5 +1,5 @@
    76.4  /*
    76.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    76.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    76.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.8   *
    76.9   * This code is free software; you can redistribute it and/or modify it
   76.10 @@ -62,7 +62,7 @@
   76.11    volatile markOop  _mark;
   76.12    union _metadata {
   76.13      Klass*      _klass;
   76.14 -    narrowOop       _compressed_klass;
   76.15 +    narrowKlass _compressed_klass;
   76.16    } _metadata;
   76.17  
   76.18    // Fast access to barrier set.  Must be initialized.
   76.19 @@ -84,7 +84,7 @@
   76.20    Klass* klass() const;
   76.21    Klass* klass_or_null() const volatile;
   76.22    Klass** klass_addr();
   76.23 -  narrowOop* compressed_klass_addr();
   76.24 +  narrowKlass* compressed_klass_addr();
   76.25  
   76.26    void set_klass(Klass* k);
   76.27  
   76.28 @@ -189,13 +189,6 @@
   76.29                                           oop compare_value,
   76.30                                           bool prebarrier = false);
   76.31  
   76.32 -  // klass encoding for klass pointer in objects.
   76.33 -  static narrowOop encode_klass_not_null(Klass* v);
   76.34 -  static narrowOop encode_klass(Klass* v);
   76.35 -
   76.36 -  static Klass* decode_klass_not_null(narrowOop v);
   76.37 -  static Klass* decode_klass(narrowOop v);
   76.38 -
   76.39    // Access to fields in a instanceOop through these methods.
   76.40    oop obj_field(int offset) const;
   76.41    volatile oop obj_field_volatile(int offset) const;
    77.1 --- a/src/share/vm/oops/oop.inline.hpp	Fri Aug 23 22:12:18 2013 +0100
    77.2 +++ b/src/share/vm/oops/oop.inline.hpp	Fri Aug 30 09:50:49 2013 +0100
    77.3 @@ -35,7 +35,7 @@
    77.4  #include "memory/specialized_oop_closures.hpp"
    77.5  #include "oops/arrayKlass.hpp"
    77.6  #include "oops/arrayOop.hpp"
    77.7 -#include "oops/klass.hpp"
    77.8 +#include "oops/klass.inline.hpp"
    77.9  #include "oops/markOop.inline.hpp"
   77.10  #include "oops/oop.hpp"
   77.11  #include "runtime/atomic.hpp"
   77.12 @@ -70,7 +70,7 @@
   77.13  
   77.14  inline Klass* oopDesc::klass() const {
   77.15    if (UseCompressedKlassPointers) {
   77.16 -    return decode_klass_not_null(_metadata._compressed_klass);
   77.17 +    return Klass::decode_klass_not_null(_metadata._compressed_klass);
   77.18    } else {
   77.19      return _metadata._klass;
   77.20    }
   77.21 @@ -79,7 +79,7 @@
   77.22  inline Klass* oopDesc::klass_or_null() const volatile {
   77.23    // can be NULL in CMS
   77.24    if (UseCompressedKlassPointers) {
   77.25 -    return decode_klass(_metadata._compressed_klass);
   77.26 +    return Klass::decode_klass(_metadata._compressed_klass);
   77.27    } else {
   77.28      return _metadata._klass;
   77.29    }
   77.30 @@ -87,7 +87,7 @@
   77.31  
   77.32  inline int oopDesc::klass_gap_offset_in_bytes() {
   77.33    assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
   77.34 -  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
   77.35 +  return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
   77.36  }
   77.37  
   77.38  inline Klass** oopDesc::klass_addr() {
   77.39 @@ -97,9 +97,9 @@
   77.40    return (Klass**) &_metadata._klass;
   77.41  }
   77.42  
   77.43 -inline narrowOop* oopDesc::compressed_klass_addr() {
   77.44 +inline narrowKlass* oopDesc::compressed_klass_addr() {
   77.45    assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
   77.46 -  return (narrowOop*) &_metadata._compressed_klass;
   77.47 +  return &_metadata._compressed_klass;
   77.48  }
   77.49  
   77.50  inline void oopDesc::set_klass(Klass* k) {
   77.51 @@ -107,7 +107,7 @@
   77.52    assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
   77.53    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
   77.54    if (UseCompressedKlassPointers) {
   77.55 -    *compressed_klass_addr() = encode_klass_not_null(k);
   77.56 +    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
   77.57    } else {
   77.58      *klass_addr() = k;
   77.59    }
   77.60 @@ -127,7 +127,7 @@
   77.61    // This is only to be used during GC, for from-space objects, so no
   77.62    // barrier is needed.
   77.63    if (UseCompressedKlassPointers) {
   77.64 -    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
   77.65 +    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
   77.66    } else {
   77.67      _metadata._klass = (Klass*)(address)k;
   77.68    }
   77.69 @@ -136,7 +136,7 @@
   77.70  inline oop oopDesc::list_ptr_from_klass() {
   77.71    // This is only to be used during GC, for from-space objects.
   77.72    if (UseCompressedKlassPointers) {
   77.73 -    return decode_heap_oop(_metadata._compressed_klass);
   77.74 +    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
   77.75    } else {
   77.76      // Special case for GC
   77.77      return (oop)(address)_metadata._klass;
   77.78 @@ -176,7 +176,6 @@
   77.79  // the right type and inlines the appopriate code).
   77.80  
   77.81  inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
   77.82 -inline bool oopDesc::is_null(Klass* obj)  { return obj == NULL; }
   77.83  inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
   77.84  
   77.85  // Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
   77.86 @@ -186,9 +185,6 @@
   77.87  inline bool check_obj_alignment(oop obj) {
   77.88    return (intptr_t)obj % MinObjAlignmentInBytes == 0;
   77.89  }
   77.90 -inline bool check_klass_alignment(Klass* obj) {
   77.91 -  return (intptr_t)obj % KlassAlignmentInBytes == 0;
   77.92 -}
   77.93  
   77.94  inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
   77.95    assert(!is_null(v), "oop value can never be zero");
   77.96 @@ -224,39 +220,6 @@
   77.97  inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
   77.98  inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
   77.99  
  77.100 -// Encoding and decoding for klass field.  It is copied code, but someday
  77.101 -// might not be the same as oop.
  77.102 -
  77.103 -inline narrowOop oopDesc::encode_klass_not_null(Klass* v) {
  77.104 -  assert(!is_null(v), "klass value can never be zero");
  77.105 -  assert(check_klass_alignment(v), "Address not aligned");
  77.106 -  address base = Universe::narrow_klass_base();
  77.107 -  int    shift = Universe::narrow_klass_shift();
  77.108 -  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  77.109 -  assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
  77.110 -  uint64_t result = pd >> shift;
  77.111 -  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
  77.112 -  assert(decode_klass(result) == v, "reversibility");
  77.113 -  return (narrowOop)result;
  77.114 -}
  77.115 -
  77.116 -inline narrowOop oopDesc::encode_klass(Klass* v) {
  77.117 -  return (is_null(v)) ? (narrowOop)0 : encode_klass_not_null(v);
  77.118 -}
  77.119 -
  77.120 -inline Klass* oopDesc::decode_klass_not_null(narrowOop v) {
  77.121 -  assert(!is_null(v), "narrow oop value can never be zero");
  77.122 -  address base = Universe::narrow_klass_base();
  77.123 -  int    shift = Universe::narrow_klass_shift();
  77.124 -  Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  77.125 -  assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  77.126 -  return result;
  77.127 -}
  77.128 -
  77.129 -inline Klass* oopDesc::decode_klass(narrowOop v) {
  77.130 -  return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
  77.131 -}
  77.132 -
  77.133  // Load an oop out of the Java heap as is without decoding.
  77.134  // Called by GC to check for null before decoding.
  77.135  inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
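
oopDesc now stores either a full Klass* or a 32-bit narrowKlass in its _metadata union, and klass_gap_offset_in_bytes() is derived from sizeof(narrowKlass): with compressed klass pointers the remaining 4 bytes of the 8-byte slot form the klass gap that field layout can reuse. A standalone layout sketch under those assumptions:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowKlass;
    struct Klass;   // opaque in this sketch

    // Simplified object header: mark word followed by a klass slot that is either
    // a full pointer or a compressed 32-bit value.
    struct ToyOopDesc {
      volatile uintptr_t _mark;
      union _metadata {
        Klass*      _klass;
        narrowKlass _compressed_klass;
      } _metadata;

      static size_t klass_offset_in_bytes() { return offsetof(ToyOopDesc, _metadata); }

      // With compressed klass pointers, the gap starts right after the 4-byte value.
      static size_t klass_gap_offset_in_bytes() {
        return klass_offset_in_bytes() + sizeof(narrowKlass);
      }
    };

    int main() {
      std::printf("klass offset: %zu, gap offset: %zu, header size: %zu\n",
                  ToyOopDesc::klass_offset_in_bytes(),
                  ToyOopDesc::klass_gap_offset_in_bytes(),
                  sizeof(ToyOopDesc));
      return 0;
    }
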
    78.1 --- a/src/share/vm/oops/oopsHierarchy.hpp	Fri Aug 23 22:12:18 2013 +0100
    78.2 +++ b/src/share/vm/oops/oopsHierarchy.hpp	Fri Aug 30 09:50:49 2013 +0100
    78.3 @@ -1,5 +1,5 @@
    78.4  /*
    78.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    78.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    78.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    78.8   *
    78.9   * This code is free software; you can redistribute it and/or modify it
   78.10 @@ -33,6 +33,10 @@
   78.11  // of B, A's representation is a prefix of B's representation.
   78.12  
   78.13  typedef juint narrowOop; // Offset instead of address for an oop within a java object
   78.14 +
   78.15 +// If compressed klass pointers are used, the klass field holds a narrowKlass.
   78.16 +typedef juint  narrowKlass;
   78.17 +
   78.18  typedef void* OopOrNarrowOopStar;
   78.19  typedef class   markOopDesc*                markOop;
   78.20  
    79.1 --- a/src/share/vm/opto/block.cpp	Fri Aug 23 22:12:18 2013 +0100
    79.2 +++ b/src/share/vm/opto/block.cpp	Fri Aug 30 09:50:49 2013 +0100
    79.3 @@ -35,10 +35,6 @@
    79.4  #include "opto/rootnode.hpp"
    79.5  #include "utilities/copy.hpp"
    79.6  
    79.7 -// Optimization - Graph Style
    79.8 -
    79.9 -
   79.10 -//-----------------------------------------------------------------------------
   79.11  void Block_Array::grow( uint i ) {
   79.12    assert(i >= Max(), "must be an overflow");
   79.13    debug_only(_limit = i+1);
   79.14 @@ -54,7 +50,6 @@
   79.15    Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
   79.16  }
   79.17  
   79.18 -//=============================================================================
   79.19  void Block_List::remove(uint i) {
   79.20    assert(i < _cnt, "index out of bounds");
   79.21    Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
   79.22 @@ -76,8 +71,6 @@
   79.23  }
   79.24  #endif
   79.25  
   79.26 -//=============================================================================
   79.27 -
   79.28  uint Block::code_alignment() {
   79.29    // Check for Root block
   79.30    if (_pre_order == 0) return CodeEntryAlignment;
   79.31 @@ -113,7 +106,6 @@
   79.32    return unit_sz; // no particular alignment
   79.33  }
   79.34  
   79.35 -//-----------------------------------------------------------------------------
   79.36  // Compute the size of first 'inst_cnt' instructions in this block.
   79.37  // Return the number of instructions left to compute if the block has
   79.38  // less then 'inst_cnt' instructions. Stop, and return 0 if sum_size
   79.39 @@ -138,7 +130,6 @@
   79.40    return inst_cnt;
   79.41  }
   79.42  
   79.43 -//-----------------------------------------------------------------------------
   79.44  uint Block::find_node( const Node *n ) const {
   79.45    for( uint i = 0; i < _nodes.size(); i++ ) {
   79.46      if( _nodes[i] == n )
   79.47 @@ -153,7 +144,6 @@
   79.48    _nodes.remove(find_node(n));
   79.49  }
   79.50  
   79.51 -//------------------------------is_Empty---------------------------------------
   79.52  // Return empty status of a block.  Empty blocks contain only the head, other
   79.53  // ideal nodes, and an optional trailing goto.
   79.54  int Block::is_Empty() const {
   79.55 @@ -192,7 +182,6 @@
   79.56    return not_empty;
   79.57  }
   79.58  
   79.59 -//------------------------------has_uncommon_code------------------------------
   79.60  // Return true if the block's code implies that it is likely to be
   79.61  // executed infrequently.  Check to see if the block ends in a Halt or
   79.62  // a low probability call.
   79.63 @@ -218,7 +207,6 @@
   79.64    return op == Op_Halt;
   79.65  }
   79.66  
   79.67 -//------------------------------is_uncommon------------------------------------
   79.68  // True if block is low enough frequency or guarded by a test which
   79.69  // mostly does not go here.
   79.70  bool Block::is_uncommon(PhaseCFG* cfg) const {
   79.71 @@ -271,7 +259,6 @@
   79.72    return false;
   79.73  }
   79.74  
   79.75 -//------------------------------dump-------------------------------------------
   79.76  #ifndef PRODUCT
   79.77  void Block::dump_bidx(const Block* orig, outputStream* st) const {
   79.78    if (_pre_order) st->print("B%d",_pre_order);
   79.79 @@ -364,13 +351,12 @@
   79.80  }
   79.81  #endif
   79.82  
   79.83 -//=============================================================================
   79.84 -//------------------------------PhaseCFG---------------------------------------
   79.85  PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
   79.86  : Phase(CFG)
   79.87  , _block_arena(arena)
   79.88 +, _root(root)
   79.89 +, _matcher(matcher)
   79.90  , _node_to_block_mapping(arena)
   79.91 -, _root(root)
   79.92  , _node_latency(NULL)
   79.93  #ifndef PRODUCT
   79.94  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
   79.95 @@ -390,11 +376,10 @@
   79.96    _goto->set_req(0,_goto);
   79.97  
   79.98    // Build the CFG in Reverse Post Order
   79.99 -  _num_blocks = build_cfg();
  79.100 -  _broot = get_block_for_node(_root);
  79.101 +  _number_of_blocks = build_cfg();
  79.102 +  _root_block = get_block_for_node(_root);
  79.103  }
  79.104  
  79.105 -//------------------------------build_cfg--------------------------------------
  79.106  // Build a proper looking CFG.  Make every block begin with either a StartNode
  79.107  // or a RegionNode.  Make every block end with either a Goto, If or Return.
  79.108  // The RootNode both starts and ends it's own block.  Do this with a recursive
  79.109 @@ -496,13 +481,12 @@
  79.110    return sum;
  79.111  }
  79.112  
  79.113 -//------------------------------insert_goto_at---------------------------------
  79.114  // Inserts a goto & corresponding basic block between
  79.115  // block[block_no] and its succ_no'th successor block
  79.116  void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  79.117    // get block with block_no
  79.118 -  assert(block_no < _num_blocks, "illegal block number");
  79.119 -  Block* in  = _blocks[block_no];
  79.120 +  assert(block_no < number_of_blocks(), "illegal block number");
  79.121 +  Block* in  = get_block(block_no);
  79.122    // get successor block succ_no
  79.123    assert(succ_no < in->_num_succs, "illegal successor number");
  79.124    Block* out = in->_succs[succ_no];
  79.125 @@ -537,11 +521,9 @@
  79.126    // Set the frequency of the new block
  79.127    block->_freq = freq;
  79.128    // add new basic block to basic block list
  79.129 -  _blocks.insert(block_no + 1, block);
  79.130 -  _num_blocks++;
  79.131 +  add_block_at(block_no + 1, block);
  79.132  }
  79.133  
  79.134 -//------------------------------no_flip_branch---------------------------------
  79.135  // Does this block end in a multiway branch that cannot have the default case
  79.136  // flipped for another case?
  79.137  static bool no_flip_branch( Block *b ) {
  79.138 @@ -560,7 +542,6 @@
  79.139    return false;
  79.140  }
  79.141  
  79.142 -//------------------------------convert_NeverBranch_to_Goto--------------------
  79.143  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  79.144  // true target.  NeverBranch are treated as a conditional branch that always
  79.145  // goes the same direction for most of the optimizer and are used to give a
  79.146 @@ -598,7 +579,6 @@
  79.147      dead->_nodes[k]->del_req(j);
  79.148  }
  79.149  
  79.150 -//------------------------------move_to_next-----------------------------------
  79.151  // Helper function to move block bx to the slot following b_index. Return
  79.152  // true if the move is successful, otherwise false
  79.153  bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  79.154 @@ -606,20 +586,22 @@
  79.155  
  79.156    // Return false if bx is already scheduled.
  79.157    uint bx_index = bx->_pre_order;
  79.158 -  if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
  79.159 +  if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
  79.160      return false;
  79.161    }
  79.162  
  79.163    // Find the current index of block bx on the block list
  79.164    bx_index = b_index + 1;
  79.165 -  while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
  79.166 -  assert(_blocks[bx_index] == bx, "block not found");
  79.167 +  while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
  79.168 +    bx_index++;
  79.169 +  }
  79.170 +  assert(get_block(bx_index) == bx, "block not found");
  79.171  
  79.172    // If the previous block conditionally falls into bx, return false,
  79.173    // because moving bx will create an extra jump.
  79.174    for(uint k = 1; k < bx->num_preds(); k++ ) {
  79.175      Block* pred = get_block_for_node(bx->pred(k));
  79.176 -    if (pred == _blocks[bx_index-1]) {
  79.177 +    if (pred == get_block(bx_index - 1)) {
  79.178        if (pred->_num_succs != 1) {
  79.179          return false;
  79.180        }
  79.181 @@ -632,7 +614,6 @@
  79.182    return true;
  79.183  }
  79.184  
  79.185 -//------------------------------move_to_end------------------------------------
  79.186  // Move empty and uncommon blocks to the end.
  79.187  void PhaseCFG::move_to_end(Block *b, uint i) {
  79.188    int e = b->is_Empty();
  79.189 @@ -650,31 +631,31 @@
  79.190    _blocks.push(b);
  79.191  }
  79.192  
  79.193 -//---------------------------set_loop_alignment--------------------------------
  79.194  // Set loop alignment for every block
  79.195  void PhaseCFG::set_loop_alignment() {
  79.196 -  uint last = _num_blocks;
  79.197 -  assert( _blocks[0] == _broot, "" );
  79.198 +  uint last = number_of_blocks();
  79.199 +  assert(get_block(0) == get_root_block(), "");
  79.200  
  79.201 -  for (uint i = 1; i < last; i++ ) {
  79.202 -    Block *b = _blocks[i];
  79.203 -    if (b->head()->is_Loop()) {
  79.204 -      b->set_loop_alignment(b);
  79.205 +  for (uint i = 1; i < last; i++) {
  79.206 +    Block* block = get_block(i);
  79.207 +    if (block->head()->is_Loop()) {
  79.208 +      block->set_loop_alignment(block);
  79.209      }
  79.210    }
  79.211  }
  79.212  
  79.213 -//-----------------------------remove_empty------------------------------------
  79.214  // Make empty basic blocks to be "connector" blocks, Move uncommon blocks
  79.215  // to the end.
  79.216 -void PhaseCFG::remove_empty() {
  79.217 +void PhaseCFG::remove_empty_blocks() {
  79.218    // Move uncommon blocks to the end
  79.219 -  uint last = _num_blocks;
  79.220 -  assert( _blocks[0] == _broot, "" );
  79.221 +  uint last = number_of_blocks();
  79.222 +  assert(get_block(0) == get_root_block(), "");
  79.223  
  79.224    for (uint i = 1; i < last; i++) {
  79.225 -    Block *b = _blocks[i];
  79.226 -    if (b->is_connector()) break;
  79.227 +    Block* block = get_block(i);
  79.228 +    if (block->is_connector()) {
  79.229 +      break;
  79.230 +    }
  79.231  
  79.232      // Check for NeverBranch at block end.  This needs to become a GOTO to the
  79.233      // true target.  NeverBranch are treated as a conditional branch that
  79.234 @@ -682,124 +663,127 @@
  79.235      // to give a fake exit path to infinite loops.  At this late stage they
  79.236      // need to turn into Goto's so that when you enter the infinite loop you
  79.237      // indeed hang.
  79.238 -    if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
  79.239 -      convert_NeverBranch_to_Goto(b);
  79.240 +    if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
  79.241 +      convert_NeverBranch_to_Goto(block);
  79.242 +    }
  79.243  
  79.244      // Look for uncommon blocks and move to end.
  79.245      if (!C->do_freq_based_layout()) {
  79.246 -      if (b->is_uncommon(this)) {
  79.247 -        move_to_end(b, i);
  79.248 +      if (block->is_uncommon(this)) {
  79.249 +        move_to_end(block, i);
  79.250          last--;                   // No longer check for being uncommon!
  79.251 -        if( no_flip_branch(b) ) { // Fall-thru case must follow?
  79.252 -          b = _blocks[i];         // Find the fall-thru block
  79.253 -          move_to_end(b, i);
  79.254 +        if (no_flip_branch(block)) { // Fall-thru case must follow?
  79.255 +          // Find the fall-thru block
  79.256 +          block = get_block(i);
  79.257 +          move_to_end(block, i);
  79.258            last--;
  79.259          }
  79.260 -        i--;                      // backup block counter post-increment
  79.261 +        // backup block counter post-increment
  79.262 +        i--;
  79.263        }
  79.264      }
  79.265    }
  79.266  
  79.267    // Move empty blocks to the end
  79.268 -  last = _num_blocks;
  79.269 +  last = number_of_blocks();
  79.270    for (uint i = 1; i < last; i++) {
  79.271 -    Block *b = _blocks[i];
  79.272 -    if (b->is_Empty() != Block::not_empty) {
  79.273 -      move_to_end(b, i);
  79.274 +    Block* block = get_block(i);
  79.275 +    if (block->is_Empty() != Block::not_empty) {
  79.276 +      move_to_end(block, i);
  79.277        last--;
  79.278        i--;
  79.279      }
  79.280    } // End of for all blocks
  79.281  }
  79.282  
  79.283 -//-----------------------------fixup_flow--------------------------------------
  79.284  // Fix up the final control flow for basic blocks.
  79.285  void PhaseCFG::fixup_flow() {
  79.286    // Fixup final control flow for the blocks.  Remove jump-to-next
  79.287    // block.  If neither arm of a IF follows the conditional branch, we
  79.288    // have to add a second jump after the conditional.  We place the
  79.289    // TRUE branch target in succs[0] for both GOTOs and IFs.
  79.290 -  for (uint i=0; i < _num_blocks; i++) {
  79.291 -    Block *b = _blocks[i];
  79.292 -    b->_pre_order = i;          // turn pre-order into block-index
  79.293 +  for (uint i = 0; i < number_of_blocks(); i++) {
  79.294 +    Block* block = get_block(i);
  79.295 +    block->_pre_order = i;          // turn pre-order into block-index
  79.296  
  79.297      // Connector blocks need no further processing.
  79.298 -    if (b->is_connector()) {
  79.299 -      assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(),
  79.300 -             "All connector blocks should sink to the end");
  79.301 +    if (block->is_connector()) {
  79.302 +      assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
  79.303        continue;
  79.304      }
  79.305 -    assert(b->is_Empty() != Block::completely_empty,
  79.306 -           "Empty blocks should be connectors");
  79.307 +    assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");
  79.308  
  79.309 -    Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL;
  79.310 -    Block *bs0 = b->non_connector_successor(0);
  79.311 +    Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
  79.312 +    Block* bs0 = block->non_connector_successor(0);
  79.313  
  79.314      // Check for multi-way branches where I cannot negate the test to
  79.315      // exchange the true and false targets.
  79.316 -    if( no_flip_branch( b ) ) {
  79.317 +    if (no_flip_branch(block)) {
  79.318        // Find fall through case - if must fall into its target
  79.319 -      int branch_idx = b->_nodes.size() - b->_num_succs;
  79.320 -      for (uint j2 = 0; j2 < b->_num_succs; j2++) {
  79.321 -        const ProjNode* p = b->_nodes[branch_idx + j2]->as_Proj();
  79.322 +      int branch_idx = block->_nodes.size() - block->_num_succs;
  79.323 +      for (uint j2 = 0; j2 < block->_num_succs; j2++) {
  79.324 +        const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
  79.325          if (p->_con == 0) {
  79.326            // successor j2 is fall through case
  79.327 -          if (b->non_connector_successor(j2) != bnext) {
  79.328 +          if (block->non_connector_successor(j2) != bnext) {
  79.329              // but it is not the next block => insert a goto
  79.330              insert_goto_at(i, j2);
  79.331            }
  79.332            // Put taken branch in slot 0
  79.333 -          if( j2 == 0 && b->_num_succs == 2) {
  79.334 +          if (j2 == 0 && block->_num_succs == 2) {
  79.335              // Flip targets in succs map
  79.336 -            Block *tbs0 = b->_succs[0];
  79.337 -            Block *tbs1 = b->_succs[1];
  79.338 -            b->_succs.map( 0, tbs1 );
  79.339 -            b->_succs.map( 1, tbs0 );
  79.340 +            Block *tbs0 = block->_succs[0];
  79.341 +            Block *tbs1 = block->_succs[1];
  79.342 +            block->_succs.map(0, tbs1);
  79.343 +            block->_succs.map(1, tbs0);
  79.344            }
  79.345            break;
  79.346          }
  79.347        }
  79.348 +
  79.349        // Remove all CatchProjs
  79.350 -      for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();
  79.351 +      for (uint j = 0; j < block->_num_succs; j++) {
  79.352 +        block->_nodes.pop();
  79.353 +      }
  79.354  
  79.355 -    } else if (b->_num_succs == 1) {
  79.356 +    } else if (block->_num_succs == 1) {
  79.357        // Block ends in a Goto?
  79.358        if (bnext == bs0) {
  79.359          // We fall into next block; remove the Goto
  79.360 -        b->_nodes.pop();
  79.361 +        block->_nodes.pop();
  79.362        }
  79.363  
  79.364 -    } else if( b->_num_succs == 2 ) { // Block ends in a If?
  79.365 +    } else if (block->_num_succs == 2) { // Block ends in an If?
  79.366        // Get opcode of 1st projection (matches _succs[0])
  79.367        // Note: Since this basic block has 2 exits, the last 2 nodes must
  79.368        //       be projections (in any order), the 3rd last node must be
  79.369        //       the IfNode (we have excluded other 2-way exits such as
  79.370        //       CatchNodes already).
  79.371 -      MachNode *iff   = b->_nodes[b->_nodes.size()-3]->as_Mach();
  79.372 -      ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
  79.373 -      ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
  79.374 +      MachNode* iff   = block->_nodes[block->_nodes.size() - 3]->as_Mach();
  79.375 +      ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
  79.376 +      ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
  79.377  
  79.378        // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
  79.379 -      assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0");
  79.380 -      assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1");
  79.381 +      assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
  79.382 +      assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");
  79.383  
  79.384 -      Block *bs1 = b->non_connector_successor(1);
  79.385 +      Block* bs1 = block->non_connector_successor(1);
  79.386  
  79.387        // Check for neither successor block following the current
  79.388        // block ending in a conditional. If so, move one of the
  79.389        // successors after the current one, provided that the
  79.390        // successor was previously unscheduled, but moveable
  79.391        // (i.e., all paths to it involve a branch).
  79.392 -      if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) {
  79.393 +      if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
  79.394          // Choose the more common successor based on the probability
  79.395          // of the conditional branch.
  79.396 -        Block *bx = bs0;
  79.397 -        Block *by = bs1;
  79.398 +        Block* bx = bs0;
  79.399 +        Block* by = bs1;
  79.400  
  79.401          // _prob is the probability of taking the true path. Make
  79.402          // p the probability of taking successor #1.
  79.403          float p = iff->as_MachIf()->_prob;
  79.404 -        if( proj0->Opcode() == Op_IfTrue ) {
  79.405 +        if (proj0->Opcode() == Op_IfTrue) {
  79.406            p = 1.0 - p;
  79.407          }
  79.408  
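
    A side illustration of the probability flip above (a standalone sketch with made-up values,
    not HotSpot code): _prob is the MachIf's probability of taking the true path, so when proj0
    (succs[0]) is the IfTrue projection, the probability of reaching succs[1] is 1 - _prob, and
    the layout code then prefers the more likely successor as the fall-through target.

    #include <cstdio>

    int main() {
      float prob_true = 0.7f;        // probability of taking the true path (MachIf::_prob)
      bool proj0_is_IfTrue = true;   // does projection 0 (succs[0]) carry the true path?
      float p = prob_true;
      if (proj0_is_IfTrue) {
        p = 1.0f - p;                // p is now the probability of reaching succs[1]
      }
      int preferred = (p > 0.5f) ? 1 : 0;   // fall through to the more likely successor
      std::printf("p(succs[1]) = %.2f, prefer succs[%d] as fall-through\n", p, preferred);
      return 0;
    }
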
  79.409 @@ -826,14 +810,16 @@
  79.410        // succs[1].
  79.411        if (bnext == bs0) {
  79.412          // Fall-thru case in succs[0], so flip targets in succs map
  79.413 -        Block *tbs0 = b->_succs[0];
  79.414 -        Block *tbs1 = b->_succs[1];
  79.415 -        b->_succs.map( 0, tbs1 );
  79.416 -        b->_succs.map( 1, tbs0 );
  79.417 +        Block* tbs0 = block->_succs[0];
  79.418 +        Block* tbs1 = block->_succs[1];
  79.419 +        block->_succs.map(0, tbs1);
  79.420 +        block->_succs.map(1, tbs0);
  79.421          // Flip projection for each target
  79.422 -        { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; }
  79.423 +        ProjNode* tmp = proj0;
  79.424 +        proj0 = proj1;
  79.425 +        proj1 = tmp;
  79.426  
  79.427 -      } else if( bnext != bs1 ) {
  79.428 +      } else if (bnext != bs1) {
  79.429          // Need a double-branch
  79.430          // The existing conditional branch need not change.
  79.431          // Add a unconditional branch to the false target.
  79.432 @@ -843,12 +829,12 @@
  79.433        }
  79.434  
  79.435        // Make sure we TRUE branch to the target
  79.436 -      if( proj0->Opcode() == Op_IfFalse ) {
  79.437 +      if (proj0->Opcode() == Op_IfFalse) {
  79.438          iff->as_MachIf()->negate();
  79.439        }
  79.440  
  79.441 -      b->_nodes.pop();          // Remove IfFalse & IfTrue projections
  79.442 -      b->_nodes.pop();
  79.443 +      block->_nodes.pop();          // Remove IfFalse & IfTrue projections
  79.444 +      block->_nodes.pop();
  79.445  
  79.446      } else {
  79.447        // Multi-exit block, e.g. a switch statement
  79.448 @@ -858,7 +844,6 @@
  79.449  }
  79.450  
  79.451  
  79.452 -//------------------------------dump-------------------------------------------
  79.453  #ifndef PRODUCT
  79.454  void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited  ) const {
  79.455    const Node *x = end->is_block_proj();
  79.456 @@ -884,10 +869,11 @@
  79.457  }
  79.458  
  79.459  void PhaseCFG::dump( ) const {
  79.460 -  tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
  79.461 +  tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
  79.462    if (_blocks.size()) {        // Did we do basic-block layout?
  79.463 -    for (uint i = 0; i < _num_blocks; i++) {
  79.464 -      _blocks[i]->dump(this);
  79.465 +    for (uint i = 0; i < number_of_blocks(); i++) {
  79.466 +      const Block* block = get_block(i);
  79.467 +      block->dump(this);
  79.468      }
  79.469    } else {                      // Else do it with a DFS
  79.470      VectorSet visited(_block_arena);
  79.471 @@ -896,27 +882,26 @@
  79.472  }
  79.473  
  79.474  void PhaseCFG::dump_headers() {
  79.475 -  for( uint i = 0; i < _num_blocks; i++ ) {
  79.476 -    if (_blocks[i]) {
  79.477 -      _blocks[i]->dump_head(this);
  79.478 +  for (uint i = 0; i < number_of_blocks(); i++) {
  79.479 +    Block* block = get_block(i);
  79.480 +    if (block != NULL) {
  79.481 +      block->dump_head(this);
  79.482      }
  79.483    }
  79.484  }
  79.485  
  79.486 -void PhaseCFG::verify( ) const {
  79.487 +void PhaseCFG::verify() const {
  79.488  #ifdef ASSERT
  79.489    // Verify sane CFG
  79.490 -  for (uint i = 0; i < _num_blocks; i++) {
  79.491 -    Block *b = _blocks[i];
  79.492 -    uint cnt = b->_nodes.size();
  79.493 +  for (uint i = 0; i < number_of_blocks(); i++) {
  79.494 +    Block* block = get_block(i);
  79.495 +    uint cnt = block->_nodes.size();
  79.496      uint j;
  79.497      for (j = 0; j < cnt; j++)  {
  79.498 -      Node *n = b->_nodes[j];
  79.499 -      assert(get_block_for_node(n) == b, "");
  79.500 -      if (j >= 1 && n->is_Mach() &&
  79.501 -          n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
  79.502 -        assert(j == 1 || b->_nodes[j-1]->is_Phi(),
  79.503 -               "CreateEx must be first instruction in block");
  79.504 +      Node *n = block->_nodes[j];
  79.505 +      assert(get_block_for_node(n) == block, "");
  79.506 +      if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
  79.507 +        assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
  79.508        }
  79.509        for (uint k = 0; k < n->req(); k++) {
  79.510          Node *def = n->in(k);
  79.511 @@ -926,8 +911,7 @@
  79.512            // Uses must follow their definition if they are at the same block.
  79.513            // Mostly done to check that MachSpillCopy nodes are placed correctly
  79.514            // when CreateEx node is moved in build_ifg_physical().
  79.515 -          if (get_block_for_node(def) == b &&
  79.516 -              !(b->head()->is_Loop() && n->is_Phi()) &&
  79.517 +          if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
  79.518                // See (+++) comment in reg_split.cpp
  79.519                !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
  79.520              bool is_loop = false;
  79.521 @@ -939,29 +923,29 @@
  79.522                  }
  79.523                }
  79.524              }
  79.525 -            assert(is_loop || b->find_node(def) < j, "uses must follow definitions");
  79.526 +            assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
  79.527            }
  79.528          }
  79.529        }
  79.530      }
  79.531  
  79.532 -    j = b->end_idx();
  79.533 -    Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();
  79.534 -    assert( bp, "last instruction must be a block proj" );
  79.535 -    assert( bp == b->_nodes[j], "wrong number of successors for this block" );
  79.536 +    j = block->end_idx();
  79.537 +    Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
  79.538 +    assert(bp, "last instruction must be a block proj");
  79.539 +    assert(bp == block->_nodes[j], "wrong number of successors for this block");
  79.540      if (bp->is_Catch()) {
  79.541 -      while (b->_nodes[--j]->is_MachProj()) ;
  79.542 -      assert(b->_nodes[j]->is_MachCall(), "CatchProj must follow call");
  79.543 +      while (block->_nodes[--j]->is_MachProj()) {
  79.544 +        ;
  79.545 +      }
  79.546 +      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
  79.547      } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
  79.548 -      assert(b->_num_succs == 2, "Conditional branch must have two targets");
  79.549 +      assert(block->_num_succs == 2, "Conditional branch must have two targets");
  79.550      }
  79.551    }
  79.552  #endif
  79.553  }
  79.554  #endif
  79.555  
  79.556 -//=============================================================================
  79.557 -//------------------------------UnionFind--------------------------------------
  79.558  UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
  79.559    Copy::zero_to_bytes( _indices, sizeof(uint)*max );
  79.560  }
  79.561 @@ -986,7 +970,6 @@
  79.562    for( uint i=0; i<max; i++ ) map(i,i);
  79.563  }
  79.564  
  79.565 -//------------------------------Find_compress----------------------------------
  79.566  // Straight out of Tarjan's union-find algorithm
  79.567  uint UnionFind::Find_compress( uint idx ) {
  79.568    uint cur  = idx;
  79.569 @@ -1006,7 +989,6 @@
  79.570    return idx;
  79.571  }
  79.572  
  79.573 -//------------------------------Find_const-------------------------------------
  79.574  // Like Find above, but no path compress, so bad asymptotic behavior
  79.575  uint UnionFind::Find_const( uint idx ) const {
  79.576    if( idx == 0 ) return idx;    // Ignore the zero idx
  79.577 @@ -1021,7 +1003,6 @@
  79.578    return next;
  79.579  }
  79.580  
  79.581 -//------------------------------Union------------------------------------------
  79.582  // union 2 sets together.
  79.583  void UnionFind::Union( uint idx1, uint idx2 ) {
  79.584    uint src = Find(idx1);
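
    The UnionFind routines above follow the textbook shape: Find_compress walks up to the set
    leader and compresses the path behind it, Find_const looks the leader up without compressing,
    and Union points one leader at the other. A minimal standalone sketch of the same idea
    (simplified; the real class ignores index 0 and keeps its indices in a resource array):

    #include <cstdint>
    #include <vector>

    // Minimal union-find with path compression, mirroring the Find/Union shape above.
    class SimpleUnionFind {
      std::vector<uint32_t> _parent;
     public:
      explicit SimpleUnionFind(uint32_t max) : _parent(max) {
        for (uint32_t i = 0; i < max; i++) _parent[i] = i;  // every element starts as its own leader
      }
      uint32_t find(uint32_t idx) {                          // Find with path compression
        uint32_t root = idx;
        while (_parent[root] != root) root = _parent[root];  // walk to the leader
        while (_parent[idx] != root) {                       // compress the walked path
          uint32_t next = _parent[idx];
          _parent[idx] = root;
          idx = next;
        }
        return root;
      }
      void unite(uint32_t a, uint32_t b) {                   // union the two sets
        _parent[find(b)] = find(a);
      }
    };
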
  79.585 @@ -1070,9 +1051,6 @@
  79.586  }
  79.587  #endif
  79.588  
  79.589 -//=============================================================================
  79.590 -
  79.591 -//------------------------------edge_order-------------------------------------
  79.592  // Comparison function for edges
  79.593  static int edge_order(CFGEdge **e0, CFGEdge **e1) {
  79.594    float freq0 = (*e0)->freq();
  79.595 @@ -1087,7 +1065,6 @@
  79.596    return dist1 - dist0;
  79.597  }
  79.598  
  79.599 -//------------------------------trace_frequency_order--------------------------
  79.600  // Comparison function for edges
  79.601  extern "C" int trace_frequency_order(const void *p0, const void *p1) {
  79.602    Trace *tr0 = *(Trace **) p0;
  79.603 @@ -1113,17 +1090,15 @@
  79.604    return diff;
  79.605  }
  79.606  
  79.607 -//------------------------------find_edges-------------------------------------
  79.608  // Find edges of interest, i.e, those which can fall through. Presumes that
  79.609  // edges which don't fall through are of low frequency and can be generally
  79.610  // ignored.  Initialize the list of traces.
  79.611 -void PhaseBlockLayout::find_edges()
  79.612 -{
  79.613 +void PhaseBlockLayout::find_edges() {
  79.614    // Walk the blocks, creating edges and Traces
  79.615    uint i;
  79.616    Trace *tr = NULL;
  79.617 -  for (i = 0; i < _cfg._num_blocks; i++) {
  79.618 -    Block *b = _cfg._blocks[i];
  79.619 +  for (i = 0; i < _cfg.number_of_blocks(); i++) {
  79.620 +    Block* b = _cfg.get_block(i);
  79.621      tr = new Trace(b, next, prev);
  79.622      traces[tr->id()] = tr;
  79.623  
  79.624 @@ -1147,7 +1122,7 @@
  79.625        if (n->num_preds() != 1) break;
  79.626  
  79.627        i++;
  79.628 +      assert(n == _cfg.get_block(i), "expecting next block");
  79.629 +      assert(n = _cfg.get_block(i), "expecting next block");
  79.630        tr->append(n);
  79.631        uf->map(n->_pre_order, tr->id());
  79.632        traces[n->_pre_order] = NULL;
  79.633 @@ -1171,8 +1146,8 @@
  79.634    }
  79.635  
  79.636    // Group connector blocks into one trace
  79.637 -  for (i++; i < _cfg._num_blocks; i++) {
  79.638 -    Block *b = _cfg._blocks[i];
  79.639 +  for (i++; i < _cfg.number_of_blocks(); i++) {
  79.640 +    Block *b = _cfg.get_block(i);
  79.641      assert(b->is_connector(), "connector blocks at the end");
  79.642      tr->append(b);
  79.643      uf->map(b->_pre_order, tr->id());
  79.644 @@ -1180,10 +1155,8 @@
  79.645    }
  79.646  }
  79.647  
  79.648 -//------------------------------union_traces----------------------------------
  79.649  // Union two traces together in uf, and null out the trace in the list
  79.650 -void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace)
  79.651 -{
  79.652 +void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
  79.653    uint old_id = old_trace->id();
  79.654    uint updated_id = updated_trace->id();
  79.655  
  79.656 @@ -1207,10 +1180,8 @@
  79.657    traces[hi_id] = NULL;
  79.658  }
  79.659  
  79.660 -//------------------------------grow_traces-------------------------------------
  79.661  // Append traces together via the most frequently executed edges
  79.662 -void PhaseBlockLayout::grow_traces()
  79.663 -{
  79.664 +void PhaseBlockLayout::grow_traces() {
  79.665    // Order the edges, and drive the growth of Traces via the most
  79.666    // frequently executed edges.
  79.667    edges->sort(edge_order);
  79.668 @@ -1252,11 +1223,9 @@
  79.669    }
  79.670  }
  79.671  
  79.672 -//------------------------------merge_traces-----------------------------------
  79.673  // Embed one trace into another, if the fork or join points are sufficiently
  79.674  // balanced.
  79.675 -void PhaseBlockLayout::merge_traces(bool fall_thru_only)
  79.676 -{
  79.677 +void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
  79.678    // Walk the edge list a another time, looking at unprocessed edges.
  79.679    // Fold in diamonds
  79.680    for (int i = 0; i < edges->length(); i++) {
  79.681 @@ -1310,7 +1279,7 @@
  79.682          src_trace->insert_after(src_block, targ_trace);
  79.683          union_traces(src_trace, targ_trace);
  79.684        } else if (src_at_tail) {
  79.685 -        if (src_trace != trace(_cfg._broot)) {
  79.686 +        if (src_trace != trace(_cfg.get_root_block())) {
  79.687            e->set_state(CFGEdge::connected);
  79.688            targ_trace->insert_before(targ_block, src_trace);
  79.689            union_traces(targ_trace, src_trace);
  79.690 @@ -1319,7 +1288,7 @@
  79.691      } else if (e->state() == CFGEdge::open) {
  79.692        // Append traces, even without a fall-thru connection.
  79.693        // But leave root entry at the beginning of the block list.
  79.694 -      if (targ_trace != trace(_cfg._broot)) {
  79.695 +      if (targ_trace != trace(_cfg.get_root_block())) {
  79.696          e->set_state(CFGEdge::connected);
  79.697          src_trace->append(targ_trace);
  79.698          union_traces(src_trace, targ_trace);
  79.699 @@ -1328,11 +1297,9 @@
  79.700    }
  79.701  }
  79.702  
  79.703 -//----------------------------reorder_traces-----------------------------------
  79.704  // Order the sequence of the traces in some desirable way, and fixup the
  79.705  // jumps at the end of each block.
  79.706 -void PhaseBlockLayout::reorder_traces(int count)
  79.707 -{
  79.708 +void PhaseBlockLayout::reorder_traces(int count) {
  79.709    ResourceArea *area = Thread::current()->resource_area();
  79.710    Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
  79.711    Block_List worklist;
  79.712 @@ -1347,15 +1314,14 @@
  79.713    }
  79.714  
  79.715    // The entry block should be first on the new trace list.
  79.716 -  Trace *tr = trace(_cfg._broot);
  79.717 +  Trace *tr = trace(_cfg.get_root_block());
  79.718    assert(tr == new_traces[0], "entry trace misplaced");
  79.719  
  79.720    // Sort the new trace list by frequency
  79.721    qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);
  79.722  
  79.723    // Patch up the successor blocks
  79.724 -  _cfg._blocks.reset();
  79.725 -  _cfg._num_blocks = 0;
  79.726 +  _cfg.clear_blocks();
  79.727    for (int i = 0; i < new_count; i++) {
  79.728      Trace *tr = new_traces[i];
  79.729      if (tr != NULL) {
  79.730 @@ -1364,17 +1330,15 @@
  79.731    }
  79.732  }
  79.733  
  79.734 -//------------------------------PhaseBlockLayout-------------------------------
  79.735  // Order basic blocks based on frequency
  79.736 -PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) :
  79.737 -  Phase(BlockLayout),
  79.738 -  _cfg(cfg)
  79.739 -{
  79.740 +PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
  79.741 +: Phase(BlockLayout)
  79.742 +, _cfg(cfg) {
  79.743    ResourceMark rm;
  79.744    ResourceArea *area = Thread::current()->resource_area();
  79.745  
  79.746    // List of traces
  79.747 -  int size = _cfg._num_blocks + 1;
  79.748 +  int size = _cfg.number_of_blocks() + 1;
  79.749    traces = NEW_ARENA_ARRAY(area, Trace *, size);
  79.750    memset(traces, 0, size*sizeof(Trace*));
  79.751    next = NEW_ARENA_ARRAY(area, Block *, size);
  79.752 @@ -1407,11 +1371,10 @@
  79.753    // Re-order all the remaining traces by frequency
  79.754    reorder_traces(size);
  79.755  
  79.756 -  assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink");
  79.757 +  assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
  79.758  }
  79.759  
  79.760  
  79.761 -//------------------------------backedge---------------------------------------
  79.762  // Edge e completes a loop in a trace. If the target block is head of the
  79.763  // loop, rotate the loop block so that the loop ends in a conditional branch.
  79.764  bool Trace::backedge(CFGEdge *e) {
  79.765 @@ -1463,14 +1426,12 @@
  79.766    return loop_rotated;
  79.767  }
  79.768  
  79.769 -//------------------------------fixup_blocks-----------------------------------
  79.770  // push blocks onto the CFG list
  79.771  // ensure that blocks have the correct two-way branch sense
  79.772  void Trace::fixup_blocks(PhaseCFG &cfg) {
  79.773    Block *last = last_block();
  79.774    for (Block *b = first_block(); b != NULL; b = next(b)) {
  79.775 -    cfg._blocks.push(b);
  79.776 -    cfg._num_blocks++;
  79.777 +    cfg.add_block(b);
  79.778      if (!b->is_connector()) {
  79.779        int nfallthru = b->num_fall_throughs();
  79.780        if (b != last) {
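
    PhaseBlockLayout above drives block ordering from edge frequencies: find_edges builds initial
    traces along fall-through edges, grow_traces appends traces along the most frequently executed
    edges, merge_traces folds in diamonds, and reorder_traces emits the final order. A standalone
    sketch of the core greedy step, using a toy CFG and hand-rolled trace bookkeeping rather than
    HotSpot's CFGEdge/Trace classes:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Edge { int src; int dst; float freq; };

    int main() {
      // Toy CFG: 0->1 (hot), 0->2 (cold), 1->3 (hot), 2->3 (cold)
      std::vector<Edge> edges = { {0, 1, 0.9f}, {0, 2, 0.1f}, {1, 3, 0.9f}, {2, 3, 0.1f} };
      std::sort(edges.begin(), edges.end(),
                [](const Edge& a, const Edge& b) { return a.freq > b.freq; });  // hottest first

      std::vector<int> trace_of = {0, 1, 2, 3};                 // each block starts in its own trace
      std::vector<bool> has_succ(4, false), has_pred(4, false); // block already chained on this side?
      for (const Edge& e : edges) {
        // Only append when the edge runs from a trace tail to a different trace's head.
        if (!has_succ[e.src] && !has_pred[e.dst] && trace_of[e.src] != trace_of[e.dst]) {
          int keep = trace_of[e.src], victim = trace_of[e.dst];
          for (int& t : trace_of) if (t == victim) t = keep;    // union the two traces
          has_succ[e.src] = has_pred[e.dst] = true;
        }
      }
      for (int b = 0; b < 4; b++) std::printf("block %d -> trace %d\n", b, trace_of[b]);
      return 0;
    }

    The hot path 0 -> 1 -> 3 ends up chained into one trace, while the cold block 2 stays in its own
    trace and can be placed later, which is the layout effect the phase is after.
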
    80.1 --- a/src/share/vm/opto/block.hpp	Fri Aug 23 22:12:18 2013 +0100
    80.2 +++ b/src/share/vm/opto/block.hpp	Fri Aug 30 09:50:49 2013 +0100
    80.3 @@ -348,20 +348,77 @@
    80.4  class PhaseCFG : public Phase {
    80.5    friend class VMStructs;
    80.6   private:
    80.7 +
    80.8 +  // Root of whole program
    80.9 +  RootNode* _root;
   80.10 +
   80.11 +  // The block containing the root node
   80.12 +  Block* _root_block;
   80.13 +
   80.14 +  // List of basic blocks that are created during CFG creation
   80.15 +  Block_List _blocks;
   80.16 +
   80.17 +  // Count of basic blocks
   80.18 +  uint _number_of_blocks;
   80.19 +
   80.20    // Arena for the blocks to be stored in
   80.21    Arena* _block_arena;
   80.22  
   80.23 +  // The matcher for this compilation
   80.24 +  Matcher& _matcher;
   80.25 +
   80.26    // Map nodes to owning basic block
   80.27    Block_Array _node_to_block_mapping;
   80.28  
   80.29 +  // Loop from the root
   80.30 +  CFGLoop* _root_loop;
   80.31 +
   80.33 +  // Outermost loop frequency
   80.33 +  float _outer_loop_frequency;
   80.34 +
   80.35 +  // Per node latency estimation, valid only during GCM
   80.36 +  GrowableArray<uint>* _node_latency;
   80.37 +
   80.38    // Build a proper looking cfg.  Return count of basic blocks
   80.39    uint build_cfg();
   80.40  
   80.41 -  // Perform DFS search.
   80.42 +  // Build the dominator tree so that we know where we can move instructions
   80.43 +  void build_dominator_tree();
   80.44 +
   80.45 +  // Estimate block frequencies based on IfNode probabilities, so that we know where we want to move instructions
   80.46 +  void estimate_block_frequency();
   80.47 +
   80.48 +  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
   80.49 +  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
   80.50 +  // Move nodes to ensure correctness from GVN and also try to move nodes out of loops.
   80.51 +  void global_code_motion();
   80.52 +
   80.53 +  // Schedule Nodes early in their basic blocks.
   80.54 +  bool schedule_early(VectorSet &visited, Node_List &roots);
   80.55 +
   80.56 +  // For each node, find the latest block it can be scheduled into
   80.57 +  // and then select the cheapest block between the latest and earliest
   80.58 +  // block to place the node.
   80.59 +  void schedule_late(VectorSet &visited, Node_List &stack);
   80.60 +
   80.61 +  // Compute the (backwards) latency of a node from a single use
   80.62 +  int latency_from_use(Node *n, const Node *def, Node *use);
   80.63 +
   80.64 +  // Compute the (backwards) latency of a node from the uses of this instruction
   80.65 +  void partial_latency_of_defs(Node *n);
   80.66 +
   80.67 +  // Compute the instruction global latency with a backwards walk
   80.68 +  void compute_latencies_backwards(VectorSet &visited, Node_List &stack);
   80.69 +
   80.70 +  // Pick a block between early and late that is a cheaper alternative
   80.71 +  // to late. Helper for schedule_late.
   80.72 +  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
   80.73 +
   80.74 +  // Perform a Depth First Search (DFS).
   80.75    // Setup 'vertex' as DFS to vertex mapping.
   80.76    // Setup 'semi' as vertex to DFS mapping.
   80.77    // Set 'parent' to DFS parent.
   80.78 -  uint DFS( Tarjan *tarjan );
   80.79 +  uint do_DFS(Tarjan* tarjan, uint rpo_counter);
   80.80  
   80.81    // Helper function to insert a node into a block
   80.82    void schedule_node_into_block( Node *n, Block *b );
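
    The scheduling helpers declared above (schedule_late plus hoist_to_cheaper_block) pick, for each
    node, a block between its earliest and latest legal placement and prefer a cheaper one. A
    standalone sketch of that selection on a toy dominator chain; the real helper weighs more than
    raw block frequency, so treat this as the idea only:

    #include <cstdio>
    #include <vector>

    int main() {
      // idom[b] = immediate dominator of block b; block 0 is the root/earliest block.
      std::vector<int>   idom = { 0, 0, 1, 2 };
      std::vector<float> freq = { 1.0f, 0.1f, 10.0f, 10.0f };  // blocks 2 and 3 sit in a hot loop

      int early = 0, late = 3;
      int best = late;
      for (int b = late; ; b = idom[b]) {       // walk the dominator chain from late toward early
        if (freq[b] < freq[best]) best = b;     // prefer the least frequently executed block
        if (b == early) break;
      }
      std::printf("schedule into block %d (freq %.2f)\n", best, freq[best]);
      return 0;
    }
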
   80.83 @@ -372,7 +429,8 @@
   80.84    void schedule_pinned_nodes( VectorSet &visited );
   80.85  
   80.86    // I'll need a few machine-specific GotoNodes.  Clone from this one.
   80.87 -  MachNode *_goto;
   80.88 +  // Used when building the CFG and creating end nodes for blocks.
   80.89 +  MachNode* _goto;
   80.90  
   80.91    Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
   80.92    void verify_anti_dependences(Block* LCA, Node* load) {
   80.93 @@ -380,17 +438,77 @@
   80.94      insert_anti_dependences(LCA, load, true);
   80.95    }
   80.96  
   80.97 +  bool move_to_next(Block* bx, uint b_index);
   80.98 +  void move_to_end(Block* bx, uint b_index);
   80.99 +
  80.100 +  void insert_goto_at(uint block_no, uint succ_no);
  80.101 +
  80.102 +  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  80.103 +  // true target.  NeverBranch are treated as a conditional branch that always
  80.104 +  // goes the same direction for most of the optimizer and are used to give a
  80.105 +  // fake exit path to infinite loops.  At this late stage they need to turn
  80.106 +  // into Goto's so that when you enter the infinite loop you indeed hang.
  80.107 +  void convert_NeverBranch_to_Goto(Block *b);
  80.108 +
  80.109 +  CFGLoop* create_loop_tree();
  80.110 +
  80.111 +  #ifndef PRODUCT
  80.112 +  bool _trace_opto_pipelining;  // tracing flag
  80.113 +  #endif
  80.114 +
  80.115   public:
  80.116    PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);
  80.117  
  80.118 -  uint _num_blocks;             // Count of basic blocks
  80.119 -  Block_List _blocks;           // List of basic blocks
  80.120 -  RootNode *_root;              // Root of whole program
  80.121 -  Block *_broot;                // Basic block of root
  80.122 -  uint _rpo_ctr;
  80.123 -  CFGLoop* _root_loop;
  80.124 -  float _outer_loop_freq;       // Outmost loop frequency
  80.125 +  void set_latency_for_node(Node* node, int latency) {
  80.126 +    _node_latency->at_put_grow(node->_idx, latency);
  80.127 +  }
  80.128  
  80.129 +  uint get_latency_for_node(Node* node) {
  80.130 +    return _node_latency->at_grow(node->_idx);
  80.131 +  }
  80.132 +
  80.133 +  // Get the outermost loop frequency
  80.134 +  float get_outer_loop_frequency() const {
  80.135 +    return _outer_loop_frequency;
  80.136 +  }
  80.137 +
  80.138 +  // Get the root node of the CFG
  80.139 +  RootNode* get_root_node() const {
  80.140 +    return _root;
  80.141 +  }
  80.142 +
  80.143 +  // Get the block of the root node
  80.144 +  Block* get_root_block() const {
  80.145 +    return _root_block;
  80.146 +  }
  80.147 +
  80.148 +  // Adds a block at a position and moves the later ones one step
  80.149 +  void add_block_at(uint pos, Block* block) {
  80.150 +    _blocks.insert(pos, block);
  80.151 +    _number_of_blocks++;
  80.152 +  }
  80.153 +
  80.154 +  // Adds a block to the top of the block list
  80.155 +  void add_block(Block* block) {
  80.156 +    _blocks.push(block);
  80.157 +    _number_of_blocks++;
  80.158 +  }
  80.159 +
  80.160 +  // Clear the list of blocks
  80.161 +  void clear_blocks() {
  80.162 +    _blocks.reset();
  80.163 +    _number_of_blocks = 0;
  80.164 +  }
  80.165 +
  80.166 +  // Get the block at position pos in _blocks
  80.167 +  Block* get_block(uint pos) const {
  80.168 +    return _blocks[pos];
  80.169 +  }
  80.170 +
  80.171 +  // Number of blocks
  80.172 +  uint number_of_blocks() const {
  80.173 +    return _number_of_blocks;
  80.174 +  }
  80.175  
  80.176    // set which block this node should reside in
  80.177    void map_node_to_block(const Node* node, Block* block) {
  80.178 @@ -412,72 +530,26 @@
  80.179      return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  80.180    }
  80.181  
  80.182 -  // Per node latency estimation, valid only during GCM
  80.183 -  GrowableArray<uint> *_node_latency;
  80.184 -
  80.185 -#ifndef PRODUCT
  80.186 -  bool _trace_opto_pipelining;  // tracing flag
  80.187 -#endif
  80.188 -
  80.189  #ifdef ASSERT
  80.190    Unique_Node_List _raw_oops;
  80.191  #endif
  80.192  
  80.193 -  // Build dominators
  80.194 -  void Dominators();
  80.195 -
  80.196 -  // Estimate block frequencies based on IfNode probabilities
  80.197 -  void Estimate_Block_Frequency();
  80.198 -
  80.199 -  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  80.200 -  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
  80.201 -  void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
  80.202 +  // Do global code motion by first building the dominator tree and estimating block frequencies
  80.203 +  // Returns true on success
  80.204 +  bool do_global_code_motion();
  80.205  
  80.206    // Compute the (backwards) latency of a node from the uses
  80.207    void latency_from_uses(Node *n);
  80.208  
  80.209 -  // Compute the (backwards) latency of a node from a single use
  80.210 -  int latency_from_use(Node *n, const Node *def, Node *use);
  80.211 -
  80.212 -  // Compute the (backwards) latency of a node from the uses of this instruction
  80.213 -  void partial_latency_of_defs(Node *n);
  80.214 -
  80.215 -  // Schedule Nodes early in their basic blocks.
  80.216 -  bool schedule_early(VectorSet &visited, Node_List &roots);
  80.217 -
  80.218 -  // For each node, find the latest block it can be scheduled into
  80.219 -  // and then select the cheapest block between the latest and earliest
  80.220 -  // block to place the node.
  80.221 -  void schedule_late(VectorSet &visited, Node_List &stack);
  80.222 -
  80.223 -  // Pick a block between early and late that is a cheaper alternative
  80.224 -  // to late. Helper for schedule_late.
  80.225 -  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
  80.226 -
  80.227 -  // Compute the instruction global latency with a backwards walk
  80.228 -  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);
  80.229 -
  80.230    // Set loop alignment
  80.231    void set_loop_alignment();
  80.232  
  80.233    // Remove empty basic blocks
  80.234 -  void remove_empty();
  80.235 +  void remove_empty_blocks();
  80.236    void fixup_flow();
  80.237 -  bool move_to_next(Block* bx, uint b_index);
  80.238 -  void move_to_end(Block* bx, uint b_index);
  80.239 -  void insert_goto_at(uint block_no, uint succ_no);
  80.240  
  80.241 -  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  80.242 -  // true target.  NeverBranch are treated as a conditional branch that always
  80.243 -  // goes the same direction for most of the optimizer and are used to give a
  80.244 -  // fake exit path to infinite loops.  At this late stage they need to turn
  80.245 -  // into Goto's so that when you enter the infinite loop you indeed hang.
  80.246 -  void convert_NeverBranch_to_Goto(Block *b);
  80.247 -
  80.248 -  CFGLoop* create_loop_tree();
  80.249 -
  80.250 -  // Insert a node into a block, and update the _bbs
  80.251 -  void insert( Block *b, uint idx, Node *n ) {
  80.252 +  // Insert a node into a block at index and map the node to the block
  80.253 +  void insert(Block *b, uint idx, Node *n) {
  80.254      b->_nodes.insert( idx, n );
  80.255      map_node_to_block(n, b);
  80.256    }
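
    The header rework above hides the block list behind a small query/update interface
    (number_of_blocks(), get_block(), add_block(), add_block_at(), clear_blocks()) instead of
    exposing _num_blocks and _blocks directly. A much-simplified standalone mock of that accessor
    pattern, using std::vector instead of the arena-backed Block_List and none of the real
    PhaseCFG state:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Block { int id; };

    class MockCFG {
      std::vector<Block*> _blocks;   // private: callers go through the accessors below
     public:
      uint32_t number_of_blocks() const { return (uint32_t)_blocks.size(); }
      Block* get_block(uint32_t pos) const { return _blocks[pos]; }
      void add_block(Block* b) { _blocks.push_back(b); }
      void clear_blocks() { _blocks.clear(); }
    };

    int main() {
      MockCFG cfg;
      Block b0{0}, b1{1};
      cfg.add_block(&b0);
      cfg.add_block(&b1);
      // Iteration now mirrors the rewritten loops in block.cpp: index plus accessor, no raw fields.
      for (uint32_t i = 0; i < cfg.number_of_blocks(); i++) {
        std::printf("block %d\n", cfg.get_block(i)->id);
      }
      return 0;
    }
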
    81.1 --- a/src/share/vm/opto/buildOopMap.cpp	Fri Aug 23 22:12:18 2013 +0100
    81.2 +++ b/src/share/vm/opto/buildOopMap.cpp	Fri Aug 30 09:50:49 2013 +0100
    81.3 @@ -87,7 +87,6 @@
    81.4  // OptoReg::Bad for not-callee-saved.
    81.5  
    81.6  
    81.7 -//------------------------------OopFlow----------------------------------------
    81.8  // Structure to pass around
    81.9  struct OopFlow : public ResourceObj {
   81.10    short *_callees;              // Array mapping register to callee-saved
   81.11 @@ -119,7 +118,6 @@
   81.12    OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
   81.13  };
   81.14  
   81.15 -//------------------------------compute_reach----------------------------------
   81.16  // Given reaching-defs for this block start, compute it for this block end
   81.17  void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {
   81.18  
   81.19 @@ -177,7 +175,6 @@
   81.20    }
   81.21  }
   81.22  
   81.23 -//------------------------------merge------------------------------------------
   81.24  // Merge the given flow into the 'this' flow
   81.25  void OopFlow::merge( OopFlow *flow, int max_reg ) {
   81.26    assert( _b == NULL, "merging into a happy flow" );
   81.27 @@ -197,14 +194,12 @@
   81.28  
   81.29  }
   81.30  
   81.31 -//------------------------------clone------------------------------------------
   81.32  void OopFlow::clone( OopFlow *flow, int max_size ) {
   81.33    _b = flow->_b;
   81.34    memcpy( _callees, flow->_callees, sizeof(short)*max_size);
   81.35    memcpy( _defs   , flow->_defs   , sizeof(Node*)*max_size);
   81.36  }
   81.37  
   81.38 -//------------------------------make-------------------------------------------
   81.39  OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
   81.40    short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
   81.41    Node **defs    = NEW_ARENA_ARRAY(A,Node*,max_size+1);
   81.42 @@ -215,7 +210,6 @@
   81.43    return flow;
   81.44  }
   81.45  
   81.46 -//------------------------------bit twiddlers----------------------------------
   81.47  static int get_live_bit( int *live, int reg ) {
   81.48    return live[reg>>LogBitsPerInt] &   (1<<(reg&(BitsPerInt-1))); }
   81.49  static void set_live_bit( int *live, int reg ) {
   81.50 @@ -223,7 +217,6 @@
   81.51  static void clr_live_bit( int *live, int reg ) {
   81.52           live[reg>>LogBitsPerInt] &= ~(1<<(reg&(BitsPerInt-1))); }
   81.53  
   81.54 -//------------------------------build_oop_map----------------------------------
   81.55  // Build an oopmap from the current flow info
   81.56  OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
   81.57    int framesize = regalloc->_framesize;
   81.58 @@ -412,19 +405,18 @@
   81.59    return omap;
   81.60  }
   81.61  
   81.62 -//------------------------------do_liveness------------------------------------
   81.63  // Compute backwards liveness on registers
   81.64 -static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *worklist, int max_reg_ints, Arena *A, Dict *safehash ) {
   81.65 -  int *live = NEW_ARENA_ARRAY(A, int, (cfg->_num_blocks+1) * max_reg_ints);
   81.66 -  int *tmp_live = &live[cfg->_num_blocks * max_reg_ints];
   81.67 -  Node *root = cfg->C->root();
   81.68 +static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* worklist, int max_reg_ints, Arena* A, Dict* safehash) {
   81.69 +  int* live = NEW_ARENA_ARRAY(A, int, (cfg->number_of_blocks() + 1) * max_reg_ints);
   81.70 +  int* tmp_live = &live[cfg->number_of_blocks() * max_reg_ints];
   81.71 +  Node* root = cfg->get_root_node();
   81.72    // On CISC platforms, get the node representing the stack pointer  that regalloc
   81.73    // used for spills
   81.74    Node *fp = NodeSentinel;
   81.75    if (UseCISCSpill && root->req() > 1) {
   81.76      fp = root->in(1)->in(TypeFunc::FramePtr);
   81.77    }
   81.78 -  memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
   81.79 +  memset(live, 0, cfg->number_of_blocks() * (max_reg_ints << LogBytesPerInt));
   81.80    // Push preds onto worklist
   81.81    for (uint i = 1; i < root->req(); i++) {
   81.82      Block* block = cfg->get_block_for_node(root->in(i));
   81.83 @@ -549,29 +541,32 @@
   81.84      // Scan for any missing safepoints.  Happens to infinite loops
   81.85      // ala ZKM.jar
   81.86      uint i;
   81.87 -    for( i=1; i<cfg->_num_blocks; i++ ) {
   81.88 -      Block *b = cfg->_blocks[i];
   81.89 +    for (i = 1; i < cfg->number_of_blocks(); i++) {
   81.90 +      Block* block = cfg->get_block(i);
   81.91        uint j;
   81.92 -      for( j=1; j<b->_nodes.size(); j++ )
   81.93 -        if( b->_nodes[j]->jvms() &&
   81.94 -            (*safehash)[b->_nodes[j]] == NULL )
   81.95 +      for (j = 1; j < block->_nodes.size(); j++) {
   81.96 +        if (block->_nodes[j]->jvms() && (*safehash)[block->_nodes[j]] == NULL) {
   81.97             break;
   81.98 -      if( j<b->_nodes.size() ) break;
   81.99 +        }
  81.100 +      }
  81.101 +      if (j < block->_nodes.size()) {
  81.102 +        break;
  81.103 +      }
  81.104      }
  81.105 -    if( i == cfg->_num_blocks )
  81.106 +    if (i == cfg->number_of_blocks()) {
  81.107        break;                    // Got 'em all
  81.108 +    }
  81.109  #ifndef PRODUCT
  81.110      if( PrintOpto && Verbose )
  81.111        tty->print_cr("retripping live calc");
  81.112  #endif
  81.113      // Force the issue (expensively): recheck everybody
  81.114 -    for( i=1; i<cfg->_num_blocks; i++ )
  81.115 -      worklist->push(cfg->_blocks[i]);
  81.116 +    for (i = 1; i < cfg->number_of_blocks(); i++) {
  81.117 +      worklist->push(cfg->get_block(i));
  81.118 +    }
  81.119    }
  81.120 -
  81.121  }
  81.122  
  81.123 -//------------------------------BuildOopMaps-----------------------------------
  81.124  // Collect GC mask info - where are all the OOPs?
  81.125  void Compile::BuildOopMaps() {
  81.126    NOT_PRODUCT( TracePhase t3("bldOopMaps", &_t_buildOopMaps, TimeCompiler); )
  81.127 @@ -592,12 +587,12 @@
  81.128    OopFlow *free_list = NULL;    // Free, unused
  81.129  
  81.130    // Array mapping blocks to completed oopflows
  81.131 -  OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->_num_blocks);
  81.132 -  memset( flows, 0, _cfg->_num_blocks*sizeof(OopFlow*) );
  81.133 +  OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->number_of_blocks());
  81.134 +  memset( flows, 0, _cfg->number_of_blocks() * sizeof(OopFlow*) );
  81.135  
  81.136  
  81.137    // Do the first block 'by hand' to prime the worklist
  81.138 -  Block *entry = _cfg->_blocks[1];
  81.139 +  Block *entry = _cfg->get_block(1);
  81.140    OopFlow *rootflow = OopFlow::make(A,max_reg,this);
  81.141    // Initialize to 'bottom' (not 'top')
  81.142    memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
  81.143 @@ -623,7 +618,9 @@
  81.144  
  81.145      Block *b = worklist.pop();
  81.146      // Ignore root block
  81.147 -    if( b == _cfg->_broot ) continue;
  81.148 +    if (b == _cfg->get_root_block()) {
  81.149 +      continue;
  81.150 +    }
  81.151      // Block is already done?  Happens if block has several predecessors,
  81.152      // he can get on the worklist more than once.
  81.153      if( flows[b->_pre_order] ) continue;
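
    do_liveness() above computes backwards register liveness with per-block bitsets and a worklist:
    predecessors are pushed again whenever a block's live set changes, and the whole calculation is
    retripped if a safepoint was missed. A standalone sketch of that worklist shape on a toy
    three-block CFG, with whole-block def/use masks instead of the per-instruction backward walk:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      // Blocks: 0 -> 1, 1 -> {1, 2} (self loop), 2 exits.
      std::vector<std::vector<int>> succs = { {1}, {1, 2}, {} };
      std::vector<std::vector<int>> preds = { {}, {0, 1}, {1} };
      // Per-block def/use register masks (bit i == register i).
      std::vector<uint32_t> def = { 0b001, 0b010, 0b000 };
      std::vector<uint32_t> use = { 0b000, 0b001, 0b010 };

      std::vector<uint32_t> live_in(3, 0);
      std::vector<int> worklist = { 2, 1, 0 };
      while (!worklist.empty()) {
        int b = worklist.back(); worklist.pop_back();
        uint32_t live_out = 0;
        for (int s : succs[b]) live_out |= live_in[s];        // union of successor live-ins
        uint32_t in = use[b] | (live_out & ~def[b]);          // classic backwards dataflow equation
        if (in != live_in[b]) {                               // changed: revisit predecessors
          live_in[b] = in;
          for (int p : preds[b]) worklist.push_back(p);
        }
      }
      for (int b = 0; b < 3; b++) std::printf("live_in[%d] = 0x%x\n", b, (unsigned)live_in[b]);
      return 0;
    }
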
    82.1 --- a/src/share/vm/opto/chaitin.cpp	Fri Aug 23 22:12:18 2013 +0100
    82.2 +++ b/src/share/vm/opto/chaitin.cpp	Fri Aug 30 09:50:49 2013 +0100
    82.3 @@ -40,10 +40,8 @@
    82.4  #include "opto/opcodes.hpp"
    82.5  #include "opto/rootnode.hpp"
    82.6  
    82.7 -//=============================================================================
    82.8 -
    82.9  #ifndef PRODUCT
   82.10 -void LRG::dump( ) const {
   82.11 +void LRG::dump() const {
   82.12    ttyLocker ttyl;
   82.13    tty->print("%d ",num_regs());
   82.14    _mask.dump();
   82.15 @@ -94,7 +92,6 @@
   82.16  }
   82.17  #endif
   82.18  
   82.19 -//------------------------------score------------------------------------------
   82.20  // Compute score from cost and area.  Low score is best to spill.
   82.21  static double raw_score( double cost, double area ) {
   82.22    return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
   82.23 @@ -125,7 +122,6 @@
   82.24    return score;
   82.25  }
   82.26  
   82.27 -//------------------------------LRG_List---------------------------------------
   82.28  LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
   82.29    memset( _lidxs, 0, sizeof(uint)*max );
   82.30  }
   82.31 @@ -211,7 +207,6 @@
   82.32    return next;
   82.33  }
   82.34  
   82.35 -//------------------------------Chaitin----------------------------------------
   82.36  PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
   82.37    : PhaseRegAlloc(unique, cfg, matcher,
   82.38  #ifndef PRODUCT
   82.39 @@ -232,31 +227,31 @@
   82.40  {
   82.41    NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
   82.42  
   82.43 -  _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
   82.44 +  _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
   82.45  
   82.46    // Build a list of basic blocks, sorted by frequency
   82.47 -  _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
   82.48 +  _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
   82.49    // Experiment with sorting strategies to speed compilation
   82.50    double  cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
   82.51    Block **buckets[NUMBUCKS];             // Array of buckets
   82.52    uint    buckcnt[NUMBUCKS];             // Array of bucket counters
   82.53    double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
   82.54    for (uint i = 0; i < NUMBUCKS; i++) {
   82.55 -    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
   82.56 +    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
   82.57      buckcnt[i] = 0;
   82.58      // Bump by three orders of magnitude each time
   82.59      cutoff *= 0.001;
   82.60      buckval[i] = cutoff;
   82.61 -    for (uint j = 0; j < _cfg._num_blocks; j++) {
   82.62 +    for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
   82.63        buckets[i][j] = NULL;
   82.64      }
   82.65    }
   82.66    // Sort blocks into buckets
   82.67 -  for (uint i = 0; i < _cfg._num_blocks; i++) {
   82.68 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
   82.69      for (uint j = 0; j < NUMBUCKS; j++) {
   82.70 -      if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
   82.71 +      if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
   82.72          // Assign block to end of list for appropriate bucket
   82.73 -        buckets[j][buckcnt[j]++] = _cfg._blocks[i];
   82.74 +        buckets[j][buckcnt[j]++] = _cfg.get_block(i);
   82.75          break; // kick out of inner loop
   82.76        }
   82.77      }
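
    The constructor code above distributes blocks into a few frequency buckets (cutoffs dropping by
    three orders of magnitude) and then concatenates the buckets, giving an approximately
    frequency-sorted order without a full sort. A standalone sketch of that bucketing with toy
    frequencies, not HotSpot's Block or BLOCK_FREQUENCY machinery:

    #include <cstdio>
    #include <vector>

    int main() {
      const int NUMBUCKS = 3;
      double freqs[] = { 0.5, 900.0, 0.002, 45.0, 0.00001 };   // per-block frequencies
      const int nblocks = sizeof(freqs) / sizeof(freqs[0]);

      std::vector<std::vector<int>> buckets(NUMBUCKS);
      for (int b = 0; b < nblocks; b++) {
        double cutoff = 1.0;
        for (int j = 0; j < NUMBUCKS; j++) {
          cutoff *= 0.001;                                     // drop three orders of magnitude
          if (j == NUMBUCKS - 1 || freqs[b] > cutoff) {        // last bucket catches the rest
            buckets[j].push_back(b);
            break;
          }
        }
      }
      // Concatenating the buckets yields hot blocks first, cold blocks last.
      for (int j = 0; j < NUMBUCKS; j++) {
        for (int b : buckets[j]) std::printf("bucket %d: block %d (freq %g)\n", j, b, freqs[b]);
      }
      return 0;
    }
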
   82.78 @@ -269,10 +264,9 @@
   82.79      }
   82.80    }
   82.81  
   82.82 -  assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
   82.83 +  assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
   82.84  }
   82.85  
   82.86 -//------------------------------Union------------------------------------------
   82.87  // union 2 sets together.
   82.88  void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
   82.89    uint src = _lrg_map.find(src_n);
   82.90 @@ -285,7 +279,6 @@
   82.91    _lrg_map.uf_map(dst, src);
   82.92  }
   82.93  
   82.94 -//------------------------------new_lrg----------------------------------------
   82.95  void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
   82.96    // Make the Node->LRG mapping
   82.97    _lrg_map.extend(x->_idx,lrg);
   82.98 @@ -294,24 +287,28 @@
   82.99  }
  82.100  
  82.101  
  82.102 -bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
  82.103 -  Block* bcon = _cfg.get_block_for_node(con);
  82.104 -  uint cindex = bcon->find_node(con);
  82.105 -  Node *con_next = bcon->_nodes[cindex+1];
  82.106 -  if (con_next->in(0) != con || !con_next->is_MachProj()) {
  82.107 -    return false;               // No MachProj's follow
  82.108 +int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
  82.109 +  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
  82.110 +  DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
  82.111 +  int found_projs = 0;
  82.112 +  uint cnt = orig->outcnt();
  82.113 +  for (uint i = 0; i < cnt; i++) {
  82.114 +    Node* proj = orig->raw_out(i);
  82.115 +    if (proj->is_MachProj()) {
  82.116 +      assert(proj->outcnt() == 0, "only kill projections are expected here");
  82.117 +      assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
  82.118 +      found_projs++;
  82.119 +      // Copy kill projections after the cloned node
  82.120 +      Node* kills = proj->clone();
  82.121 +      kills->set_req(0, copy);
  82.122 +      b->_nodes.insert(idx++, kills);
  82.123 +      _cfg.map_node_to_block(kills, b);
  82.124 +      new_lrg(kills, max_lrg_id++);
  82.125 +    }
  82.126    }
  82.127 -
  82.128 -  // Copy kills after the cloned constant
  82.129 -  Node *kills = con_next->clone();
  82.130 -  kills->set_req(0, copy);
  82.131 -  b->_nodes.insert(idx, kills);
  82.132 -  _cfg.map_node_to_block(kills, b);
  82.133 -  new_lrg(kills, max_lrg_id);
  82.134 -  return true;
  82.135 +  return found_projs;
  82.136  }
  82.137  
  82.138 -//------------------------------compact----------------------------------------
  82.139  // Renumber the live ranges to compact them.  Makes the IFG smaller.
  82.140  void PhaseChaitin::compact() {
  82.141    // Current the _uf_map contains a series of short chains which are headed
  82.142 @@ -677,20 +674,19 @@
  82.143    C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
  82.144  }
  82.145  
  82.146 -//------------------------------de_ssa-----------------------------------------
  82.147  void PhaseChaitin::de_ssa() {
  82.148    // Set initial Names for all Nodes.  Most Nodes get the virtual register
  82.149    // number.  A few get the ZERO live range number.  These do not
  82.150    // get allocated, but instead rely on correct scheduling to ensure that
  82.151    // only one instance is simultaneously live at a time.
  82.152    uint lr_counter = 1;
  82.153 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
  82.154 -    Block *b = _cfg._blocks[i];
  82.155 -    uint cnt = b->_nodes.size();
  82.156 +  for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
  82.157 +    Block* block = _cfg.get_block(i);
  82.158 +    uint cnt = block->_nodes.size();
  82.159  
  82.160      // Handle all the normal Nodes in the block
  82.161      for( uint j = 0; j < cnt; j++ ) {
  82.162 -      Node *n = b->_nodes[j];
  82.163 +      Node *n = block->_nodes[j];
  82.164        // Pre-color to the zero live range, or pick virtual register
  82.165        const RegMask &rm = n->out_RegMask();
  82.166        _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
  82.167 @@ -701,52 +697,55 @@
  82.168  }
  82.169  
  82.170  
  82.171 -//------------------------------gather_lrg_masks-------------------------------
  82.172  // Gather LiveRanGe information, including register masks.  Modification of
  82.173  // cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
  82.174  void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
  82.175  
  82.176    // Nail down the frame pointer live range
  82.177 -  uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
  82.178 +  uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
  82.179    lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite
  82.180  
  82.181    // For all blocks
  82.182 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
  82.183 -    Block *b = _cfg._blocks[i];
  82.184 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  82.185 +    Block* block = _cfg.get_block(i);
  82.186  
  82.187      // For all instructions
  82.188 -    for( uint j = 1; j < b->_nodes.size(); j++ ) {
  82.189 -      Node *n = b->_nodes[j];
  82.190 +    for (uint j = 1; j < block->_nodes.size(); j++) {
  82.191 +      Node* n = block->_nodes[j];
  82.192        uint input_edge_start =1; // Skip control most nodes
  82.193 -      if( n->is_Mach() ) input_edge_start = n->as_Mach()->oper_input_base();
  82.194 +      if (n->is_Mach()) {
  82.195 +        input_edge_start = n->as_Mach()->oper_input_base();
  82.196 +      }
  82.197        uint idx = n->is_Copy();
  82.198  
  82.199        // Get virtual register number, same as LiveRanGe index
  82.200        uint vreg = _lrg_map.live_range_id(n);
  82.201 -      LRG &lrg = lrgs(vreg);
  82.202 -      if( vreg ) {              // No vreg means un-allocable (e.g. memory)
  82.203 +      LRG& lrg = lrgs(vreg);
  82.204 +      if (vreg) {              // No vreg means un-allocable (e.g. memory)
  82.205  
  82.206          // Collect has-copy bit
  82.207 -        if( idx ) {
  82.208 +        if (idx) {
  82.209            lrg._has_copy = 1;
  82.210            uint clidx = _lrg_map.live_range_id(n->in(idx));
  82.211 -          LRG &copy_src = lrgs(clidx);
  82.212 +          LRG& copy_src = lrgs(clidx);
  82.213            copy_src._has_copy = 1;
  82.214          }
  82.215  
  82.216          // Check for float-vs-int live range (used in register-pressure
  82.217          // calculations)
  82.218          const Type *n_type = n->bottom_type();
  82.219 -        if (n_type->is_floatingpoint())
  82.220 +        if (n_type->is_floatingpoint()) {
  82.221            lrg._is_float = 1;
  82.222 +        }
  82.223  
  82.224          // Check for twice prior spilling.  Once prior spilling might have
  82.225          // spilled 'soft', 2nd prior spill should have spilled 'hard' and
  82.226          // further spilling is unlikely to make progress.
  82.227 -        if( _spilled_once.test(n->_idx) ) {
  82.228 +        if (_spilled_once.test(n->_idx)) {
  82.229            lrg._was_spilled1 = 1;
  82.230 -          if( _spilled_twice.test(n->_idx) )
  82.231 +          if (_spilled_twice.test(n->_idx)) {
  82.232              lrg._was_spilled2 = 1;
  82.233 +          }
  82.234          }
  82.235  
  82.236  #ifndef PRODUCT
  82.237 @@ -783,16 +782,18 @@
  82.238  
  82.239          // Check for bound register masks
  82.240          const RegMask &lrgmask = lrg.mask();
  82.241 -        if (lrgmask.is_bound(ireg))
  82.242 +        if (lrgmask.is_bound(ireg)) {
  82.243            lrg._is_bound = 1;
  82.244 +        }
  82.245  
  82.246          // Check for maximum frequency value
  82.247 -        if (lrg._maxfreq < b->_freq)
  82.248 -          lrg._maxfreq = b->_freq;
  82.249 +        if (lrg._maxfreq < block->_freq) {
  82.250 +          lrg._maxfreq = block->_freq;
  82.251 +        }
  82.252  
  82.253          // Check for oop-iness, or long/double
  82.254          // Check for multi-kill projection
  82.255 -        switch( ireg ) {
  82.256 +        switch (ireg) {
  82.257          case MachProjNode::fat_proj:
  82.258            // Fat projections have size equal to number of registers killed
  82.259            lrg.set_num_regs(rm.Size());
  82.260 @@ -962,7 +963,7 @@
  82.261          // AggressiveCoalesce.  This effectively pre-virtual-splits
  82.262          // around uncommon uses of common defs.
  82.263          const RegMask &rm = n->in_RegMask(k);
  82.264 -        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
  82.265 +        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
  82.266            // Since we are BEFORE aggressive coalesce, leave the register
  82.267            // mask untrimmed by the call.  This encourages more coalescing.
  82.268            // Later, AFTER aggressive, this live range will have to spill
  82.269 @@ -1006,8 +1007,9 @@
  82.270          }
  82.271  
  82.272          // Check for maximum frequency value
  82.273 -        if( lrg._maxfreq < b->_freq )
  82.274 -          lrg._maxfreq = b->_freq;
  82.275 +        if (lrg._maxfreq < block->_freq) {
  82.276 +          lrg._maxfreq = block->_freq;
  82.277 +        }
  82.278  
  82.279        } // End for all allocated inputs
  82.280      } // end for all instructions
  82.281 @@ -1029,7 +1031,6 @@
  82.282    }
  82.283  }
  82.284  
  82.285 -//------------------------------set_was_low------------------------------------
  82.286  // Set the was-lo-degree bit.  Conservative coalescing should not change the
  82.287  // colorability of the graph.  If any live range was of low-degree before
  82.288  // coalescing, it should Simplify.  This call sets the was-lo-degree bit.
  82.289 @@ -1066,7 +1067,6 @@
  82.290  
  82.291  #define REGISTER_CONSTRAINED 16
  82.292  
  82.293 -//------------------------------cache_lrg_info---------------------------------
  82.294  // Compute cost/area ratio, in case we spill.  Build the lo-degree list.
  82.295  void PhaseChaitin::cache_lrg_info( ) {
  82.296  
  82.297 @@ -1100,7 +1100,6 @@
  82.298    }
  82.299  }
  82.300  
  82.301 -//------------------------------Pre-Simplify-----------------------------------
  82.302  // Simplify the IFG by removing LRGs of low degree that have NO copies
  82.303  void PhaseChaitin::Pre_Simplify( ) {
  82.304  
  82.305 @@ -1151,7 +1150,6 @@
  82.306    // No more lo-degree no-copy live ranges to simplify
  82.307  }
  82.308  
  82.309 -//------------------------------Simplify---------------------------------------
  82.310  // Simplify the IFG by removing LRGs of low degree.
  82.311  void PhaseChaitin::Simplify( ) {
  82.312  
  82.313 @@ -1288,7 +1286,6 @@
  82.314  
  82.315  }
  82.316  
  82.317 -//------------------------------is_legal_reg-----------------------------------
  82.318  // Is 'reg' register legal for 'lrg'?
  82.319  static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
  82.320    if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
  82.321 @@ -1315,7 +1312,6 @@
  82.322    return false;
  82.323  }
  82.324  
  82.325 -//------------------------------bias_color-------------------------------------
  82.326  // Choose a color using the biasing heuristic
  82.327  OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
  82.328  
  82.329 @@ -1377,7 +1373,6 @@
  82.330    return OptoReg::add( reg, chunk );
  82.331  }
  82.332  
  82.333 -//------------------------------choose_color-----------------------------------
  82.334  // Choose a color in the current chunk
  82.335  OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
  82.336    assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
  82.337 @@ -1399,7 +1394,6 @@
  82.338    return lrg.mask().find_last_elem();
  82.339  }
  82.340  
  82.341 -//------------------------------Select-----------------------------------------
  82.342  // Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
  82.343  // in reverse order of removal.  As long as nothing of hi-degree was yanked,
  82.344  // everything going back is guaranteed a color.  Select that color.  If some
  82.345 @@ -1574,8 +1568,6 @@
  82.346    return spill_reg-LRG::SPILL_REG;      // Return number of spills
  82.347  }
  82.348  
  82.349 -
  82.350 -//------------------------------copy_was_spilled-------------------------------
  82.351  // Copy 'was_spilled'-edness from the source Node to the dst Node.
  82.352  void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
  82.353    if( _spilled_once.test(src->_idx) ) {
  82.354 @@ -1588,14 +1580,12 @@
  82.355    }
  82.356  }
  82.357  
  82.358 -//------------------------------set_was_spilled--------------------------------
  82.359  // Set the 'spilled_once' or 'spilled_twice' flag on a node.
  82.360  void PhaseChaitin::set_was_spilled( Node *n ) {
  82.361    if( _spilled_once.test_set(n->_idx) )
  82.362      _spilled_twice.set(n->_idx);
  82.363  }
  82.364  
  82.365 -//------------------------------fixup_spills-----------------------------------
  82.366  // Convert Ideal spill instructions into proper FramePtr + offset Loads and
  82.367  // Stores.  Use-def chains are NOT preserved, but Node->LRG->reg maps are.
  82.368  void PhaseChaitin::fixup_spills() {
  82.369 @@ -1605,16 +1595,16 @@
  82.370    NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )
  82.371  
  82.372    // Grab the Frame Pointer
  82.373 -  Node *fp = _cfg._broot->head()->in(1)->in(TypeFunc::FramePtr);
  82.374 +  Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);
  82.375  
  82.376    // For all blocks
  82.377 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
  82.378 -    Block *b = _cfg._blocks[i];
  82.379 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  82.380 +    Block* block = _cfg.get_block(i);
  82.381  
  82.382      // For all instructions in block
  82.383 -    uint last_inst = b->end_idx();
  82.384 -    for( uint j = 1; j <= last_inst; j++ ) {
  82.385 -      Node *n = b->_nodes[j];
  82.386 +    uint last_inst = block->end_idx();
  82.387 +    for (uint j = 1; j <= last_inst; j++) {
  82.388 +      Node* n = block->_nodes[j];
  82.389  
  82.390        // Dead instruction???
  82.391        assert( n->outcnt() != 0 ||// Nothing dead after post alloc
  82.392 @@ -1651,7 +1641,7 @@
  82.393              assert( cisc->oper_input_base() == 2, "Only adding one edge");
  82.394              cisc->ins_req(1,src);         // Requires a memory edge
  82.395            }
  82.396 -          b->_nodes.map(j,cisc);          // Insert into basic block
  82.397 +          block->_nodes.map(j,cisc);          // Insert into basic block
  82.398            n->subsume_by(cisc, C); // Correct graph
  82.399            //
  82.400            ++_used_cisc_instructions;
  82.401 @@ -1677,7 +1667,6 @@
  82.402    } // End of for all blocks
  82.403  }
  82.404  
  82.405 -//------------------------------find_base_for_derived--------------------------
  82.406  // Helper to stretch above; recursively discover the base Node for a
  82.407  // given derived Node.  Easy for AddP-related machine nodes, but needs
  82.408  // to be recursive for derived Phis.
  82.409 @@ -1707,7 +1696,7 @@
  82.410        // Initialize it once and make it shared:
  82.411        // set control to _root and place it into Start block
  82.412        // (where top() node is placed).
  82.413 -      base->init_req(0, _cfg._root);
  82.414 +      base->init_req(0, _cfg.get_root_node());
  82.415        Block *startb = _cfg.get_block_for_node(C->top());
  82.416        startb->_nodes.insert(startb->find_node(C->top()), base );
  82.417        _cfg.map_node_to_block(base, startb);
  82.418 @@ -1716,7 +1705,7 @@
  82.419      if (_lrg_map.live_range_id(base) == 0) {
  82.420        new_lrg(base, maxlrg++);
  82.421      }
  82.422 -    assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
  82.423 +    assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
  82.424      derived_base_map[derived->_idx] = base;
  82.425      return base;
  82.426    }
  82.427 @@ -1779,8 +1768,6 @@
  82.428    return base;
  82.429  }
  82.430  
  82.431 -
  82.432 -//------------------------------stretch_base_pointer_live_ranges---------------
  82.433  // At each Safepoint, insert extra debug edges for each pair of derived value/
  82.434  // base pointer that is live across the Safepoint for oopmap building.  The
  82.435  // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
  82.436 @@ -1792,14 +1779,14 @@
  82.437    memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
  82.438  
  82.439    // For all blocks in RPO do...
  82.440 -  for( uint i=0; i<_cfg._num_blocks; i++ ) {
  82.441 -    Block *b = _cfg._blocks[i];
  82.442 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  82.443 +    Block* block = _cfg.get_block(i);
  82.444      // Note use of deep-copy constructor.  I cannot hammer the original
  82.445      // liveout bits, because they are needed by the following coalesce pass.
  82.446 -    IndexSet liveout(_live->live(b));
  82.447 +    IndexSet liveout(_live->live(block));
  82.448  
  82.449 -    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
  82.450 -      Node *n = b->_nodes[j-1];
  82.451 +    for (uint j = block->end_idx() + 1; j > 1; j--) {
  82.452 +      Node* n = block->_nodes[j - 1];
  82.453  
  82.454        // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
  82.455        // like to see in the same register.  Compare uses the loop-phi and so
  82.456 @@ -1814,7 +1801,7 @@
  82.457          Node *phi = n->in(1);
  82.458          if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
  82.459            Block *phi_block = _cfg.get_block_for_node(phi);
  82.460 -          if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
  82.461 +          if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
  82.462              const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
  82.463              Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
  82.464              insert_proj( phi_block, 1, spill, maxlrg++ );
  82.465 @@ -1868,7 +1855,7 @@
  82.466              if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
  82.467                   !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
  82.468                   (_lrg_map.live_range_id(base) > 0) && // not a constant
  82.469 -                 _cfg.get_block_for_node(base) != b) { // base not def'd in blk)
  82.470 +                 _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
  82.471                // Base pointer is not currently live.  Since I stretched
  82.472                // the base pointer to here and it crosses basic-block
  82.473                // boundaries, the global live info is now incorrect.
  82.474 @@ -1903,15 +1890,12 @@
  82.475    return must_recompute_live != 0;
  82.476  }
  82.477  
  82.478 -
  82.479 -//------------------------------add_reference----------------------------------
  82.480  // Extend the node to LRG mapping
  82.481  
  82.482  void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
  82.483    _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
  82.484  }
  82.485  
  82.486 -//------------------------------dump-------------------------------------------
  82.487  #ifndef PRODUCT
  82.488  void PhaseChaitin::dump(const Node *n) const {
  82.489    uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
  82.490 @@ -2017,8 +2001,9 @@
  82.491                _matcher._new_SP, _framesize );
  82.492  
  82.493    // For all blocks
  82.494 -  for( uint i = 0; i < _cfg._num_blocks; i++ )
  82.495 -    dump(_cfg._blocks[i]);
  82.496 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  82.497 +    dump(_cfg.get_block(i));
  82.498 +  }
  82.499    // End of per-block dump
  82.500    tty->print("\n");
  82.501  
  82.502 @@ -2059,7 +2044,6 @@
  82.503    tty->print_cr("");
  82.504  }
  82.505  
  82.506 -//------------------------------dump_degree_lists------------------------------
  82.507  void PhaseChaitin::dump_degree_lists() const {
  82.508    // Dump lo-degree list
  82.509    tty->print("Lo degree: ");
  82.510 @@ -2080,7 +2064,6 @@
  82.511    tty->print_cr("");
  82.512  }
  82.513  
  82.514 -//------------------------------dump_simplified--------------------------------
  82.515  void PhaseChaitin::dump_simplified() const {
  82.516    tty->print("Simplified: ");
  82.517    for( uint i = _simplified; i; i = lrgs(i)._next )
  82.518 @@ -2099,7 +2082,6 @@
  82.519    return buf+strlen(buf);
  82.520  }
  82.521  
  82.522 -//------------------------------dump_register----------------------------------
  82.523  // Dump a register name into a buffer.  Be intelligent if we get called
  82.524  // before allocation is complete.
  82.525  char *PhaseChaitin::dump_register( const Node *n, char *buf  ) const {
  82.526 @@ -2133,7 +2115,6 @@
  82.527    return buf+strlen(buf);
  82.528  }
  82.529  
  82.530 -//----------------------dump_for_spill_split_recycle--------------------------
  82.531  void PhaseChaitin::dump_for_spill_split_recycle() const {
  82.532    if( WizardMode && (PrintCompilation || PrintOpto) ) {
  82.533      // Display which live ranges need to be split and the allocator's state
  82.534 @@ -2149,7 +2130,6 @@
  82.535    }
  82.536  }
  82.537  
  82.538 -//------------------------------dump_frame------------------------------------
  82.539  void PhaseChaitin::dump_frame() const {
  82.540    const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
  82.541    const TypeTuple *domain = C->tf()->domain();
  82.542 @@ -2255,17 +2235,16 @@
  82.543    tty->print_cr("#");
  82.544  }
  82.545  
  82.546 -//------------------------------dump_bb----------------------------------------
  82.547  void PhaseChaitin::dump_bb( uint pre_order ) const {
  82.548    tty->print_cr("---dump of B%d---",pre_order);
  82.549 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
  82.550 -    Block *b = _cfg._blocks[i];
  82.551 -    if( b->_pre_order == pre_order )
  82.552 -      dump(b);
  82.553 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  82.554 +    Block* block = _cfg.get_block(i);
  82.555 +    if (block->_pre_order == pre_order) {
  82.556 +      dump(block);
  82.557 +    }
  82.558    }
  82.559  }
  82.560  
  82.561 -//------------------------------dump_lrg---------------------------------------
  82.562  void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
  82.563    tty->print_cr("---dump of L%d---",lidx);
  82.564  
  82.565 @@ -2287,17 +2266,17 @@
  82.566      tty->cr();
  82.567    }
  82.568    // For all blocks
  82.569 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
  82.570 -    Block *b = _cfg._blocks[i];
  82.571 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  82.572 +    Block* block = _cfg.get_block(i);
  82.573      int dump_once = 0;
  82.574  
  82.575      // For all instructions
  82.576 -    for( uint j = 0; j < b->_nodes.size(); j++ ) {
  82.577 -      Node *n = b->_nodes[j];
  82.578 +    for( uint j = 0; j < block->_nodes.size(); j++ ) {
  82.579 +      Node *n = block->_nodes[j];
  82.580        if (_lrg_map.find_const(n) == lidx) {
  82.581          if (!dump_once++) {
  82.582            tty->cr();
  82.583 -          b->dump_head(&_cfg);
  82.584 +          block->dump_head(&_cfg);
  82.585          }
  82.586          dump(n);
  82.587          continue;
  82.588 @@ -2312,7 +2291,7 @@
  82.589            if (_lrg_map.find_const(m) == lidx) {
  82.590              if (!dump_once++) {
  82.591                tty->cr();
  82.592 -              b->dump_head(&_cfg);
  82.593 +              block->dump_head(&_cfg);
  82.594              }
  82.595              dump(n);
  82.596            }
  82.597 @@ -2324,7 +2303,6 @@
  82.598  }
  82.599  #endif // not PRODUCT
  82.600  
  82.601 -//------------------------------print_chaitin_statistics-------------------------------
  82.602  int PhaseChaitin::_final_loads  = 0;
  82.603  int PhaseChaitin::_final_stores = 0;
  82.604  int PhaseChaitin::_final_memoves= 0;
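
For readers skimming the chaitin.cpp hunks above: the Simplify/Select comments describe the classic Chaitin-Briggs discipline of peeling low-degree live ranges off the interference graph and coloring them in reverse order of removal. The stand-alone toy below is only a sketch of that idea on a made-up four-node interference graph with an assumed register count K; it shares no types or data with PhaseChaitin and omits spilling, register masks and coalescing entirely.

    // Toy Chaitin-style simplify/select on a tiny interference graph.
    // Everything here (K, the matrix, the names) is illustrative only.
    #include <cstdio>
    #include <vector>

    int main() {
      const int K = 3;                       // assumed number of physical registers
      const int N = 4;                       // live ranges L0..L3
      bool ifg[N][N] = {                     // symmetric interference matrix
        {0,1,1,0},
        {1,0,1,0},
        {1,1,0,1},
        {0,0,1,0}
      };

      std::vector<int>  order;               // removal (simplify) order
      std::vector<bool> removed(N, false);

      // Simplify: repeatedly yank a live range whose remaining degree is < K.
      bool progress = true;
      while (progress) {
        progress = false;
        for (int v = 0; v < N; v++) {
          if (removed[v]) continue;
          int degree = 0;
          for (int w = 0; w < N; w++) {
            if (!removed[w] && ifg[v][w]) degree++;
          }
          if (degree < K) {
            removed[v] = true;
            order.push_back(v);
            progress = true;
          }
        }
      }

      // Select: re-insert in reverse order of removal; each live range had
      // fewer than K neighbors when it was yanked, so a free color exists.
      std::vector<int> color(N, -1);
      for (int i = (int)order.size() - 1; i >= 0; i--) {
        int v = order[i];
        std::vector<bool> used(K, false);
        for (int w = 0; w < N; w++) {
          if (ifg[v][w] && color[w] >= 0) used[color[w]] = true;
        }
        for (int c = 0; c < K; c++) {
          if (!used[c]) { color[v] = c; break; }
        }
      }

      for (int v = 0; v < N; v++) {
        if (color[v] >= 0) std::printf("L%d -> R%d\n", v, color[v]);
        else               std::printf("L%d -> would spill\n", v);
      }
      return 0;
    }

With this graph every live range colors; lowering K to 2 leaves the high-degree nodes uncolored, which is roughly where the allocator's spill machinery would take over.
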
    83.1 --- a/src/share/vm/opto/chaitin.hpp	Fri Aug 23 22:12:18 2013 +0100
    83.2 +++ b/src/share/vm/opto/chaitin.hpp	Fri Aug 30 09:50:49 2013 +0100
    83.3 @@ -412,33 +412,22 @@
    83.4    uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
    83.5    uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
    83.6  
    83.7 -  bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) {
    83.8 -    bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id());
    83.9 -
   83.10 -    if(found_projs) {
   83.11 -      uint max_lrg_id = lrg_map.max_lrg_id();
   83.12 -      lrg_map.set_max_lrg_id(max_lrg_id + 1);
   83.13 -    }
   83.14 -
   83.15 -    return found_projs;
   83.16 -  }
   83.17 -
   83.18    //------------------------------clone_projs------------------------------------
   83.19    // After cloning some rematerialized instruction, clone any MachProj's that
   83.20    // follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
   83.21    // use G3 as an address temp.
   83.22 -  bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) {
   83.23 -    bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id);
   83.24 +  int clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id);
   83.25  
   83.26 -    if(found_projs) {
   83.27 -      max_lrg_id++;
   83.28 +  int clone_projs(Block* b, uint idx, Node* orig, Node* copy, LiveRangeMap& lrg_map) {
   83.29 +    uint max_lrg_id = lrg_map.max_lrg_id();
   83.30 +    int found_projs = clone_projs(b, idx, orig, copy, max_lrg_id);
   83.31 +    if (found_projs > 0) {
   83.32 +      // max_lrg_id is updated during call above
   83.33 +      lrg_map.set_max_lrg_id(max_lrg_id);
   83.34      }
   83.35 -
   83.36      return found_projs;
   83.37    }
   83.38  
   83.39 -  bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id);
   83.40 -
   83.41    Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
   83.42                              int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
   83.43    // True if lidx is used before any real register is def'd in the block
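
The chaitin.hpp hunk above changes clone_projs to return how many projections were cloned instead of a bool, so callers can advance their instruction index by that count (see the l += ... call site in coalesce.cpp below), while a thin LiveRangeMap overload forwards to the counting version and writes the updated max id back. The toy below sketches only that wrapper pattern; ToyIdMap, insert_copies and the plain int sequence are hypothetical stand-ins, not the real Block/Node/LiveRangeMap API.

    // Toy illustration of "return a count, let a thin overload sync state".
    #include <cstdio>
    #include <vector>

    struct ToyIdMap {
      unsigned _max_id = 10;
      unsigned max_id() const     { return _max_id; }
      void set_max_id(unsigned v) { _max_id = v; }
    };

    // Core version: inserts 'count' extra elements after position 'idx' and
    // returns how many were inserted, bumping 'max_id' once per insertion.
    static int insert_copies(std::vector<int>& seq, unsigned idx, int count, unsigned& max_id) {
      for (int k = 0; k < count; k++) {
        seq.insert(seq.begin() + idx + 1 + k, (int)max_id);
        max_id++;
      }
      return count;
    }

    // Convenience overload: callers holding only a ToyIdMap get the id
    // written back for them, mirroring the LiveRangeMap overload in the patch.
    static int insert_copies(std::vector<int>& seq, unsigned idx, int count, ToyIdMap& map) {
      unsigned max_id = map.max_id();
      int inserted = insert_copies(seq, idx, count, max_id);
      if (inserted > 0) {
        map.set_max_id(max_id);   // max_id was updated by the call above
      }
      return inserted;
    }

    int main() {
      std::vector<int> block = {1, 2, 3};
      ToyIdMap map;
      unsigned cursor = 1;
      // Advance the cursor by however many elements were inserted, just as
      // the coalesce.cpp caller does with l += clone_projs(...).
      cursor += insert_copies(block, cursor, 2, map);
      std::printf("cursor=%u max_id=%u size=%zu\n", cursor, map.max_id(), block.size());
      return 0;
    }
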
    84.1 --- a/src/share/vm/opto/coalesce.cpp	Fri Aug 23 22:12:18 2013 +0100
    84.2 +++ b/src/share/vm/opto/coalesce.cpp	Fri Aug 30 09:50:49 2013 +0100
    84.3 @@ -34,8 +34,6 @@
    84.4  #include "opto/matcher.hpp"
    84.5  #include "opto/regmask.hpp"
    84.6  
    84.7 -//=============================================================================
    84.8 -//------------------------------Dump-------------------------------------------
    84.9  #ifndef PRODUCT
   84.10  void PhaseCoalesce::dump(Node *n) const {
   84.11    // Being a const function means I cannot use 'Find'
   84.12 @@ -43,12 +41,11 @@
   84.13    tty->print("L%d/N%d ",r,n->_idx);
   84.14  }
   84.15  
   84.16 -//------------------------------dump-------------------------------------------
   84.17  void PhaseCoalesce::dump() const {
   84.18    // I know I have a block layout now, so I can print blocks in a loop
   84.19 -  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
   84.20 +  for( uint i=0; i<_phc._cfg.number_of_blocks(); i++ ) {
   84.21      uint j;
   84.22 -    Block *b = _phc._cfg._blocks[i];
   84.23 +    Block* b = _phc._cfg.get_block(i);
   84.24      // Print a nice block header
   84.25      tty->print("B%d: ",b->_pre_order);
   84.26      for( j=1; j<b->num_preds(); j++ )
   84.27 @@ -85,7 +82,6 @@
   84.28  }
   84.29  #endif
   84.30  
   84.31 -//------------------------------combine_these_two------------------------------
   84.32  // Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
   84.33  void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
   84.34    uint lr1 = _phc._lrg_map.find(n1);
   84.35 @@ -127,18 +123,15 @@
   84.36    }
   84.37  }
   84.38  
   84.39 -//------------------------------coalesce_driver--------------------------------
   84.40  // Copy coalescing
   84.41 -void PhaseCoalesce::coalesce_driver( ) {
   84.42 -
   84.43 +void PhaseCoalesce::coalesce_driver() {
   84.44    verify();
   84.45    // Coalesce from high frequency to low
   84.46 -  for( uint i=0; i<_phc._cfg._num_blocks; i++ )
   84.47 -    coalesce( _phc._blks[i] );
   84.48 -
   84.49 +  for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
   84.50 +    coalesce(_phc._blks[i]);
   84.51 +  }
   84.52  }
   84.53  
   84.54 -//------------------------------insert_copy_with_overlap-----------------------
   84.55  // I am inserting copies to come out of SSA form.  In the general case, I am
   84.56  // doing a parallel renaming.  I'm in the Named world now, so I can't do a
   84.57  // general parallel renaming.  All the copies now use  "names" (live-ranges)
   84.58 @@ -216,7 +209,6 @@
   84.59    b->_nodes.insert(last_use_idx+1,copy);
   84.60  }
   84.61  
   84.62 -//------------------------------insert_copies----------------------------------
   84.63  void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
   84.64    // We do LRGs compressing and fix a liveout data only here since the other
   84.65    // place in Split() is guarded by the assert which we never hit.
   84.66 @@ -225,8 +217,8 @@
   84.67    for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
   84.68      uint compressed_lrg = _phc._lrg_map.find(lrg);
   84.69      if (lrg != compressed_lrg) {
   84.70 -      for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) {
   84.71 -        IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
   84.72 +      for (uint bidx = 0; bidx < _phc._cfg.number_of_blocks(); bidx++) {
   84.73 +        IndexSet *liveout = _phc._live->live(_phc._cfg.get_block(bidx));
   84.74          if (liveout->member(lrg)) {
   84.75            liveout->remove(lrg);
   84.76            liveout->insert(compressed_lrg);
   84.77 @@ -239,10 +231,10 @@
   84.78    // Nodes with index less than '_unique' are original, non-virtual Nodes.
   84.79    _unique = C->unique();
   84.80  
   84.81 -  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
   84.82 +  for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
   84.83      C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
   84.84      if (C->failing()) return;
   84.85 -    Block *b = _phc._cfg._blocks[i];
   84.86 +    Block *b = _phc._cfg.get_block(i);
   84.87      uint cnt = b->num_preds();  // Number of inputs to the Phi
   84.88  
   84.89      for( uint l = 1; l<b->_nodes.size(); l++ ) {
   84.90 @@ -330,9 +322,7 @@
   84.91                copy = m->clone();
   84.92                // Insert the copy in the basic block, just before us
   84.93                b->_nodes.insert(l++, copy);
   84.94 -              if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) {
   84.95 -                l++;
   84.96 -              }
   84.97 +              l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
   84.98              } else {
   84.99                const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
  84.100                copy = new (C) MachSpillCopyNode(m, *rm, *rm);
  84.101 @@ -403,8 +393,7 @@
  84.102    } // End of for all blocks
  84.103  }
  84.104  
  84.105 -//=============================================================================
  84.106 -//------------------------------coalesce---------------------------------------
  84.107 +
  84.108  // Aggressive (but pessimistic) copy coalescing of a single block
  84.109  
  84.110  // The following coalesce pass represents a single round of aggressive
  84.111 @@ -464,20 +453,16 @@
  84.112    } // End of for all instructions in block
  84.113  }
  84.114  
  84.115 -//=============================================================================
  84.116 -//------------------------------PhaseConservativeCoalesce----------------------
  84.117  PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
  84.118    _ulr.initialize(_phc._lrg_map.max_lrg_id());
  84.119  }
  84.120  
  84.121 -//------------------------------verify-----------------------------------------
  84.122  void PhaseConservativeCoalesce::verify() {
  84.123  #ifdef ASSERT
  84.124    _phc.set_was_low();
  84.125  #endif
  84.126  }
  84.127  
  84.128 -//------------------------------union_helper-----------------------------------
  84.129  void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
  84.130    // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  84.131    // union-find tree
  84.132 @@ -520,7 +505,6 @@
  84.133    }
  84.134  }
  84.135  
  84.136 -//------------------------------compute_separating_interferences---------------
  84.137  // Factored code from copy_copy that computes extra interferences from
  84.138  // lengthening a live range by double-coalescing.
  84.139  uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {
  84.140 @@ -586,7 +570,6 @@
  84.141    return reg_degree;
  84.142  }
  84.143  
  84.144 -//------------------------------update_ifg-------------------------------------
  84.145  void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
  84.146    // Some original neighbors of lr1 might have gone away
  84.147    // because the constrained register mask prevented them.
  84.148 @@ -616,7 +599,6 @@
  84.149        lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
  84.150  }
  84.151  
  84.152 -//------------------------------record_bias------------------------------------
  84.153  static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
  84.154    // Tag copy bias here
  84.155    if( !ifg->lrgs(lr1)._copy_bias )
  84.156 @@ -625,7 +607,6 @@
  84.157      ifg->lrgs(lr2)._copy_bias = lr1;
  84.158  }
  84.159  
  84.160 -//------------------------------copy_copy--------------------------------------
  84.161  // See if I can coalesce a series of multiple copies together.  I need the
  84.162  // final dest copy and the original src copy.  They can be the same Node.
  84.163  // Compute the compatible register masks.
  84.164 @@ -785,7 +766,6 @@
  84.165    return true;
  84.166  }
  84.167  
  84.168 -//------------------------------coalesce---------------------------------------
  84.169  // Conservative (but pessimistic) copy coalescing of a single block
  84.170  void PhaseConservativeCoalesce::coalesce( Block *b ) {
  84.171    // Bail out on infrequent blocks
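
The coalescing code above folds the live ranges on either side of a copy into one name through the allocator's union-find (combine_these_two merges the larger id into the smaller). As a minimal sketch of that flavor of copy coalescing, the toy below unions live-range ids for a couple of pretend copies and looks up the shared representative; ToyLrgMap is hypothetical, and none of the interference or register-mask checks of the real pass are modelled.

    // Toy union-find over live-range ids, in the spirit of copy coalescing.
    #include <cstdio>
    #include <numeric>
    #include <vector>

    struct ToyLrgMap {
      std::vector<unsigned> uf;
      explicit ToyLrgMap(unsigned n) : uf(n) { std::iota(uf.begin(), uf.end(), 0u); }

      unsigned find(unsigned lrg) {                 // path-halving find
        while (uf[lrg] != lrg) {
          uf[lrg] = uf[uf[lrg]];
          lrg = uf[lrg];
        }
        return lrg;
      }

      // Merge the larger id into the smaller one, as combine_these_two does.
      void unite(unsigned a, unsigned b) {
        a = find(a); b = find(b);
        if (a == b) return;
        if (a < b) uf[b] = a; else uf[a] = b;
      }
    };

    int main() {
      ToyLrgMap lrgs(6);
      // Pretend we saw copies L4 = L2 and L2 = L1 in a block.
      lrgs.unite(4, 2);
      lrgs.unite(2, 1);
      std::printf("L4 now maps to L%u\n", lrgs.find(4));   // prints L1
      return 0;
    }
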
    85.1 --- a/src/share/vm/opto/compile.cpp	Fri Aug 23 22:12:18 2013 +0100
    85.2 +++ b/src/share/vm/opto/compile.cpp	Fri Aug 30 09:50:49 2013 +0100
    85.3 @@ -2136,7 +2136,9 @@
    85.4  //------------------------------Code_Gen---------------------------------------
    85.5  // Given a graph, generate code for it
    85.6  void Compile::Code_Gen() {
    85.7 -  if (failing())  return;
    85.8 +  if (failing()) {
    85.9 +    return;
   85.10 +  }
   85.11  
   85.12    // Perform instruction selection.  You might think we could reclaim Matcher
   85.13    // memory PDQ, but actually the Matcher is used in generating spill code.
   85.14 @@ -2148,12 +2150,11 @@
   85.15    // nodes.  Mapping is only valid at the root of each matched subtree.
   85.16    NOT_PRODUCT( verify_graph_edges(); )
   85.17  
   85.18 -  Node_List proj_list;
   85.19 -  Matcher m(proj_list);
   85.20 -  _matcher = &m;
   85.21 +  Matcher matcher;
   85.22 +  _matcher = &matcher;
   85.23    {
   85.24      TracePhase t2("matcher", &_t_matcher, true);
   85.25 -    m.match();
   85.26 +    matcher.match();
   85.27    }
   85.28    // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
   85.29    // nodes.  Mapping is only valid at the root of each matched subtree.
   85.30 @@ -2161,31 +2162,26 @@
   85.31  
   85.32    // If you have too many nodes, or if matching has failed, bail out
   85.33    check_node_count(0, "out of nodes matching instructions");
   85.34 -  if (failing())  return;
   85.35 +  if (failing()) {
   85.36 +    return;
   85.37 +  }
   85.38  
   85.39    // Build a proper-looking CFG
   85.40 -  PhaseCFG cfg(node_arena(), root(), m);
   85.41 +  PhaseCFG cfg(node_arena(), root(), matcher);
   85.42    _cfg = &cfg;
   85.43    {
   85.44      NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
   85.45 -    cfg.Dominators();
   85.46 -    if (failing())  return;
   85.47 -
   85.48 +    bool success = cfg.do_global_code_motion();
   85.49 +    if (!success) {
   85.50 +      return;
   85.51 +    }
   85.52 +
   85.53 +    print_method(PHASE_GLOBAL_CODE_MOTION, 2);
   85.54      NOT_PRODUCT( verify_graph_edges(); )
   85.55 -
   85.56 -    cfg.Estimate_Block_Frequency();
   85.57 -    cfg.GlobalCodeMotion(m,unique(),proj_list);
   85.58 -    if (failing())  return;
   85.59 -
   85.60 -    print_method(PHASE_GLOBAL_CODE_MOTION, 2);
   85.61 -
   85.62 -    NOT_PRODUCT( verify_graph_edges(); )
   85.63 -
   85.64      debug_only( cfg.verify(); )
   85.65    }
   85.66 -  NOT_PRODUCT( verify_graph_edges(); )
   85.67 -
   85.68 -  PhaseChaitin regalloc(unique(), cfg, m);
   85.69 +
   85.70 +  PhaseChaitin regalloc(unique(), cfg, matcher);
   85.71    _regalloc = &regalloc;
   85.72    {
   85.73      TracePhase t2("regalloc", &_t_registerAllocation, true);
   85.74 @@ -2206,7 +2202,7 @@
   85.75    // can now safely remove it.
   85.76    {
   85.77      NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
   85.78 -    cfg.remove_empty();
   85.79 +    cfg.remove_empty_blocks();
   85.80      if (do_freq_based_layout()) {
   85.81        PhaseBlockLayout layout(cfg);
   85.82      } else {
   85.83 @@ -2253,38 +2249,50 @@
   85.84    _regalloc->dump_frame();
   85.85  
   85.86    Node *n = NULL;
   85.87 -  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
   85.88 -    if (VMThread::should_terminate()) { cut_short = true; break; }
   85.89 -    Block *b = _cfg->_blocks[i];
   85.90 -    if (b->is_connector() && !Verbose) continue;
   85.91 -    n = b->_nodes[0];
   85.92 -    if (pcs && n->_idx < pc_limit)
   85.93 +  for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
   85.94 +    if (VMThread::should_terminate()) {
   85.95 +      cut_short = true;
   85.96 +      break;
   85.97 +    }
   85.98 +    Block* block = _cfg->get_block(i);
   85.99 +    if (block->is_connector() && !Verbose) {
  85.100 +      continue;
  85.101 +    }
  85.102 +    n = block->_nodes[0];
  85.103 +    if (pcs && n->_idx < pc_limit) {
  85.104        tty->print("%3.3x   ", pcs[n->_idx]);
  85.105 -    else
  85.106 +    } else {
  85.107        tty->print("      ");
  85.108 -    b->dump_head(_cfg);
  85.109 -    if (b->is_connector()) {
  85.110 +    }
  85.111 +    block->dump_head(_cfg);
  85.112 +    if (block->is_connector()) {
  85.113        tty->print_cr("        # Empty connector block");
  85.114 -    } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
  85.115 +    } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
  85.116        tty->print_cr("        # Block is sole successor of call");
  85.117      }
  85.118  
  85.119      // For all instructions
  85.120      Node *delay = NULL;
  85.121 -    for( uint j = 0; j<b->_nodes.size(); j++ ) {
  85.122 -      if (VMThread::should_terminate()) { cut_short = true; break; }
  85.123 -      n = b->_nodes[j];
  85.124 +    for (uint j = 0; j < block->_nodes.size(); j++) {
  85.125 +      if (VMThread::should_terminate()) {
  85.126 +        cut_short = true;
  85.127 +        break;
  85.128 +      }
  85.129 +      n = block->_nodes[j];
  85.130        if (valid_bundle_info(n)) {
  85.131 -        Bundle *bundle = node_bundling(n);
  85.132 +        Bundle* bundle = node_bundling(n);
  85.133          if (bundle->used_in_unconditional_delay()) {
  85.134            delay = n;
  85.135            continue;
  85.136          }
  85.137 -        if (bundle->starts_bundle())
  85.138 +        if (bundle->starts_bundle()) {
  85.139            starts_bundle = '+';
  85.140 +        }
  85.141        }
  85.142  
  85.143 -      if (WizardMode) n->dump();
  85.144 +      if (WizardMode) {
  85.145 +        n->dump();
  85.146 +      }
  85.147  
  85.148        if( !n->is_Region() &&    // Don't print in the Assembly
  85.149            !n->is_Phi() &&       // a few noisily useless nodes
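
The Code_Gen hunk above drops the proj_list out-parameter (the Matcher now owns its projections) and folds the separate Dominators / Estimate_Block_Frequency / GlobalCodeMotion calls into a single do_global_code_motion() that reports success. The sketch below mirrors just that shape with hypothetical ToyMatcher/ToyCFG classes; it is not the real Compile/Matcher/PhaseCFG API.

    // Toy sketch of moving an out-parameter list into the phase that creates it.
    #include <cstdio>
    #include <string>
    #include <vector>

    class ToyMatcher {
      std::vector<std::string> _projections;   // owned here instead of being
                                               // threaded through the caller
    public:
      void match() {
        // Pretend matching produced a couple of projection nodes.
        _projections.push_back("proj-flags");
        _projections.push_back("proj-memory");
      }
      unsigned number_of_projections() const              { return (unsigned)_projections.size(); }
      const std::string& get_projection(unsigned i) const { return _projections[i]; }
    };

    class ToyCFG {
      const ToyMatcher& _matcher;              // later phase asks the matcher directly
    public:
      explicit ToyCFG(const ToyMatcher& m) : _matcher(m) {}
      bool do_global_code_motion() {
        for (unsigned i = 0; i < _matcher.number_of_projections(); i++) {
          std::printf("placing %s\n", _matcher.get_projection(i).c_str());
        }
        return true;                           // caller bails out on false
      }
    };

    int main() {
      ToyMatcher matcher;
      matcher.match();
      ToyCFG cfg(matcher);
      if (!cfg.do_global_code_motion()) {
        return 1;                              // mirrors the early return in Code_Gen
      }
      return 0;
    }
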
    86.1 --- a/src/share/vm/opto/domgraph.cpp	Fri Aug 23 22:12:18 2013 +0100
    86.2 +++ b/src/share/vm/opto/domgraph.cpp	Fri Aug 30 09:50:49 2013 +0100
    86.3 @@ -32,9 +32,6 @@
    86.4  
    86.5  // Portions of code courtesy of Clifford Click
    86.6  
    86.7 -// Optimization - Graph Style
    86.8 -
    86.9 -//------------------------------Tarjan-----------------------------------------
   86.10  // A data structure that holds all the information needed to find dominators.
   86.11  struct Tarjan {
   86.12    Block *_block;                // Basic block for this info
   86.13 @@ -60,23 +57,21 @@
   86.14  
   86.15  };
   86.16  
   86.17 -//------------------------------Dominator--------------------------------------
   86.18  // Compute the dominator tree of the CFG.  The CFG must already have been
   86.19  // constructed.  This is the Lengauer & Tarjan O(E-alpha(E,V)) algorithm.
   86.20 -void PhaseCFG::Dominators( ) {
   86.21 +void PhaseCFG::build_dominator_tree() {
   86.22    // Pre-grow the blocks array, prior to the ResourceMark kicking in
   86.23 -  _blocks.map(_num_blocks,0);
   86.24 +  _blocks.map(number_of_blocks(), 0);
   86.25  
   86.26    ResourceMark rm;
   86.27    // Setup mappings from my Graph to Tarjan's stuff and back
   86.28    // Note: Tarjan uses 1-based arrays
   86.29 -  Tarjan *tarjan = NEW_RESOURCE_ARRAY(Tarjan,_num_blocks+1);
   86.30 +  Tarjan* tarjan = NEW_RESOURCE_ARRAY(Tarjan, number_of_blocks() + 1);
   86.31  
   86.32    // Tarjan's algorithm, almost verbatim:
   86.33    // Step 1:
   86.34 -  _rpo_ctr = _num_blocks;
   86.35 -  uint dfsnum = DFS( tarjan );
   86.36 -  if( dfsnum-1 != _num_blocks ) {// Check for unreachable loops!
   86.37 +  uint dfsnum = do_DFS(tarjan, number_of_blocks());
   86.38 +  if (dfsnum - 1 != number_of_blocks()) { // Check for unreachable loops!
   86.39      // If the returned dfsnum does not match the number of blocks, then we
   86.40      // must have some unreachable loops.  These can be made at any time by
   86.41      // IterGVN.  They are cleaned up by CCP or the loop opts, but the last
   86.42 @@ -93,14 +88,13 @@
   86.43      C->record_method_not_compilable("unreachable loop");
   86.44      return;
   86.45    }
   86.46 -  _blocks._cnt = _num_blocks;
   86.47 +  _blocks._cnt = number_of_blocks();
   86.48  
   86.49    // Tarjan is using 1-based arrays, so these are some initialize flags
   86.50    tarjan[0]._size = tarjan[0]._semi = 0;
   86.51    tarjan[0]._label = &tarjan[0];
   86.52  
   86.53 -  uint i;
   86.54 -  for( i=_num_blocks; i>=2; i-- ) { // For all vertices in DFS order
   86.55 +  for (uint i = number_of_blocks(); i >= 2; i--) { // For all vertices in DFS order
   86.56      Tarjan *w = &tarjan[i];     // Get vertex from DFS
   86.57  
   86.58      // Step 2:
   86.59 @@ -130,19 +124,19 @@
   86.60    }
   86.61  
   86.62    // Step 4:
   86.63 -  for( i=2; i <= _num_blocks; i++ ) {
   86.64 +  for (uint i = 2; i <= number_of_blocks(); i++) {
   86.65      Tarjan *w = &tarjan[i];
   86.66      if( w->_dom != &tarjan[w->_semi] )
   86.67        w->_dom = w->_dom->_dom;
   86.68      w->_dom_next = w->_dom_child = NULL;  // Initialize for building tree later
   86.69    }
   86.70    // No immediate dominator for the root
   86.71 -  Tarjan *w = &tarjan[_broot->_pre_order];
   86.72 +  Tarjan *w = &tarjan[get_root_block()->_pre_order];
   86.73    w->_dom = NULL;
   86.74    w->_dom_next = w->_dom_child = NULL;  // Initialize for building tree later
   86.75  
   86.76    // Convert the dominator tree array into my kind of graph
   86.77 -  for( i=1; i<=_num_blocks;i++){// For all Tarjan vertices
   86.78 +  for(uint i = 1; i <= number_of_blocks(); i++){ // For all Tarjan vertices
   86.79      Tarjan *t = &tarjan[i];     // Handy access
   86.80      Tarjan *tdom = t->_dom;     // Handy access to immediate dominator
   86.81      if( tdom )  {               // Root has no immediate dominator
   86.82 @@ -152,11 +146,10 @@
   86.83      } else
   86.84        t->_block->_idom = NULL;  // Root
   86.85    }
   86.86 -  w->setdepth( _num_blocks+1 ); // Set depth in dominator tree
   86.87 +  w->setdepth(number_of_blocks() + 1); // Set depth in dominator tree
   86.88  
   86.89  }
   86.90  
   86.91 -//----------------------------Block_Stack--------------------------------------
   86.92  class Block_Stack {
   86.93    private:
   86.94      struct Block_Descr {
   86.95 @@ -214,7 +207,6 @@
   86.96      }
   86.97  };
   86.98  
   86.99 -//-------------------------most_frequent_successor-----------------------------
  86.100  // Find the index into the b->succs[] array of the most frequent successor.
  86.101  uint Block_Stack::most_frequent_successor( Block *b ) {
  86.102    uint freq_idx = 0;
  86.103 @@ -258,40 +250,38 @@
  86.104    return freq_idx;
  86.105  }
  86.106  
  86.107 -//------------------------------DFS--------------------------------------------
  86.108  // Perform DFS search.  Setup 'vertex' as DFS to vertex mapping.  Setup
  86.109  // 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.
  86.110 -uint PhaseCFG::DFS( Tarjan *tarjan ) {
  86.111 -  Block *b = _broot;
  86.112 +uint PhaseCFG::do_DFS(Tarjan *tarjan, uint rpo_counter) {
  86.113 +  Block* root_block = get_root_block();
  86.114    uint pre_order = 1;
  86.115 -  // Allocate stack of size _num_blocks+1 to avoid frequent realloc
  86.116 -  Block_Stack bstack(tarjan, _num_blocks+1);
  86.117 +  // Allocate stack of size number_of_blocks() + 1 to avoid frequent realloc
  86.118 +  Block_Stack bstack(tarjan, number_of_blocks() + 1);
  86.119  
  86.120    // Push on stack the state for the first block
  86.121 -  bstack.push(pre_order, b);
  86.122 +  bstack.push(pre_order, root_block);
  86.123    ++pre_order;
  86.124  
  86.125    while (bstack.is_nonempty()) {
  86.126      if (!bstack.last_successor()) {
  86.127        // Walk over all successors in pre-order (DFS).
  86.128 -      Block *s = bstack.next_successor();
  86.129 -      if (s->_pre_order == 0) { // Check for no-pre-order, not-visited
  86.130 +      Block* next_block = bstack.next_successor();
  86.131 +      if (next_block->_pre_order == 0) { // Check for no-pre-order, not-visited
  86.132          // Push on stack the state of successor
  86.133 -        bstack.push(pre_order, s);
  86.134 +        bstack.push(pre_order, next_block);
  86.135          ++pre_order;
  86.136        }
  86.137      }
  86.138      else {
  86.139        // Build a reverse post-order in the CFG _blocks array
  86.140        Block *stack_top = bstack.pop();
  86.141 -      stack_top->_rpo = --_rpo_ctr;
  86.142 +      stack_top->_rpo = --rpo_counter;
  86.143        _blocks.map(stack_top->_rpo, stack_top);
  86.144      }
  86.145    }
  86.146    return pre_order;
  86.147  }
  86.148  
  86.149 -//------------------------------COMPRESS---------------------------------------
  86.150  void Tarjan::COMPRESS()
  86.151  {
  86.152    assert( _ancestor != 0, "" );
  86.153 @@ -303,14 +293,12 @@
  86.154    }
  86.155  }
  86.156  
  86.157 -//------------------------------EVAL-------------------------------------------
  86.158  Tarjan *Tarjan::EVAL() {
  86.159    if( !_ancestor ) return _label;
  86.160    COMPRESS();
  86.161    return (_ancestor->_label->_semi >= _label->_semi) ? _label : _ancestor->_label;
  86.162  }
  86.163  
  86.164 -//------------------------------LINK-------------------------------------------
  86.165  void Tarjan::LINK( Tarjan *w, Tarjan *tarjan0 ) {
  86.166    Tarjan *s = w;
  86.167    while( w->_label->_semi < s->_child->_label->_semi ) {
  86.168 @@ -333,7 +321,6 @@
  86.169    }
  86.170  }
  86.171  
  86.172 -//------------------------------setdepth---------------------------------------
  86.173  void Tarjan::setdepth( uint stack_size ) {
  86.174    Tarjan **top  = NEW_RESOURCE_ARRAY(Tarjan*, stack_size);
  86.175    Tarjan **next = top;
  86.176 @@ -362,8 +349,7 @@
  86.177    } while (last < top);
  86.178  }
  86.179  
  86.180 -//*********************** DOMINATORS ON THE SEA OF NODES***********************
  86.181 -//------------------------------NTarjan----------------------------------------
  86.182 +// Compute dominators on the Sea of Nodes form
  86.183  // A data structure that holds all the information needed to find dominators.
  86.184  struct NTarjan {
  86.185    Node *_control;               // Control node associated with this info
  86.186 @@ -396,7 +382,6 @@
  86.187  #endif
  86.188  };
  86.189  
  86.190 -//------------------------------Dominator--------------------------------------
  86.191  // Compute the dominator tree of the sea of nodes.  This version walks all CFG
  86.192  // nodes (using the is_CFG() call) and places them in a dominator tree.  Thus,
  86.193  // it needs a count of the CFG nodes for the mapping table. This is the
  86.194 @@ -517,7 +502,6 @@
  86.195    }
  86.196  }
  86.197  
  86.198 -//------------------------------DFS--------------------------------------------
  86.199  // Perform DFS search.  Setup 'vertex' as DFS to vertex mapping.  Setup
  86.200  // 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.
  86.201  int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
  86.202 @@ -560,7 +544,6 @@
  86.203    return dfsnum;
  86.204  }
  86.205  
  86.206 -//------------------------------COMPRESS---------------------------------------
  86.207  void NTarjan::COMPRESS()
  86.208  {
  86.209    assert( _ancestor != 0, "" );
  86.210 @@ -572,14 +555,12 @@
  86.211    }
  86.212  }
  86.213  
  86.214 -//------------------------------EVAL-------------------------------------------
  86.215  NTarjan *NTarjan::EVAL() {
  86.216    if( !_ancestor ) return _label;
  86.217    COMPRESS();
  86.218    return (_ancestor->_label->_semi >= _label->_semi) ? _label : _ancestor->_label;
  86.219  }
  86.220  
  86.221 -//------------------------------LINK-------------------------------------------
  86.222  void NTarjan::LINK( NTarjan *w, NTarjan *ntarjan0 ) {
  86.223    NTarjan *s = w;
  86.224    while( w->_label->_semi < s->_child->_label->_semi ) {
  86.225 @@ -602,7 +583,6 @@
  86.226    }
  86.227  }
  86.228  
  86.229 -//------------------------------setdepth---------------------------------------
  86.230  void NTarjan::setdepth( uint stack_size, uint *dom_depth ) {
  86.231    NTarjan **top  = NEW_RESOURCE_ARRAY(NTarjan*, stack_size);
  86.232    NTarjan **next = top;
  86.233 @@ -631,7 +611,6 @@
  86.234    } while (last < top);
  86.235  }
  86.236  
  86.237 -//------------------------------dump-------------------------------------------
  86.238  #ifndef PRODUCT
  86.239  void NTarjan::dump(int offset) const {
  86.240    // Dump the data from this node
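
build_dominator_tree above now passes the block count into do_DFS, which numbers blocks in reverse post-order by counting down as its explicit stack unwinds. The toy below sketches that RPO numbering on a made-up four-block CFG; the adjacency lists and the pair-based stack are assumptions, not the real Block/Block_Stack types.

    // Toy iterative DFS that hands out reverse-post-order numbers by counting
    // down from the number of blocks, in the spirit of PhaseCFG::do_DFS.
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      // Successor lists for a 4-block CFG: B0 -> B1,B2; B1 -> B3; B2 -> B3.
      std::vector<std::vector<int>> succs = { {1, 2}, {3}, {3}, {} };
      const int nblocks = (int)succs.size();

      std::vector<int>  rpo(nblocks, -1);
      std::vector<bool> visited(nblocks, false);
      int rpo_counter = nblocks;

      // Explicit stack of (block, index of next successor to try).
      std::vector<std::pair<int, size_t>> stack;
      stack.push_back({0, 0});
      visited[0] = true;

      while (!stack.empty()) {
        int    block = stack.back().first;
        size_t i     = stack.back().second;
        if (i < succs[block].size()) {
          stack.back().second = i + 1;         // advance before descending
          int next = succs[block][i];
          if (!visited[next]) {
            visited[next] = true;
            stack.push_back({next, 0});
          }
        } else {
          rpo[block] = --rpo_counter;          // number blocks on the way out
          stack.pop_back();
        }
      }

      for (int b = 0; b < nblocks; b++) {
        std::printf("B%d has rpo %d\n", b, rpo[b]);
      }
      return 0;
    }
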
    87.1 --- a/src/share/vm/opto/gcm.cpp	Fri Aug 23 22:12:18 2013 +0100
    87.2 +++ b/src/share/vm/opto/gcm.cpp	Fri Aug 30 09:50:49 2013 +0100
    87.3 @@ -121,27 +121,30 @@
    87.4  
    87.5  //------------------------------schedule_pinned_nodes--------------------------
    87.6  // Set the basic block for Nodes pinned into blocks
    87.7 -void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
    87.8 +void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
    87.9    // Allocate node stack of size C->unique()+8 to avoid frequent realloc
   87.10 -  GrowableArray <Node *> spstack(C->unique()+8);
   87.11 +  GrowableArray <Node *> spstack(C->unique() + 8);
   87.12    spstack.push(_root);
   87.13 -  while ( spstack.is_nonempty() ) {
   87.14 -    Node *n = spstack.pop();
   87.15 -    if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
   87.16 -      if( n->pinned() && !has_block(n)) {  // Pinned?  Nail it down!
   87.17 -        assert( n->in(0), "pinned Node must have Control" );
   87.18 +  while (spstack.is_nonempty()) {
   87.19 +    Node* node = spstack.pop();
   87.20 +    if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
   87.21 +      if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
   87.22 +        assert(node->in(0), "pinned Node must have Control");
   87.23          // Before setting block replace block_proj control edge
   87.24 -        replace_block_proj_ctrl(n);
   87.25 -        Node *input = n->in(0);
   87.26 +        replace_block_proj_ctrl(node);
   87.27 +        Node* input = node->in(0);
   87.28          while (!input->is_block_start()) {
   87.29            input = input->in(0);
   87.30          }
   87.31 -        Block *b = get_block_for_node(input); // Basic block of controlling input
   87.32 -        schedule_node_into_block(n, b);
   87.33 +        Block* block = get_block_for_node(input); // Basic block of controlling input
   87.34 +        schedule_node_into_block(node, block);
   87.35        }
   87.36 -      for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
   87.37 -        if( n->in(i) != NULL )
   87.38 -          spstack.push(n->in(i));
   87.39 +
   87.40 +      // process all inputs that are non NULL
   87.41 +      for (int i = node->req() - 1; i >= 0; --i) {
   87.42 +        if (node->in(i) != NULL) {
   87.43 +          spstack.push(node->in(i));
   87.44 +        }
   87.45        }
   87.46      }
   87.47    }
   87.48 @@ -205,32 +208,29 @@
   87.49  // which all their inputs occur.
   87.50  bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
   87.51    // Allocate stack with enough space to avoid frequent realloc
   87.52 -  Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
   87.53 -  // roots.push(_root); _root will be processed among C->top() inputs
   87.54 +  Node_Stack nstack(roots.Size() + 8);
   87.55 +  // _root will be processed among C->top() inputs
   87.56    roots.push(C->top());
   87.57    visited.set(C->top()->_idx);
   87.58  
   87.59    while (roots.size() != 0) {
   87.60      // Use local variables nstack_top_n & nstack_top_i to cache values
   87.61      // on stack's top.
   87.62 -    Node *nstack_top_n = roots.pop();
   87.63 -    uint  nstack_top_i = 0;
   87.64 -//while_nstack_nonempty:
   87.65 +    Node* parent_node = roots.pop();
   87.66 +    uint  input_index = 0;
   87.67 +
   87.68      while (true) {
   87.69 -      // Get parent node and next input's index from stack's top.
   87.70 -      Node *n = nstack_top_n;
   87.71 -      uint  i = nstack_top_i;
   87.72 -
   87.73 -      if (i == 0) {
   87.74 +      if (input_index == 0) {
   87.75          // Fixup some control.  Constants without control get attached
   87.76          // to root and nodes that use is_block_proj() nodes should be attached
   87.77          // to the region that starts their block.
   87.78 -        const Node *in0 = n->in(0);
   87.79 -        if (in0 != NULL) {              // Control-dependent?
   87.80 -          replace_block_proj_ctrl(n);
   87.81 -        } else {               // n->in(0) == NULL
   87.82 -          if (n->req() == 1) { // This guy is a constant with NO inputs?
   87.83 -            n->set_req(0, _root);
   87.84 +        const Node* control_input = parent_node->in(0);
   87.85 +        if (control_input != NULL) {
   87.86 +          replace_block_proj_ctrl(parent_node);
   87.87 +        } else {
   87.88 +          // Is a constant with NO inputs?
   87.89 +          if (parent_node->req() == 1) {
   87.90 +            parent_node->set_req(0, _root);
   87.91            }
   87.92          }
   87.93        }
   87.94 @@ -239,37 +239,47 @@
   87.95        // input is already in a block we quit following inputs (to avoid
   87.96        // cycles). Instead we put that Node on a worklist to be handled
  87.97        // later (since ITS inputs may not have a block yet).
   87.98 -      bool done = true;              // Assume all n's inputs will be processed
   87.99 -      while (i < n->len()) {         // For all inputs
  87.100 -        Node *in = n->in(i);         // Get input
  87.101 -        ++i;
  87.102 -        if (in == NULL) continue;    // Ignore NULL, missing inputs
  87.103 +
  87.104 +      // Assume all n's inputs will be processed
  87.105 +      bool done = true;
  87.106 +
  87.107 +      while (input_index < parent_node->len()) {
  87.108 +        Node* in = parent_node->in(input_index++);
  87.109 +        if (in == NULL) {
  87.110 +          continue;
  87.111 +        }
  87.112 +
  87.113          int is_visited = visited.test_set(in->_idx);
  87.114 -        if (!has_block(in)) { // Missing block selection?
  87.115 +        if (!has_block(in)) {
  87.116            if (is_visited) {
  87.117 -            // assert( !visited.test(in->_idx), "did not schedule early" );
  87.118              return false;
  87.119            }
  87.120 -          nstack.push(n, i);         // Save parent node and next input's index.
  87.121 -          nstack_top_n = in;         // Process current input now.
  87.122 -          nstack_top_i = 0;
  87.123 -          done = false;              // Not all n's inputs processed.
  87.124 -          break; // continue while_nstack_nonempty;
  87.125 -        } else if (!is_visited) {    // Input not yet visited?
  87.126 -          roots.push(in);            // Visit this guy later, using worklist
  87.127 +          // Save parent node and next input's index.
  87.128 +          nstack.push(parent_node, input_index);
  87.129 +          // Process current input now.
  87.130 +          parent_node = in;
  87.131 +          input_index = 0;
  87.132 +          // Not all n's inputs processed.
  87.133 +          done = false;
  87.134 +          break;
  87.135 +        } else if (!is_visited) {
  87.136 +          // Visit this guy later, using worklist
  87.137 +          roots.push(in);
  87.138          }
  87.139        }
  87.140 +
  87.141        if (done) {
  87.142          // All of n's inputs have been processed, complete post-processing.
  87.143  
  87.144          // Some instructions are pinned into a block.  These include Region,
  87.145          // Phi, Start, Return, and other control-dependent instructions and
  87.146          // any projections which depend on them.
  87.147 -        if (!n->pinned()) {
  87.148 +        if (!parent_node->pinned()) {
  87.149            // Set earliest legal block.
  87.150 -          map_node_to_block(n, find_deepest_input(n, this));
  87.151 +          Block* earliest_block = find_deepest_input(parent_node, this);
  87.152 +          map_node_to_block(parent_node, earliest_block);
  87.153          } else {
  87.154 -          assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge");
  87.155 +          assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
  87.156          }
  87.157  
  87.158          if (nstack.is_empty()) {
  87.159 @@ -278,12 +288,12 @@
  87.160            break;
  87.161          }
  87.162          // Get saved parent node and next input's index.
  87.163 -        nstack_top_n = nstack.node();
  87.164 -        nstack_top_i = nstack.index();
  87.165 +        parent_node = nstack.node();
  87.166 +        input_index = nstack.index();
  87.167          nstack.pop();
  87.168 -      } //    if (done)
  87.169 -    }   // while (true)
  87.170 -  }     // while (roots.size() != 0)
  87.171 +      }
  87.172 +    }
  87.173 +  }
  87.174    return true;
  87.175  }
  87.176  
  87.177 @@ -847,7 +857,7 @@
  87.178  
  87.179  //------------------------------ComputeLatenciesBackwards----------------------
  87.180  // Compute the latency of all the instructions.
  87.181 -void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
  87.182 +void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
  87.183  #ifndef PRODUCT
  87.184    if (trace_opto_pipelining())
  87.185      tty->print("\n#---- ComputeLatenciesBackwards ----\n");
  87.186 @@ -870,31 +880,34 @@
  87.187    // Set the latency for this instruction
  87.188  #ifndef PRODUCT
  87.189    if (trace_opto_pipelining()) {
  87.190 -    tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
  87.191 -               n->_idx, _node_latency->at_grow(n->_idx));
  87.192 +    tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
  87.193      dump();
  87.194    }
  87.195  #endif
  87.196  
  87.197 -  if (n->is_Proj())
  87.198 +  if (n->is_Proj()) {
  87.199      n = n->in(0);
  87.200 +  }
  87.201  
  87.202 -  if (n->is_Root())
  87.203 +  if (n->is_Root()) {
  87.204      return;
  87.205 +  }
  87.206  
  87.207    uint nlen = n->len();
  87.208 -  uint use_latency = _node_latency->at_grow(n->_idx);
  87.209 +  uint use_latency = get_latency_for_node(n);
  87.210    uint use_pre_order = get_block_for_node(n)->_pre_order;
  87.211  
  87.212 -  for ( uint j=0; j<nlen; j++ ) {
  87.213 +  for (uint j = 0; j < nlen; j++) {
  87.214      Node *def = n->in(j);
  87.215  
  87.216 -    if (!def || def == n)
  87.217 +    if (!def || def == n) {
  87.218        continue;
  87.219 +    }
  87.220  
  87.221      // Walk backwards thru projections
  87.222 -    if (def->is_Proj())
  87.223 +    if (def->is_Proj()) {
  87.224        def = def->in(0);
  87.225 +    }
  87.226  
  87.227  #ifndef PRODUCT
  87.228      if (trace_opto_pipelining()) {
  87.229 @@ -907,22 +920,20 @@
  87.230      Block *def_block = get_block_for_node(def);
  87.231      uint def_pre_order = def_block ? def_block->_pre_order : 0;
  87.232  
  87.233 -    if ( (use_pre_order <  def_pre_order) ||
  87.234 -         (use_pre_order == def_pre_order && n->is_Phi()) )
  87.235 +    if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
  87.236        continue;
  87.237 +    }
  87.238  
  87.239      uint delta_latency = n->latency(j);
  87.240      uint current_latency = delta_latency + use_latency;
  87.241  
  87.242 -    if (_node_latency->at_grow(def->_idx) < current_latency) {
  87.243 -      _node_latency->at_put_grow(def->_idx, current_latency);
  87.244 +    if (get_latency_for_node(def) < current_latency) {
  87.245 +      set_latency_for_node(def, current_latency);
  87.246      }
  87.247  
  87.248  #ifndef PRODUCT
  87.249      if (trace_opto_pipelining()) {
  87.250 -      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
  87.251 -                    use_latency, j, delta_latency, current_latency, def->_idx,
  87.252 -                    _node_latency->at_grow(def->_idx));
  87.253 +      tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
  87.254      }
  87.255  #endif
  87.256    }
  87.257 @@ -957,7 +968,7 @@
  87.258        return 0;
  87.259  
  87.260      uint nlen = use->len();
  87.261 -    uint nl = _node_latency->at_grow(use->_idx);
  87.262 +    uint nl = get_latency_for_node(use);
  87.263  
  87.264      for ( uint j=0; j<nlen; j++ ) {
  87.265        if (use->in(j) == n) {
  87.266 @@ -992,8 +1003,7 @@
  87.267    // Set the latency for this instruction
  87.268  #ifndef PRODUCT
  87.269    if (trace_opto_pipelining()) {
  87.270 -    tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
  87.271 -               n->_idx, _node_latency->at_grow(n->_idx));
  87.272 +    tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
  87.273      dump();
  87.274    }
  87.275  #endif
  87.276 @@ -1006,7 +1016,7 @@
  87.277      if (latency < l) latency = l;
  87.278    }
  87.279  
  87.280 -  _node_latency->at_put_grow(n->_idx, latency);
  87.281 +  set_latency_for_node(n, latency);
  87.282  }
  87.283  
  87.284  //------------------------------hoist_to_cheaper_block-------------------------
  87.285 @@ -1016,9 +1026,9 @@
  87.286    const double delta = 1+PROB_UNLIKELY_MAG(4);
  87.287    Block* least       = LCA;
  87.288    double least_freq  = least->_freq;
  87.289 -  uint target        = _node_latency->at_grow(self->_idx);
  87.290 -  uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
  87.291 -  uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  87.292 +  uint target        = get_latency_for_node(self);
  87.293 +  uint start_latency = get_latency_for_node(LCA->_nodes[0]);
  87.294 +  uint end_latency   = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
  87.295    bool in_latency    = (target <= start_latency);
  87.296    const Block* root_block = get_block_for_node(_root);
  87.297  
  87.298 @@ -1035,8 +1045,7 @@
  87.299  
  87.300  #ifndef PRODUCT
  87.301    if (trace_opto_pipelining()) {
  87.302 -    tty->print("# Find cheaper block for latency %d: ",
  87.303 -      _node_latency->at_grow(self->_idx));
  87.304 +    tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
  87.305      self->dump();
  87.306      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
  87.307        LCA->_pre_order,
  87.308 @@ -1065,9 +1074,9 @@
  87.309      if (mach && LCA == root_block)
  87.310        break;
  87.311  
  87.312 -    uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
  87.313 +    uint start_lat = get_latency_for_node(LCA->_nodes[0]);
  87.314      uint end_idx   = LCA->end_idx();
  87.315 -    uint end_lat   = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
  87.316 +    uint end_lat   = get_latency_for_node(LCA->_nodes[end_idx]);
  87.317      double LCA_freq = LCA->_freq;
  87.318  #ifndef PRODUCT
  87.319      if (trace_opto_pipelining()) {
  87.320 @@ -1109,7 +1118,7 @@
  87.321        tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
  87.322      }
  87.323  #endif
  87.324 -    _node_latency->at_put_grow(self->_idx, end_latency);
  87.325 +    set_latency_for_node(self, end_latency);
  87.326      partial_latency_of_defs(self);
  87.327    }
  87.328  
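hoist_to_cheaper_block, touched above, walks candidate blocks from the LCA toward the earliest legal placement and keeps the one with the lowest frequency, mildly preferring blocks where the node stays "in latency" (target <= start latency). A simplified, self-contained sketch of that choice; the Candidate struct and the bias handling are invented for illustration and assume at least one candidate:

    #include <vector>

    struct Candidate {
      double   freq;           // estimated execution frequency of the block
      unsigned start_latency;  // latency required at the block's head
    };

    // Lower effective cost wins; staying "in latency" divides the cost by a
    // bias factor > 1.0, echoing the delta = 1 + PROB_UNLIKELY_MAG(4) above.
    size_t pick_cheaper_block(const std::vector<Candidate>& cands,
                              unsigned target, double bias) {
      size_t best = 0;
      for (size_t i = 1; i < cands.size(); i++) {
        double cost_i    = cands[i].freq    / (cands[i].start_latency    >= target ? bias : 1.0);
        double cost_best = cands[best].freq / (cands[best].start_latency >= target ? bias : 1.0);
        if (cost_i < cost_best) {
          best = i;
        }
      }
      return best;
    }
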
  87.329 @@ -1255,7 +1264,7 @@
  87.330  } // end ScheduleLate
  87.331  
  87.332  //------------------------------GlobalCodeMotion-------------------------------
  87.333 -void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
  87.334 +void PhaseCFG::global_code_motion() {
  87.335    ResourceMark rm;
  87.336  
  87.337  #ifndef PRODUCT
  87.338 @@ -1265,21 +1274,22 @@
  87.339  #endif
  87.340  
  87.341    // Initialize the node to block mapping for things on the proj_list
  87.342 -  for (uint i = 0; i < proj_list.size(); i++) {
  87.343 -    unmap_node_from_block(proj_list[i]);
  87.344 +  for (uint i = 0; i < _matcher.number_of_projections(); i++) {
  87.345 +    unmap_node_from_block(_matcher.get_projection(i));
  87.346    }
  87.347  
  87.348    // Set the basic block for Nodes pinned into blocks
  87.349 -  Arena *a = Thread::current()->resource_area();
  87.350 -  VectorSet visited(a);
  87.351 -  schedule_pinned_nodes( visited );
  87.352 +  Arena* arena = Thread::current()->resource_area();
  87.353 +  VectorSet visited(arena);
  87.354 +  schedule_pinned_nodes(visited);
  87.355  
  87.356    // Find the earliest Block any instruction can be placed in.  Some
  87.357    // instructions are pinned into Blocks.  Unpinned instructions can
  87.358    // appear in last block in which all their inputs occur.
  87.359    visited.Clear();
  87.360 -  Node_List stack(a);
  87.361 -  stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
  87.362 +  Node_List stack(arena);
  87.363 +  // Pre-grow the list
  87.364 +  stack.map((C->unique() >> 1) + 16, NULL);
  87.365    if (!schedule_early(visited, stack)) {
  87.366      // Bailout without retry
  87.367      C->record_method_not_compilable("early schedule failed");
  87.368 @@ -1287,29 +1297,25 @@
  87.369    }
  87.370  
  87.371    // Build Def-Use edges.
  87.372 -  proj_list.push(_root);        // Add real root as another root
  87.373 -  proj_list.pop();
  87.374 -
  87.375    // Compute the latency information (via backwards walk) for all the
  87.376    // instructions in the graph
  87.377    _node_latency = new GrowableArray<uint>(); // resource_area allocation
  87.378  
  87.379 -  if( C->do_scheduling() )
  87.380 -    ComputeLatenciesBackwards(visited, stack);
  87.381 +  if (C->do_scheduling()) {
  87.382 +    compute_latencies_backwards(visited, stack);
  87.383 +  }
  87.384  
  87.385    // Now schedule all codes as LATE as possible.  This is the LCA in the
  87.386    // dominator tree of all USES of a value.  Pick the block with the least
  87.387    // loop nesting depth that is lowest in the dominator tree.
  87.388    // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
  87.389    schedule_late(visited, stack);
  87.390 -  if( C->failing() ) {
  87.391 +  if (C->failing()) {
  87.392      // schedule_late fails only when graph is incorrect.
  87.393      assert(!VerifyGraphEdges, "verification should have failed");
  87.394      return;
  87.395    }
  87.396  
  87.397 -  unique = C->unique();
  87.398 -
  87.399  #ifndef PRODUCT
  87.400    if (trace_opto_pipelining()) {
  87.401      tty->print("\n---- Detect implicit null checks ----\n");
  87.402 @@ -1332,10 +1338,11 @@
  87.403      // By reversing the loop direction we get a very minor gain on mpegaudio.
  87.404      // Feel free to revert to a forward loop for clarity.
  87.405      // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
  87.406 -    for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
  87.407 -      Node *proj = matcher._null_check_tests[i  ];
  87.408 -      Node *val  = matcher._null_check_tests[i+1];
  87.409 -      get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons);
  87.410 +    for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
  87.411 +      Node* proj = _matcher._null_check_tests[i];
  87.412 +      Node* val  = _matcher._null_check_tests[i + 1];
  87.413 +      Block* block = get_block_for_node(proj);
  87.414 +      block->implicit_null_check(this, proj, val, allowed_reasons);
  87.415        // The implicit_null_check will only perform the transformation
  87.416        // if the null branch is truly uncommon, *and* it leads to an
  87.417        // uncommon trap.  Combined with the too_many_traps guards
  87.418 @@ -1352,11 +1359,11 @@
  87.419  
  87.420    // Schedule locally.  Right now a simple topological sort.
  87.421    // Later, do a real latency aware scheduler.
  87.422 -  uint max_idx = C->unique();
  87.423 -  GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
  87.424 +  GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
  87.425    visited.Clear();
  87.426 -  for (uint i = 0; i < _num_blocks; i++) {
  87.427 -    if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
  87.428 +  for (uint i = 0; i < number_of_blocks(); i++) {
  87.429 +    Block* block = get_block(i);
  87.430 +    if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
  87.431        if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
  87.432          C->record_method_not_compilable("local schedule failed");
  87.433        }
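
As the comment in this hunk says, local scheduling is currently a topological sort driven by ready_cnt: a node becomes schedulable once all of its in-block inputs have been emitted. A generic sketch of that idea, independent of the HotSpot data structures:

    #include <queue>
    #include <vector>

    // ready_cnt[i] = number of not-yet-emitted in-block inputs of node i.
    // succs[i]     = in-block nodes that consume node i's result.
    std::vector<int> topo_schedule(std::vector<int> ready_cnt,
                                   const std::vector<std::vector<int>>& succs) {
      std::vector<int> order;
      std::queue<int> ready;
      for (int i = 0; i < (int)ready_cnt.size(); i++) {
        if (ready_cnt[i] == 0) ready.push(i);
      }
      while (!ready.empty()) {
        int n = ready.front();
        ready.pop();
        order.push_back(n);
        for (int s : succs[n]) {
          if (--ready_cnt[s] == 0) ready.push(s);  // last input emitted; s is ready
        }
      }
      return order;  // a valid topological order of the block's nodes
    }
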
  87.434 @@ -1366,15 +1373,17 @@
  87.435  
  87.436    // If we inserted any instructions between a Call and its CatchNode,

  87.437    // clone the instructions on all paths below the Catch.
  87.438 -  for (uint i = 0; i < _num_blocks; i++) {
  87.439 -    _blocks[i]->call_catch_cleanup(this, C);
  87.440 +  for (uint i = 0; i < number_of_blocks(); i++) {
  87.441 +    Block* block = get_block(i);
  87.442 +    block->call_catch_cleanup(this, C);
  87.443    }
  87.444  
  87.445  #ifndef PRODUCT
  87.446    if (trace_opto_pipelining()) {
  87.447      tty->print("\n---- After GlobalCodeMotion ----\n");
  87.448 -    for (uint i = 0; i < _num_blocks; i++) {
  87.449 -      _blocks[i]->dump();
  87.450 +    for (uint i = 0; i < number_of_blocks(); i++) {
  87.451 +      Block* block = get_block(i);
  87.452 +      block->dump();
  87.453      }
  87.454    }
  87.455  #endif
  87.456 @@ -1382,10 +1391,29 @@
  87.457    _node_latency = (GrowableArray<uint> *)0xdeadbeef;
  87.458  }
  87.459  
  87.460 +bool PhaseCFG::do_global_code_motion() {
  87.461 +
  87.462 +  build_dominator_tree();
  87.463 +  if (C->failing()) {
  87.464 +    return false;
  87.465 +  }
  87.466 +
  87.467 +  NOT_PRODUCT( C->verify_graph_edges(); )
  87.468 +
  87.469 +  estimate_block_frequency();
  87.470 +
  87.471 +  global_code_motion();
  87.472 +
  87.473 +  if (C->failing()) {
  87.474 +    return false;
  87.475 +  }
  87.476 +
  87.477 +  return true;
  87.478 +}
  87.479  
  87.480  //------------------------------Estimate_Block_Frequency-----------------------
  87.481  // Estimate block frequencies based on IfNode probabilities.
  87.482 -void PhaseCFG::Estimate_Block_Frequency() {
  87.483 +void PhaseCFG::estimate_block_frequency() {
  87.484  
  87.485    // Force conditional branches leading to uncommon traps to be unlikely,
  87.486    // not because we get to the uncommon_trap with less relative frequency,
  87.487 @@ -1393,7 +1421,7 @@
  87.488    // there once.
  87.489    if (C->do_freq_based_layout()) {
  87.490      Block_List worklist;
  87.491 -    Block* root_blk = _blocks[0];
  87.492 +    Block* root_blk = get_block(0);
  87.493      for (uint i = 1; i < root_blk->num_preds(); i++) {
  87.494        Block *pb = get_block_for_node(root_blk->pred(i));
  87.495        if (pb->has_uncommon_code()) {
  87.496 @@ -1402,7 +1430,9 @@
  87.497      }
  87.498      while (worklist.size() > 0) {
  87.499        Block* uct = worklist.pop();
  87.500 -      if (uct == _broot) continue;
  87.501 +      if (uct == get_root_block()) {
  87.502 +        continue;
  87.503 +      }
  87.504        for (uint i = 1; i < uct->num_preds(); i++) {
  87.505          Block *pb = get_block_for_node(uct->pred(i));
  87.506          if (pb->_num_succs == 1) {
  87.507 @@ -1426,12 +1456,12 @@
  87.508    _root_loop->scale_freq();
  87.509  
  87.510    // Save outmost loop frequency for LRG frequency threshold
  87.511 -  _outer_loop_freq = _root_loop->outer_loop_freq();
  87.512 +  _outer_loop_frequency = _root_loop->outer_loop_freq();
  87.513  
  87.514    // force paths ending at uncommon traps to be infrequent
  87.515    if (!C->do_freq_based_layout()) {
  87.516      Block_List worklist;
  87.517 -    Block* root_blk = _blocks[0];
  87.518 +    Block* root_blk = get_block(0);
  87.519      for (uint i = 1; i < root_blk->num_preds(); i++) {
  87.520        Block *pb = get_block_for_node(root_blk->pred(i));
  87.521        if (pb->has_uncommon_code()) {
  87.522 @@ -1451,8 +1481,8 @@
  87.523    }
  87.524  
  87.525  #ifdef ASSERT
  87.526 -  for (uint i = 0; i < _num_blocks; i++ ) {
  87.527 -    Block *b = _blocks[i];
  87.528 +  for (uint i = 0; i < number_of_blocks(); i++) {
  87.529 +    Block* b = get_block(i);
  87.530      assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  87.531    }
  87.532  #endif
  87.533 @@ -1476,16 +1506,16 @@
  87.534  CFGLoop* PhaseCFG::create_loop_tree() {
  87.535  
  87.536  #ifdef ASSERT
  87.537 -  assert( _blocks[0] == _broot, "" );
  87.538 -  for (uint i = 0; i < _num_blocks; i++ ) {
  87.539 -    Block *b = _blocks[i];
  87.540 +  assert(get_block(0) == get_root_block(), "first block should be root block");
  87.541 +  for (uint i = 0; i < number_of_blocks(); i++) {
  87.542 +    Block* block = get_block(i);
  87.543      // Check that _loop field are clear...we could clear them if not.
  87.544 -    assert(b->_loop == NULL, "clear _loop expected");
  87.545 +    assert(block->_loop == NULL, "clear _loop expected");
  87.546      // Sanity check that the RPO numbering is reflected in the _blocks array.
  87.547      // It doesn't have to be for the loop tree to be built, but if it is not,
  87.548      // then the blocks have been reordered since dom graph building...which
  87.549      // may question the RPO numbering
  87.550 -    assert(b->_rpo == i, "unexpected reverse post order number");
  87.551 +    assert(block->_rpo == i, "unexpected reverse post order number");
  87.552    }
  87.553  #endif
  87.554  
  87.555 @@ -1495,11 +1525,11 @@
  87.556    Block_List worklist;
  87.557  
  87.558    // Assign blocks to loops
  87.559 -  for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
  87.560 -    Block *b = _blocks[i];
  87.561 +  for (uint i = number_of_blocks() - 1; i > 0; i--) { // skip Root block
  87.562 +    Block* block = get_block(i);
  87.563  
  87.564 -    if (b->head()->is_Loop()) {
  87.565 -      Block* loop_head = b;
  87.566 +    if (block->head()->is_Loop()) {
  87.567 +      Block* loop_head = block;
  87.568        assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
  87.569        Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
  87.570        Block* tail = get_block_for_node(tail_n);
  87.571 @@ -1533,23 +1563,23 @@
  87.572  
  87.573    // Create a member list for each loop consisting
  87.574    // of both blocks and (immediate child) loops.
  87.575 -  for (uint i = 0; i < _num_blocks; i++) {
  87.576 -    Block *b = _blocks[i];
  87.577 -    CFGLoop* lp = b->_loop;
  87.578 +  for (uint i = 0; i < number_of_blocks(); i++) {
  87.579 +    Block* block = get_block(i);
  87.580 +    CFGLoop* lp = block->_loop;
  87.581      if (lp == NULL) {
  87.582        // Not assigned to a loop. Add it to the method's pseudo loop.
  87.583 -      b->_loop = root_loop;
  87.584 +      block->_loop = root_loop;
  87.585        lp = root_loop;
  87.586      }
  87.587 -    if (lp == root_loop || b != lp->head()) { // loop heads are already members
  87.588 -      lp->add_member(b);
  87.589 +    if (lp == root_loop || block != lp->head()) { // loop heads are already members
  87.590 +      lp->add_member(block);
  87.591      }
  87.592      if (lp != root_loop) {
  87.593        if (lp->parent() == NULL) {
  87.594          // Not a nested loop. Make it a child of the method's pseudo loop.
  87.595          root_loop->add_nested_loop(lp);
  87.596        }
  87.597 -      if (b == lp->head()) {
  87.598 +      if (block == lp->head()) {
  87.599          // Add nested loop to member list of parent loop.
  87.600          lp->parent()->add_member(lp);
  87.601        }
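
The final hunk's membership pass reduces to a simple rule: every block belongs to some loop, falling back to the method's pseudo root loop, and loop heads are skipped because they are already members of their own loop. A stripped-down sketch of just that rule (Block and Loop here are illustrative stand-ins, not the CFGLoop API, and the nested-loop bookkeeping is omitted):

    #include <vector>

    struct Loop;
    struct Block { Loop* loop = nullptr; };
    struct Loop  { Block* head = nullptr; std::vector<Block*> members; };

    void add_block_members(const std::vector<Block*>& blocks, Loop* root) {
      for (Block* b : blocks) {
        if (b->loop == nullptr) {
          b->loop = root;                 // unassigned blocks join the pseudo root loop
        }
        if (b->loop == root || b != b->loop->head) {
          b->loop->members.push_back(b);  // loop heads are already members
        }
      }
    }
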
    88.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Fri Aug 23 22:12:18 2013 +0100
    88.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Fri Aug 30 09:50:49 2013 +0100
    88.3 @@ -416,7 +416,7 @@
    88.4      if (C->cfg() != NULL) {
    88.5        Block* block = C->cfg()->get_block_for_node(node);
    88.6        if (block == NULL) {
    88.7 -        print_prop("block", C->cfg()->_blocks[0]->_pre_order);
    88.8 +        print_prop("block", C->cfg()->get_block(0)->_pre_order);
    88.9        } else {
   88.10          print_prop("block", block->_pre_order);
   88.11        }
   88.12 @@ -637,10 +637,10 @@
   88.13    if (C->cfg() != NULL) {
   88.14      // once we have a CFG there are some nodes that aren't really
   88.15      // reachable but are in the CFG so add them here.
   88.16 -    for (uint i = 0; i < C->cfg()->_blocks.size(); i++) {
   88.17 -      Block *b = C->cfg()->_blocks[i];
   88.18 -      for (uint s = 0; s < b->_nodes.size(); s++) {
   88.19 -        nodeStack.push(b->_nodes[s]);
   88.20 +    for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
   88.21 +      Block* block = C->cfg()->get_block(i);
   88.22 +      for (uint s = 0; s < block->_nodes.size(); s++) {
   88.23 +        nodeStack.push(block->_nodes[s]);
   88.24        }
   88.25      }
   88.26    }
   88.27 @@ -698,24 +698,24 @@
   88.28    tail(EDGES_ELEMENT);
   88.29    if (C->cfg() != NULL) {
   88.30      head(CONTROL_FLOW_ELEMENT);
   88.31 -    for (uint i = 0; i < C->cfg()->_blocks.size(); i++) {
   88.32 -      Block *b = C->cfg()->_blocks[i];
   88.33 +    for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
   88.34 +      Block* block = C->cfg()->get_block(i);
   88.35        begin_head(BLOCK_ELEMENT);
   88.36 -      print_attr(BLOCK_NAME_PROPERTY, b->_pre_order);
   88.37 +      print_attr(BLOCK_NAME_PROPERTY, block->_pre_order);
   88.38        end_head();
   88.39  
   88.40        head(SUCCESSORS_ELEMENT);
   88.41 -      for (uint s = 0; s < b->_num_succs; s++) {
   88.42 +      for (uint s = 0; s < block->_num_succs; s++) {
   88.43          begin_elem(SUCCESSOR_ELEMENT);
   88.44 -        print_attr(BLOCK_NAME_PROPERTY, b->_succs[s]->_pre_order);
   88.45 +        print_attr(BLOCK_NAME_PROPERTY, block->_succs[s]->_pre_order);
   88.46          end_elem();
   88.47        }
   88.48        tail(SUCCESSORS_ELEMENT);
   88.49  
   88.50        head(NODES_ELEMENT);
   88.51 -      for (uint s = 0; s < b->_nodes.size(); s++) {
   88.52 +      for (uint s = 0; s < block->_nodes.size(); s++) {
   88.53          begin_elem(NODE_ELEMENT);
   88.54 -        print_attr(NODE_ID_PROPERTY, get_node_id(b->_nodes[s]));
   88.55 +        print_attr(NODE_ID_PROPERTY, get_node_id(block->_nodes[s]));
   88.56          end_elem();
   88.57        }
   88.58        tail(NODES_ELEMENT);
    89.1 --- a/src/share/vm/opto/ifg.cpp	Fri Aug 23 22:12:18 2013 +0100
    89.2 +++ b/src/share/vm/opto/ifg.cpp	Fri Aug 30 09:50:49 2013 +0100
    89.3 @@ -37,12 +37,9 @@
    89.4  #include "opto/memnode.hpp"
    89.5  #include "opto/opcodes.hpp"
    89.6  
    89.7 -//=============================================================================
    89.8 -//------------------------------IFG--------------------------------------------
    89.9  PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
   89.10  }
   89.11  
   89.12 -//------------------------------init-------------------------------------------
   89.13  void PhaseIFG::init( uint maxlrg ) {
   89.14    _maxlrg = maxlrg;
   89.15    _yanked = new (_arena) VectorSet(_arena);
   89.16 @@ -59,7 +56,6 @@
   89.17    }
   89.18  }
   89.19  
   89.20 -//------------------------------add--------------------------------------------
   89.21  // Add edge between vertices a & b.  These are sorted (triangular matrix),
   89.22  // then the smaller number is inserted in the larger numbered array.
   89.23  int PhaseIFG::add_edge( uint a, uint b ) {
   89.24 @@ -71,7 +67,6 @@
   89.25    return _adjs[a].insert( b );
   89.26  }
   89.27  
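The interference graph is stored triangularly: the undirected edge {a, b} lives only in the adjacency set of the larger index, which is why add_edge and test_edge sort their arguments first. The same convention in a compact standalone sketch (std::set standing in for IndexSet; names are illustrative):

    #include <algorithm>
    #include <set>
    #include <vector>

    struct TriangularIFG {
      std::vector<std::set<unsigned>> adj;  // adj[a] holds neighbors b with b < a

      explicit TriangularIFG(unsigned maxlrg) : adj(maxlrg) {}

      bool add_edge(unsigned a, unsigned b) {
        if (a < b) std::swap(a, b);      // keep the larger index as the row
        return adj[a].insert(b).second;  // true if the edge is new
      }

      bool test_edge(unsigned a, unsigned b) const {
        if (a < b) std::swap(a, b);
        return adj[a].count(b) != 0;
      }
    };
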
   89.28 -//------------------------------add_vector-------------------------------------
   89.29  // Add an edge between 'a' and everything in the vector.
   89.30  void PhaseIFG::add_vector( uint a, IndexSet *vec ) {
   89.31    // IFG is triangular, so do the inserts where 'a' < 'b'.
   89.32 @@ -86,7 +81,6 @@
   89.33    }
   89.34  }
   89.35  
   89.36 -//------------------------------test-------------------------------------------
   89.37  // Is there an edge between a and b?
   89.38  int PhaseIFG::test_edge( uint a, uint b ) const {
   89.39    // Sort a and b, so that a is larger
   89.40 @@ -95,7 +89,6 @@
   89.41    return _adjs[a].member(b);
   89.42  }
   89.43  
   89.44 -//------------------------------SquareUp---------------------------------------
   89.45  // Convert triangular matrix to square matrix
   89.46  void PhaseIFG::SquareUp() {
   89.47    assert( !_is_square, "only on triangular" );
   89.48 @@ -111,7 +104,6 @@
   89.49    _is_square = true;
   89.50  }
   89.51  
   89.52 -//------------------------------Compute_Effective_Degree-----------------------
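SquareUp mirrors every triangular edge so that each row lists all of its neighbors, which the later coloring passes rely on. A minimal sketch of that conversion over the std::set representation used in the previous example:

    #include <set>
    #include <vector>

    // Visit rows in increasing order: inserts only target rows that were already
    // processed (b < a), so no element is mirrored twice.
    void square_up(std::vector<std::set<unsigned>>& adj) {
      for (unsigned a = 0; a < adj.size(); a++) {
        for (unsigned b : adj[a]) {   // triangular invariant on entry: b < a
          adj[b].insert(a);
        }
      }
    }
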
   89.53  // Compute effective degree in bulk
   89.54  void PhaseIFG::Compute_Effective_Degree() {
   89.55    assert( _is_square, "only on square" );
   89.56 @@ -120,7 +112,6 @@
   89.57      lrgs(i).set_degree(effective_degree(i));
   89.58  }
   89.59  
   89.60 -//------------------------------test_edge_sq-----------------------------------
   89.61  int PhaseIFG::test_edge_sq( uint a, uint b ) const {
   89.62    assert( _is_square, "only on square" );
   89.63    // Swap, so that 'a' has the lesser count.  Then binary search is on
   89.64 @@ -130,7 +121,6 @@
   89.65    return _adjs[a].member(b);
   89.66  }
   89.67  
   89.68 -//------------------------------Union------------------------------------------
   89.69  // Union edges of B into A
   89.70  void PhaseIFG::Union( uint a, uint b ) {
   89.71    assert( _is_square, "only on square" );
   89.72 @@ -146,7 +136,6 @@
   89.73    }
   89.74  }
   89.75  
   89.76 -//------------------------------remove_node------------------------------------
   89.77  // Yank a Node and all connected edges from the IFG.  Return a
   89.78  // list of neighbors (edges) yanked.
   89.79  IndexSet *PhaseIFG::remove_node( uint a ) {
   89.80 @@ -165,7 +154,6 @@
   89.81    return neighbors(a);
   89.82  }
   89.83  
   89.84 -//------------------------------re_insert--------------------------------------
   89.85  // Re-insert a yanked Node.
   89.86  void PhaseIFG::re_insert( uint a ) {
   89.87    assert( _is_square, "only on square" );
   89.88 @@ -180,7 +168,6 @@
   89.89    }
   89.90  }
   89.91  
   89.92 -//------------------------------compute_degree---------------------------------
   89.93  // Compute the degree between 2 live ranges.  If both live ranges are
   89.94  // aligned-adjacent powers-of-2 then we use the MAX size.  If either is
   89.95  // mis-aligned (or for Fat-Projections, not-adjacent) then we have to
   89.96 @@ -196,7 +183,6 @@
   89.97    return tmp;
   89.98  }
   89.99  
  89.100 -//------------------------------effective_degree-------------------------------
  89.101  // Compute effective degree for this live range.  If both live ranges are
  89.102  // aligned-adjacent powers-of-2 then we use the MAX size.  If either is
  89.103  // mis-aligned (or for Fat-Projections, not-adjacent) then we have to
  89.104 @@ -221,7 +207,6 @@
  89.105  
  89.106  
  89.107  #ifndef PRODUCT
  89.108 -//------------------------------dump-------------------------------------------
  89.109  void PhaseIFG::dump() const {
  89.110    tty->print_cr("-- Interference Graph --%s--",
  89.111                  _is_square ? "square" : "triangular" );
  89.112 @@ -260,7 +245,6 @@
  89.113    tty->print("\n");
  89.114  }
  89.115  
  89.116 -//------------------------------stats------------------------------------------
  89.117  void PhaseIFG::stats() const {
  89.118    ResourceMark rm;
  89.119    int *h_cnt = NEW_RESOURCE_ARRAY(int,_maxlrg*2);
  89.120 @@ -276,7 +260,6 @@
  89.121    tty->print_cr("");
  89.122  }
  89.123  
  89.124 -//------------------------------verify-----------------------------------------
  89.125  void PhaseIFG::verify( const PhaseChaitin *pc ) const {
  89.126    // IFG is square, sorted and no need for Find
  89.127    for( uint i = 0; i < _maxlrg; i++ ) {
  89.128 @@ -298,7 +281,6 @@
  89.129  }
  89.130  #endif
  89.131  
  89.132 -//------------------------------interfere_with_live----------------------------
  89.133  // Interfere this register with everything currently live.  Use the RegMasks
  89.134  // to trim the set of possible interferences. Return a count of register-only
  89.135  // interferences as an estimate of register pressure.
  89.136 @@ -315,7 +297,6 @@
  89.137        _ifg->add_edge( r, l );
  89.138  }
  89.139  
  89.140 -//------------------------------build_ifg_virtual------------------------------
  89.141  // Actually build the interference graph.  Uses virtual registers only, no
  89.142  // physical register masks.  This allows me to be very aggressive when
  89.143  // coalescing copies.  Some of this aggressiveness will have to be undone
  89.144 @@ -325,9 +306,9 @@
  89.145  void PhaseChaitin::build_ifg_virtual( ) {
  89.146  
  89.147    // For all blocks (in any order) do...
  89.148 -  for( uint i=0; i<_cfg._num_blocks; i++ ) {
  89.149 -    Block *b = _cfg._blocks[i];
  89.150 -    IndexSet *liveout = _live->live(b);
  89.151 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  89.152 +    Block* block = _cfg.get_block(i);
  89.153 +    IndexSet* liveout = _live->live(block);
  89.154  
  89.155      // The IFG is built by a single reverse pass over each basic block.
  89.156      // Starting with the known live-out set, we remove things that get
  89.157 @@ -337,8 +318,8 @@
  89.158      // The defined value interferes with everything currently live.  The
  89.159      // value is then removed from the live-ness set and its inputs are
  89.160      // added to the live-ness set.
  89.161 -    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
  89.162 -      Node *n = b->_nodes[j-1];
  89.163 +    for (uint j = block->end_idx() + 1; j > 1; j--) {
  89.164 +      Node* n = block->_nodes[j - 1];
  89.165  
  89.166        // Get value being defined
  89.167        uint r = _lrg_map.live_range_id(n);
  89.168 @@ -408,7 +389,6 @@
  89.169    } // End of forall blocks
  89.170  }
  89.171  
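build_ifg_virtual's comment describes the core loop: start from the block's live-out set, walk the instructions backwards, make each definition interfere with everything currently live, then kill the definition and make its inputs live. The same shape in a standalone sketch (live-range ids as plain unsigned, IndexSet replaced by std::set):

    #include <set>
    #include <utility>
    #include <vector>

    struct Instr {
      unsigned def = 0;             // 0 means "defines no live range"
      std::vector<unsigned> uses;
    };

    // One backward pass over a block; returns the interference edges it finds.
    std::vector<std::pair<unsigned, unsigned>>
    build_ifg_for_block(const std::vector<Instr>& block, std::set<unsigned> liveout) {
      std::vector<std::pair<unsigned, unsigned>> edges;
      for (auto it = block.rbegin(); it != block.rend(); ++it) {
        if (it->def != 0) {
          for (unsigned l : liveout) {
            if (l != it->def) edges.emplace_back(it->def, l);  // def vs. everything live
          }
          liveout.erase(it->def);                              // the def is born here
        }
        for (unsigned u : it->uses) {
          liveout.insert(u);                                   // inputs become live
        }
      }
      return edges;
    }
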
  89.172 -//------------------------------count_int_pressure-----------------------------
  89.173  uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
  89.174    IndexSetIterator elements(liveout);
  89.175    uint lidx;
  89.176 @@ -424,7 +404,6 @@
  89.177    return cnt;
  89.178  }
  89.179  
  89.180 -//------------------------------count_float_pressure---------------------------
  89.181  uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
  89.182    IndexSetIterator elements(liveout);
  89.183    uint lidx;
  89.184 @@ -438,7 +417,6 @@
  89.185    return cnt;
  89.186  }
  89.187  
  89.188 -//------------------------------lower_pressure---------------------------------
  89.189  // Adjust register pressure down by 1.  Capture last hi-to-low transition,
  89.190  static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
  89.191    if (lrg->mask().is_UP() && lrg->mask_size()) {
  89.192 @@ -460,40 +438,41 @@
  89.193    }
  89.194  }
  89.195  
  89.196 -//------------------------------build_ifg_physical-----------------------------
  89.197  // Build the interference graph using physical registers when available.
  89.198  // That is, if 2 live ranges are simultaneously alive but in their acceptable
  89.199  // register sets do not overlap, then they do not interfere.
  89.200  uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
  89.201    NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )
  89.202  
  89.203 -  uint spill_reg = LRG::SPILL_REG;
  89.204    uint must_spill = 0;
  89.205  
  89.206    // For all blocks (in any order) do...
  89.207 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
  89.208 -    Block *b = _cfg._blocks[i];
  89.209 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  89.210 +    Block* block = _cfg.get_block(i);
  89.211      // Clone (rather than smash in place) the liveout info, so it is alive
  89.212      // for the "collect_gc_info" phase later.
  89.213 -    IndexSet liveout(_live->live(b));
  89.214 -    uint last_inst = b->end_idx();
  89.215 +    IndexSet liveout(_live->live(block));
  89.216 +    uint last_inst = block->end_idx();
  89.217      // Compute first nonphi node index
  89.218      uint first_inst;
  89.219 -    for( first_inst = 1; first_inst < last_inst; first_inst++ )
  89.220 -      if( !b->_nodes[first_inst]->is_Phi() )
  89.221 +    for (first_inst = 1; first_inst < last_inst; first_inst++) {
  89.222 +      if (!block->_nodes[first_inst]->is_Phi()) {
  89.223          break;
  89.224 +      }
  89.225 +    }
  89.226  
  89.227      // Spills could be inserted before CreateEx node which should be
  89.228      // first instruction in block after Phis. Move CreateEx up.
  89.229 -    for( uint insidx = first_inst; insidx < last_inst; insidx++ ) {
  89.230 -      Node *ex = b->_nodes[insidx];
  89.231 -      if( ex->is_SpillCopy() ) continue;
  89.232 -      if( insidx > first_inst && ex->is_Mach() &&
  89.233 -          ex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
  89.234 +    for (uint insidx = first_inst; insidx < last_inst; insidx++) {
  89.235 +      Node *ex = block->_nodes[insidx];
  89.236 +      if (ex->is_SpillCopy()) {
  89.237 +        continue;
  89.238 +      }
  89.239 +      if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
  89.240          // If the CreateEx isn't above all the MachSpillCopies
  89.241          // then move it to the top.
  89.242 -        b->_nodes.remove(insidx);
  89.243 -        b->_nodes.insert(first_inst, ex);
  89.244 +        block->_nodes.remove(insidx);
  89.245 +        block->_nodes.insert(first_inst, ex);
  89.246        }
  89.247        // Stop once a CreateEx or any other node is found
  89.248        break;
  89.249 @@ -503,12 +482,12 @@
  89.250      uint pressure[2], hrp_index[2];
  89.251      pressure[0] = pressure[1] = 0;
  89.252      hrp_index[0] = hrp_index[1] = last_inst+1;
  89.253 -    b->_reg_pressure = b->_freg_pressure = 0;
  89.254 +    block->_reg_pressure = block->_freg_pressure = 0;
  89.255      // Liveout things are presumed live for the whole block.  We accumulate
  89.256      // 'area' accordingly.  If they get killed in the block, we'll subtract
  89.257      // the unused part of the block from the area.
  89.258      int inst_count = last_inst - first_inst;
  89.259 -    double cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
  89.260 +    double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
  89.261      assert(!(cost < 0.0), "negative spill cost" );
  89.262      IndexSetIterator elements(&liveout);
  89.263      uint lidx;
  89.264 @@ -519,13 +498,15 @@
  89.265        if (lrg.mask().is_UP() && lrg.mask_size()) {
  89.266          if (lrg._is_float || lrg._is_vector) {   // Count float pressure
  89.267            pressure[1] += lrg.reg_pressure();
  89.268 -          if( pressure[1] > b->_freg_pressure )
  89.269 -            b->_freg_pressure = pressure[1];
  89.270 +          if (pressure[1] > block->_freg_pressure) {
  89.271 +            block->_freg_pressure = pressure[1];
  89.272 +          }
  89.273            // Count int pressure, but do not count the SP, flags
  89.274 -        } else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
  89.275 +        } else if (lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
  89.276            pressure[0] += lrg.reg_pressure();
  89.277 -          if( pressure[0] > b->_reg_pressure )
  89.278 -            b->_reg_pressure = pressure[0];
  89.279 +          if (pressure[0] > block->_reg_pressure) {
  89.280 +            block->_reg_pressure = pressure[0];
  89.281 +          }
  89.282          }
  89.283        }
  89.284      }
  89.285 @@ -541,8 +522,8 @@
  89.286      // value is then removed from the live-ness set and its inputs are added
  89.287      // to the live-ness set.
  89.288      uint j;
  89.289 -    for( j = last_inst + 1; j > 1; j-- ) {
  89.290 -      Node *n = b->_nodes[j - 1];
  89.291 +    for (j = last_inst + 1; j > 1; j--) {
  89.292 +      Node* n = block->_nodes[j - 1];
  89.293  
  89.294        // Get value being defined
  89.295        uint r = _lrg_map.live_range_id(n);
  89.296 @@ -551,7 +532,7 @@
  89.297        if(r) {
  89.298          // A DEF normally costs block frequency; rematerialized values are
  89.299          // removed from the DEF sight, so LOWER costs here.
  89.300 -        lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq;
  89.301 +        lrgs(r)._cost += n->rematerialize() ? 0 : block->_freq;
  89.302  
  89.303          // If it is not live, then this instruction is dead.  Probably caused
  89.304          // by spilling and rematerialization.  Who cares why, yank this baby.
  89.305 @@ -560,7 +541,7 @@
  89.306            if( !n->is_Proj() ||
  89.307                // Could also be a flags-projection of a dead ADD or such.
  89.308                (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
  89.309 -            b->_nodes.remove(j - 1);
  89.310 +            block->_nodes.remove(j - 1);
  89.311              if (lrgs(r)._def == n) {
  89.312                lrgs(r)._def = 0;
  89.313              }
  89.314 @@ -580,21 +561,21 @@
  89.315              RegMask itmp = lrgs(r).mask();
  89.316              itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
  89.317              int iregs = itmp.Size();
  89.318 -            if( pressure[0]+iregs > b->_reg_pressure )
  89.319 -              b->_reg_pressure = pressure[0]+iregs;
  89.320 -            if( pressure[0]       <= (uint)INTPRESSURE &&
  89.321 -                pressure[0]+iregs >  (uint)INTPRESSURE ) {
  89.322 -              hrp_index[0] = j-1;
  89.323 +            if (pressure[0] + iregs > block->_reg_pressure) {
  89.324 +              block->_reg_pressure = pressure[0] + iregs;
  89.325 +            }
  89.326 +            if (pressure[0] <= (uint)INTPRESSURE && pressure[0] + iregs > (uint)INTPRESSURE) {
  89.327 +              hrp_index[0] = j - 1;
  89.328              }
  89.329              // Count the float-only registers
  89.330              RegMask ftmp = lrgs(r).mask();
  89.331              ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
  89.332              int fregs = ftmp.Size();
  89.333 -            if( pressure[1]+fregs > b->_freg_pressure )
  89.334 -              b->_freg_pressure = pressure[1]+fregs;
  89.335 -            if( pressure[1]       <= (uint)FLOATPRESSURE &&
  89.336 -                pressure[1]+fregs >  (uint)FLOATPRESSURE ) {
  89.337 -              hrp_index[1] = j-1;
  89.338 +            if (pressure[1] + fregs > block->_freg_pressure) {
  89.339 +              block->_freg_pressure = pressure[1] + fregs;
  89.340 +            }
  89.341 +            if (pressure[1] <= (uint)FLOATPRESSURE && pressure[1] + fregs > (uint)FLOATPRESSURE) {
  89.342 +              hrp_index[1] = j - 1;
  89.343              }
  89.344            }
  89.345  
  89.346 @@ -607,7 +588,7 @@
  89.347            if( n->is_SpillCopy()
  89.348                && lrgs(r).is_singledef()        // MultiDef live range can still split
  89.349                && n->outcnt() == 1              // and use must be in this block
  89.350 -              && _cfg.get_block_for_node(n->unique_out()) == b ) {
  89.351 +              && _cfg.get_block_for_node(n->unique_out()) == block) {
  89.352              // All single-use MachSpillCopy(s) that immediately precede their
  89.353              // use must color early.  If a longer live range steals their
  89.354              // color, the spill copy will split and may push another spill copy
  89.355 @@ -617,14 +598,16 @@
  89.356              //
  89.357  
  89.358              Node *single_use = n->unique_out();
  89.359 -            assert( b->find_node(single_use) >= j, "Use must be later in block");
  89.360 +            assert(block->find_node(single_use) >= j, "Use must be later in block");
  89.361              // Use can be earlier in block if it is a Phi, but then I should be a MultiDef
  89.362  
  89.363              // Find first non SpillCopy 'm' that follows the current instruction
  89.364              // (j - 1) is index for current instruction 'n'
  89.365              Node *m = n;
  89.366 -            for( uint i = j; i <= last_inst && m->is_SpillCopy(); ++i ) { m = b->_nodes[i]; }
  89.367 -            if( m == single_use ) {
  89.368 +            for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
  89.369 +              m = block->_nodes[i];
  89.370 +            }
  89.371 +            if (m == single_use) {
  89.372                lrgs(r)._area = 0.0;
  89.373              }
  89.374            }
  89.375 @@ -633,7 +616,7 @@
  89.376            if( liveout.remove(r) ) {
  89.377              // Adjust register pressure.
  89.378              // Capture last hi-to-lo pressure transition
  89.379 -            lower_pressure( &lrgs(r), j-1, b, pressure, hrp_index );
  89.380 +            lower_pressure(&lrgs(r), j - 1, block, pressure, hrp_index);
  89.381              assert( pressure[0] == count_int_pressure  (&liveout), "" );
  89.382              assert( pressure[1] == count_float_pressure(&liveout), "" );
  89.383            }
  89.384 @@ -646,7 +629,7 @@
  89.385              if (liveout.remove(x)) {
  89.386                lrgs(x)._area -= cost;
  89.387                // Adjust register pressure.
  89.388 -              lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index);
  89.389 +              lower_pressure(&lrgs(x), j - 1, block, pressure, hrp_index);
  89.390                assert( pressure[0] == count_int_pressure  (&liveout), "" );
  89.391                assert( pressure[1] == count_float_pressure(&liveout), "" );
  89.392              }
  89.393 @@ -718,7 +701,7 @@
  89.394  
  89.395        // Area remaining in the block
  89.396        inst_count--;
  89.397 -      cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
  89.398 +      cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
  89.399  
  89.400        // Make all inputs live
  89.401        if( !n->is_Phi() ) {      // Phi function uses come from prior block
  89.402 @@ -743,7 +726,7 @@
  89.403            if (k < debug_start) {
  89.404              // A USE costs twice block frequency (once for the Load, once
  89.405              // for a Load-delay).  Rematerialized uses only cost once.
  89.406 -            lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq));
  89.407 +            lrg._cost += (def->rematerialize() ? block->_freq : (block->_freq + block->_freq));
  89.408            }
  89.409            // It is live now
  89.410            if (liveout.insert(x)) {
  89.411 @@ -753,12 +736,14 @@
  89.412              if (lrg.mask().is_UP() && lrg.mask_size()) {
  89.413                if (lrg._is_float || lrg._is_vector) {
  89.414                  pressure[1] += lrg.reg_pressure();
  89.415 -                if( pressure[1] > b->_freg_pressure )
  89.416 -                  b->_freg_pressure = pressure[1];
  89.417 +                if (pressure[1] > block->_freg_pressure) {
  89.418 +                  block->_freg_pressure = pressure[1];
  89.419 +                }
  89.420                } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
  89.421                  pressure[0] += lrg.reg_pressure();
  89.422 -                if( pressure[0] > b->_reg_pressure )
  89.423 -                  b->_reg_pressure = pressure[0];
  89.424 +                if (pressure[0] > block->_reg_pressure) {
  89.425 +                  block->_reg_pressure = pressure[0];
  89.426 +                }
  89.427                }
  89.428              }
  89.429              assert( pressure[0] == count_int_pressure  (&liveout), "" );
  89.430 @@ -772,44 +757,47 @@
  89.431      // If we run off the top of the block with high pressure and
  89.432      // never see a hi-to-low pressure transition, just record that
  89.433      // the whole block is high pressure.
  89.434 -    if( pressure[0] > (uint)INTPRESSURE   ) {
  89.435 +    if (pressure[0] > (uint)INTPRESSURE) {
  89.436        hrp_index[0] = 0;
  89.437 -      if( pressure[0] > b->_reg_pressure )
  89.438 -        b->_reg_pressure = pressure[0];
  89.439 +      if (pressure[0] > block->_reg_pressure) {
  89.440 +        block->_reg_pressure = pressure[0];
  89.441 +      }
  89.442      }
  89.443 -    if( pressure[1] > (uint)FLOATPRESSURE ) {
  89.444 +    if (pressure[1] > (uint)FLOATPRESSURE) {
  89.445        hrp_index[1] = 0;
  89.446 -      if( pressure[1] > b->_freg_pressure )
  89.447 -        b->_freg_pressure = pressure[1];
  89.448 +      if (pressure[1] > block->_freg_pressure) {
  89.449 +        block->_freg_pressure = pressure[1];
  89.450 +      }
  89.451      }
  89.452  
  89.453      // Compute high pressure index; avoid landing in the middle of projnodes
  89.454      j = hrp_index[0];
  89.455 -    if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
  89.456 -      Node *cur = b->_nodes[j];
  89.457 -      while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
  89.458 +    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
  89.459 +      Node* cur = block->_nodes[j];
  89.460 +      while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
  89.461          j--;
  89.462 -        cur = b->_nodes[j];
  89.463 +        cur = block->_nodes[j];
  89.464        }
  89.465      }
  89.466 -    b->_ihrp_index = j;
  89.467 +    block->_ihrp_index = j;
  89.468      j = hrp_index[1];
  89.469 -    if( j < b->_nodes.size() && j < b->end_idx()+1 ) {
  89.470 -      Node *cur = b->_nodes[j];
  89.471 -      while( cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch() ) {
  89.472 +    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
  89.473 +      Node* cur = block->_nodes[j];
  89.474 +      while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
  89.475          j--;
  89.476 -        cur = b->_nodes[j];
  89.477 +        cur = block->_nodes[j];
  89.478        }
  89.479      }
  89.480 -    b->_fhrp_index = j;
  89.481 +    block->_fhrp_index = j;
  89.482  
  89.483  #ifndef PRODUCT
  89.484      // Gather Register Pressure Statistics
  89.485      if( PrintOptoStatistics ) {
  89.486 -      if( b->_reg_pressure > (uint)INTPRESSURE || b->_freg_pressure > (uint)FLOATPRESSURE )
  89.487 +      if (block->_reg_pressure > (uint)INTPRESSURE || block->_freg_pressure > (uint)FLOATPRESSURE) {
  89.488          _high_pressure++;
  89.489 -      else
  89.490 +      } else {
  89.491          _low_pressure++;
  89.492 +      }
  89.493      }
  89.494  #endif
  89.495    } // End of for all blocks
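
Most of the remaining ifg.cpp hunks are the pressure bookkeeping in build_ifg_physical: for the current live set, sum the register demand of every live range that must sit in a register, split by int and float class, and track the block-wide maximum plus the last point where pressure crossed INTPRESSURE or FLOATPRESSURE. The counting part, reduced to a sketch with illustrative types:

    #include <vector>

    struct LiveRange {
      bool     needs_register;  // stands in for mask().is_UP() && mask_size()
      bool     is_float;        // float/vector class vs. int class
      unsigned reg_pressure;    // registers this live range occupies
    };

    // Pressure of one register class over the currently-live ranges; callers keep
    // a running per-block maximum, as build_ifg_physical does with _reg_pressure.
    unsigned count_pressure(const std::vector<LiveRange>& live, bool float_class) {
      unsigned total = 0;
      for (const LiveRange& lrg : live) {
        if (lrg.needs_register && lrg.is_float == float_class) {
          total += lrg.reg_pressure;
        }
      }
      return total;
    }
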
    90.1 --- a/src/share/vm/opto/lcm.cpp	Fri Aug 23 22:12:18 2013 +0100
    90.2 +++ b/src/share/vm/opto/lcm.cpp	Fri Aug 30 09:50:49 2013 +0100
    90.3 @@ -501,7 +501,7 @@
    90.4        n_choice = 1;
    90.5      }
    90.6  
    90.7 -    uint n_latency = cfg->_node_latency->at_grow(n->_idx);
    90.8 +    uint n_latency = cfg->get_latency_for_node(n);
    90.9      uint n_score   = n->req();   // Many inputs get high score to break ties
   90.10  
   90.11      // Keep best latency found
   90.12 @@ -797,7 +797,7 @@
   90.13          Node     *n = _nodes[j];
   90.14          int     idx = n->_idx;
   90.15          tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
   90.16 -        tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
   90.17 +        tty->print("latency:%3d  ", cfg->get_latency_for_node(n));
   90.18          tty->print("%4d: %s\n", idx, n->Name());
   90.19        }
   90.20      }
   90.21 @@ -825,7 +825,7 @@
   90.22  #ifndef PRODUCT
   90.23      if (cfg->trace_opto_pipelining()) {
   90.24        tty->print("#    select %d: %s", n->_idx, n->Name());
   90.25 -      tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
   90.26 +      tty->print(", latency:%d", cfg->get_latency_for_node(n));
   90.27        n->dump();
   90.28        if (Verbose) {
   90.29          tty->print("#   ready list:");
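
The lcm.cpp hunks only change how the scheduler reads a node's latency; the surrounding selection heuristic (keep the best latency found, with many inputs as a tie-breaking score, per the nearby comments) is untouched. As a generic sketch of that kind of latency-driven pick from a ready list, with invented field names and assuming a non-empty list:

    #include <vector>

    struct ReadyNode {
      unsigned latency;  // longest remaining path, as get_latency_for_node reports
      unsigned score;    // secondary tie-breaker, e.g. the node's input count
    };

    size_t select_ready(const std::vector<ReadyNode>& ready) {
      size_t best = 0;
      for (size_t i = 1; i < ready.size(); i++) {
        if (ready[i].latency > ready[best].latency ||
            (ready[i].latency == ready[best].latency && ready[i].score > ready[best].score)) {
          best = i;
        }
      }
      return best;
    }
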
    91.1 --- a/src/share/vm/opto/library_call.cpp	Fri Aug 23 22:12:18 2013 +0100
    91.2 +++ b/src/share/vm/opto/library_call.cpp	Fri Aug 30 09:50:49 2013 +0100
    91.3 @@ -213,6 +213,7 @@
    91.4    void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
    91.5    bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
    91.6    bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
    91.7 +  static bool klass_needs_init_guard(Node* kls);
    91.8    bool inline_unsafe_allocate();
    91.9    bool inline_unsafe_copyMemory();
   91.10    bool inline_native_currentThread();
   91.11 @@ -2892,8 +2893,21 @@
   91.12    }
   91.13  }
   91.14  
   91.15 +bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
   91.16 +  if (!kls->is_Con()) {
   91.17 +    return true;
   91.18 +  }
   91.19 +  const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
   91.20 +  if (klsptr == NULL) {
   91.21 +    return true;
   91.22 +  }
   91.23 +  ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
   91.24 +  // don't need a guard for a klass that is already initialized
   91.25 +  return !ik->is_initialized();
   91.26 +}
   91.27 +
   91.28  //----------------------------inline_unsafe_allocate---------------------------
   91.29 -// public native Object sun.mics.Unsafe.allocateInstance(Class<?> cls);
   91.30 +// public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
   91.31  bool LibraryCallKit::inline_unsafe_allocate() {
   91.32    if (callee()->is_static())  return false;  // caller must have the capability!
   91.33  
   91.34 @@ -2905,16 +2919,19 @@
   91.35    kls = null_check(kls);
   91.36    if (stopped())  return true;  // argument was like int.class
   91.37  
   91.38 -  // Note:  The argument might still be an illegal value like
   91.39 -  // Serializable.class or Object[].class.   The runtime will handle it.
   91.40 -  // But we must make an explicit check for initialization.
   91.41 -  Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
   91.42 -  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
   91.43 -  // can generate code to load it as unsigned byte.
   91.44 -  Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
   91.45 -  Node* bits = intcon(InstanceKlass::fully_initialized);
   91.46 -  Node* test = _gvn.transform(new (C) SubINode(inst, bits));
   91.47 -  // The 'test' is non-zero if we need to take a slow path.
   91.48 +  Node* test = NULL;
   91.49 +  if (LibraryCallKit::klass_needs_init_guard(kls)) {
   91.50 +    // Note:  The argument might still be an illegal value like
   91.51 +    // Serializable.class or Object[].class.   The runtime will handle it.
   91.52 +    // But we must make an explicit check for initialization.
   91.53 +    Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
   91.54 +    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
   91.55 +    // can generate code to load it as unsigned byte.
   91.56 +    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
   91.57 +    Node* bits = intcon(InstanceKlass::fully_initialized);
   91.58 +    test = _gvn.transform(new (C) SubINode(inst, bits));
   91.59 +    // The 'test' is non-zero if we need to take a slow path.
   91.60 +  }
   91.61  
   91.62    Node* obj = new_instance(kls, test);
   91.63    set_result(obj);
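
The effect of the new klass_needs_init_guard is that new_instance receives a NULL test, so the runtime initialization check can be omitted, whenever the class argument is a compile-time constant that is already initialized. The decision itself is a three-step predicate; restated outside the compiler's node and type system purely for illustration:

    // Illustrative restatement only; the real check works on Node/TypeKlassPtr.
    struct KlassInfo {
      bool is_constant;     // kls->is_Con()
      bool is_instance;     // bottom_type()->isa_klassptr() != NULL
      bool is_initialized;  // ik->is_initialized()
    };

    bool needs_init_guard(const KlassInfo& k) {
      if (!k.is_constant)  return true;   // class unknown at compile time
      if (!k.is_instance)  return true;   // not provably an instance klass
      return !k.is_initialized;           // initialized constants need no guard
    }
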
    92.1 --- a/src/share/vm/opto/live.cpp	Fri Aug 23 22:12:18 2013 +0100
    92.2 +++ b/src/share/vm/opto/live.cpp	Fri Aug 30 09:50:49 2013 +0100
    92.3 @@ -30,9 +30,6 @@
    92.4  #include "opto/machnode.hpp"
    92.5  
    92.6  
    92.7 -
    92.8 -//=============================================================================
    92.9 -//------------------------------PhaseLive--------------------------------------
   92.10  // Compute live-in/live-out.  We use a totally incremental algorithm.  The LIVE
   92.11  // problem is monotonic.  The steady-state solution looks like this: pull a
   92.12  // block from the worklist.  It has a set of delta's - values which are newly
   92.13 @@ -53,9 +50,9 @@
   92.14  
   92.15    // Init the sparse live arrays.  This data is live on exit from here!
   92.16    // The _live info is the live-out info.
   92.17 -  _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*_cfg._num_blocks);
   92.18 +  _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
   92.19    uint i;
   92.20 -  for( i=0; i<_cfg._num_blocks; i++ ) {
   92.21 +  for (i = 0; i < _cfg.number_of_blocks(); i++) {
   92.22      _live[i].initialize(_maxlrg);
   92.23    }
   92.24  
   92.25 @@ -65,14 +62,14 @@
   92.26    // Does the memory used by _defs and _deltas get reclaimed?  Does it matter?  TT
   92.27  
   92.28    // Array of values defined locally in blocks
   92.29 -  _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg._num_blocks);
   92.30 -  for( i=0; i<_cfg._num_blocks; i++ ) {
   92.31 +  _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg.number_of_blocks());
   92.32 +  for (i = 0; i < _cfg.number_of_blocks(); i++) {
   92.33      _defs[i].initialize(_maxlrg);
   92.34    }
   92.35  
   92.36    // Array of delta-set pointers, indexed by block pre_order-1.
   92.37 -  _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg._num_blocks);
   92.38 -  memset( _deltas, 0, sizeof(IndexSet*)* _cfg._num_blocks);
   92.39 +  _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks());
   92.40 +  memset( _deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks());
   92.41  
   92.42    _free_IndexSet = NULL;
   92.43  
   92.44 @@ -80,31 +77,32 @@
   92.45    VectorSet first_pass(Thread::current()->resource_area());
   92.46  
   92.47    // Outer loop: must compute local live-in sets and push into predecessors.
   92.48 -  uint iters = _cfg._num_blocks;        // stat counters
   92.49 -  for( uint j=_cfg._num_blocks; j>0; j-- ) {
   92.50 -    Block *b = _cfg._blocks[j-1];
   92.51 +  for (uint j = _cfg.number_of_blocks(); j > 0; j--) {
   92.52 +    Block* block = _cfg.get_block(j - 1);
   92.53  
   92.54      // Compute the local live-in set.  Start with any new live-out bits.
   92.55 -    IndexSet *use = getset( b );
   92.56 -    IndexSet *def = &_defs[b->_pre_order-1];
   92.57 +    IndexSet* use = getset(block);
   92.58 +    IndexSet* def = &_defs[block->_pre_order-1];
   92.59      DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
   92.60      uint i;
   92.61 -    for( i=b->_nodes.size(); i>1; i-- ) {
   92.62 -      Node *n = b->_nodes[i-1];
   92.63 -      if( n->is_Phi() ) break;
   92.64 +    for (i = block->_nodes.size(); i > 1; i--) {
   92.65 +      Node* n = block->_nodes[i-1];
   92.66 +      if (n->is_Phi()) {
   92.67 +        break;
   92.68 +      }
   92.69  
   92.70        uint r = _names[n->_idx];
   92.71        assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
   92.72        def->insert( r );
   92.73        use->remove( r );
   92.74        uint cnt = n->req();
   92.75 -      for( uint k=1; k<cnt; k++ ) {
   92.76 +      for (uint k = 1; k < cnt; k++) {
   92.77          Node *nk = n->in(k);
   92.78          uint nkidx = nk->_idx;
   92.79 -        if (_cfg.get_block_for_node(nk) != b) {
   92.80 +        if (_cfg.get_block_for_node(nk) != block) {
   92.81            uint u = _names[nkidx];
   92.82 -          use->insert( u );
   92.83 -          DEBUG_ONLY(def_outside->insert( u );)
   92.84 +          use->insert(u);
   92.85 +          DEBUG_ONLY(def_outside->insert(u);)
   92.86          }
   92.87        }
   92.88      }
   92.89 @@ -113,41 +111,38 @@
   92.90      _free_IndexSet = def_outside;     // Drop onto free list
   92.91  #endif
   92.92      // Remove anything defined by Phis and the block start instruction
   92.93 -    for( uint k=i; k>0; k-- ) {
   92.94 -      uint r = _names[b->_nodes[k-1]->_idx];
   92.95 -      def->insert( r );
   92.96 -      use->remove( r );
   92.97 +    for (uint k = i; k > 0; k--) {
   92.98 +      uint r = _names[block->_nodes[k - 1]->_idx];
   92.99 +      def->insert(r);
  92.100 +      use->remove(r);
  92.101      }
  92.102  
  92.103      // Push these live-in things to predecessors
  92.104 -    for( uint l=1; l<b->num_preds(); l++ ) {
  92.105 -      Block *p = _cfg.get_block_for_node(b->pred(l));
  92.106 -      add_liveout( p, use, first_pass );
  92.107 +    for (uint l = 1; l < block->num_preds(); l++) {
  92.108 +      Block* p = _cfg.get_block_for_node(block->pred(l));
  92.109 +      add_liveout(p, use, first_pass);
  92.110  
  92.111        // PhiNode uses go in the live-out set of prior blocks.
  92.112 -      for( uint k=i; k>0; k-- )
  92.113 -        add_liveout( p, _names[b->_nodes[k-1]->in(l)->_idx], first_pass );
  92.114 +      for (uint k = i; k > 0; k--) {
  92.115 +        add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
  92.116 +      }
  92.117      }
  92.118 -    freeset( b );
  92.119 -    first_pass.set(b->_pre_order);
  92.120 +    freeset(block);
  92.121 +    first_pass.set(block->_pre_order);
  92.122  
  92.123      // Inner loop: blocks that picked up new live-out values to be propagated
  92.124 -    while( _worklist->size() ) {
  92.125 -        // !!!!!
  92.126 -// #ifdef ASSERT
  92.127 -      iters++;
  92.128 -// #endif
  92.129 -      Block *b = _worklist->pop();
  92.130 -      IndexSet *delta = getset(b);
  92.131 +    while (_worklist->size()) {
  92.132 +      Block* block = _worklist->pop();
  92.133 +      IndexSet *delta = getset(block);
  92.134        assert( delta->count(), "missing delta set" );
  92.135  
  92.136        // Add new-live-in to predecessors live-out sets
  92.137 -      for (uint l = 1; l < b->num_preds(); l++) {
  92.138 -        Block* block = _cfg.get_block_for_node(b->pred(l));
  92.139 -        add_liveout(block, delta, first_pass);
  92.140 +      for (uint l = 1; l < block->num_preds(); l++) {
  92.141 +        Block* predecessor = _cfg.get_block_for_node(block->pred(l));
  92.142 +        add_liveout(predecessor, delta, first_pass);
  92.143        }
  92.144  
  92.145 -      freeset(b);
  92.146 +      freeset(block);
  92.147      } // End of while-worklist-not-empty
  92.148  
  92.149    } // End of for-all-blocks-outer-loop
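
This is the steady-state algorithm the comment block above describes: compute each block's local live-in, push it into the predecessors' live-out sets, and keep a worklist of blocks whose live-out grew. A simplified standalone version (it re-pushes full live-in sets instead of tracking delta sets, and uses std::set rather than IndexSet):

    #include <queue>
    #include <set>
    #include <vector>

    struct BlockSets {
      std::set<unsigned> use;   // upward-exposed uses (gen)
      std::set<unsigned> def;   // locally defined live ranges (kill)
      std::vector<int>   preds; // predecessor block indices
    };

    std::vector<std::set<unsigned>> compute_liveout(const std::vector<BlockSets>& blocks) {
      std::vector<std::set<unsigned>> liveout(blocks.size());
      std::queue<int> worklist;
      for (int b = 0; b < (int)blocks.size(); b++) worklist.push(b);
      while (!worklist.empty()) {
        int b = worklist.front();
        worklist.pop();
        // live-in(b) = use(b) U (live-out(b) \ def(b))
        std::set<unsigned> livein = blocks[b].use;
        for (unsigned v : liveout[b]) {
          if (blocks[b].def.count(v) == 0) livein.insert(v);
        }
        // Push new live-in values into every predecessor's live-out set.
        for (int p : blocks[b].preds) {
          bool grew = false;
          for (unsigned v : livein) {
            if (liveout[p].insert(v).second) grew = true;
          }
          if (grew) worklist.push(p);  // only re-queue blocks that gained something
        }
      }
      return liveout;
    }
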
  92.150 @@ -155,7 +150,7 @@
  92.151    // We explicitly clear all of the IndexSets which we are about to release.
  92.152    // This allows us to recycle their internal memory into IndexSet's free list.
  92.153  
  92.154 -  for( i=0; i<_cfg._num_blocks; i++ ) {
  92.155 +  for (i = 0; i < _cfg.number_of_blocks(); i++) {
  92.156      _defs[i].clear();
  92.157      if (_deltas[i]) {
  92.158        // Is this always true?
  92.159 @@ -171,13 +166,11 @@
  92.160  
  92.161  }
  92.162  
  92.163 -//------------------------------stats------------------------------------------
  92.164  #ifndef PRODUCT
  92.165  void PhaseLive::stats(uint iters) const {
  92.166  }
  92.167  #endif
  92.168  
  92.169 -//------------------------------getset-----------------------------------------
  92.170  // Get an IndexSet for a block.  Return existing one, if any.  Make a new
  92.171  // empty one if a prior one does not exist.
  92.172  IndexSet *PhaseLive::getset( Block *p ) {
  92.173 @@ -188,7 +181,6 @@
  92.174    return delta;                 // Return set of new live-out items
  92.175  }
  92.176  
  92.177 -//------------------------------getfreeset-------------------------------------
  92.178  // Pull from free list, or allocate.  Internal allocation on the returned set
  92.179  // is always from thread local storage.
  92.180  IndexSet *PhaseLive::getfreeset( ) {
  92.181 @@ -207,7 +199,6 @@
  92.182    return f;
  92.183  }
  92.184  
  92.185 -//------------------------------freeset----------------------------------------
  92.186  // Free an IndexSet from a block.
  92.187  void PhaseLive::freeset( const Block *p ) {
  92.188    IndexSet *f = _deltas[p->_pre_order-1];
  92.189 @@ -216,7 +207,6 @@
  92.190    _deltas[p->_pre_order-1] = NULL;
  92.191  }
  92.192  
  92.193 -//------------------------------add_liveout------------------------------------
  92.194  // Add a live-out value to a given blocks live-out set.  If it is new, then
  92.195  // also add it to the delta set and stick the block on the worklist.
  92.196  void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
  92.197 @@ -233,8 +223,6 @@
  92.198    }
  92.199  }
  92.200  
  92.201 -
  92.202 -//------------------------------add_liveout------------------------------------
  92.203  // Add a vector of live-out values to a given block's live-out set.
  92.204  void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
  92.205    IndexSet *live = &_live[p->_pre_order-1];
  92.206 @@ -262,7 +250,6 @@
  92.207  }
  92.208  
  92.209  #ifndef PRODUCT
  92.210 -//------------------------------dump-------------------------------------------
  92.211  // Dump the live-out set for a block
  92.212  void PhaseLive::dump( const Block *b ) const {
  92.213    tty->print("Block %d: ",b->_pre_order);
  92.214 @@ -275,18 +262,19 @@
  92.215    tty->print("\n");
  92.216  }
  92.217  
  92.218 -//------------------------------verify_base_ptrs-------------------------------
  92.219  // Verify that base pointers and derived pointers are still sane.
  92.220  void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
  92.221  #ifdef ASSERT
  92.222    Unique_Node_List worklist(a);
  92.223 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
  92.224 -    Block *b = _cfg._blocks[i];
  92.225 -    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
  92.226 -      Node *n = b->_nodes[j-1];
  92.227 -      if( n->is_Phi() ) break;
  92.228 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
  92.229 +    Block* block = _cfg.get_block(i);
  92.230 +    for (uint j = block->end_idx() + 1; j > 1; j--) {
  92.231 +      Node* n = block->_nodes[j-1];
  92.232 +      if (n->is_Phi()) {
  92.233 +        break;
  92.234 +      }
  92.235        // Found a safepoint?
  92.236 -      if( n->is_MachSafePoint() ) {
  92.237 +      if (n->is_MachSafePoint()) {
  92.238          MachSafePointNode *sfpt = n->as_MachSafePoint();
  92.239          JVMState* jvms = sfpt->jvms();
  92.240          if (jvms != NULL) {
  92.241 @@ -358,7 +346,6 @@
  92.242  #endif
  92.243  }
  92.244  
  92.245 -//------------------------------verify-------------------------------------
  92.246  // Verify that graphs and base pointers are still sane.
  92.247  void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
  92.248  #ifdef ASSERT
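
For context on the live.cpp hunk above: the refactored loop pops a block from the worklist, takes its pending delta of newly-live values, merges that delta into each predecessor's live-out set, and requeues any predecessor whose set grew. Below is a minimal, self-contained sketch of that worklist pattern; it uses standard containers and a toy CFG, so the types and the liveness details (defs, kills, IndexSet recycling) are simplifications, not the HotSpot code.

    // Sketch: backward live-out propagation with a worklist (simplified
    // stand-in for PhaseLive's delta/IndexSet machinery, not HotSpot code).
    #include <cstdio>
    #include <deque>
    #include <set>
    #include <vector>

    struct Blk {
      std::vector<int> preds;      // predecessor block ids
      std::set<int>    live_out;   // values live on exit from this block
      std::set<int>    delta;      // newly discovered live values to push up
      bool             on_worklist = false;
    };

    static void propagate(std::vector<Blk>& cfg, std::deque<int>& worklist) {
      while (!worklist.empty()) {
        int bid = worklist.front();
        worklist.pop_front();
        Blk& block = cfg[bid];
        block.on_worklist = false;
        std::set<int> delta;
        delta.swap(block.delta);           // consume the pending delta
        for (int p : block.preds) {        // add new-live values to predecessors' live-out
          Blk& pred = cfg[p];
          for (int v : delta) {
            if (pred.live_out.insert(v).second) {
              pred.delta.insert(v);        // the set grew: remember what changed
              if (!pred.on_worklist) {
                pred.on_worklist = true;
                worklist.push_back(p);
              }
            }
          }
        }
      }
    }

    int main() {
      // Tiny CFG: 0 -> 1 -> 2, value 7 becomes live at block 2.
      std::vector<Blk> cfg(3);
      cfg[1].preds = {0};
      cfg[2].preds = {1};
      cfg[2].delta = {7};
      std::deque<int> worklist = {2};
      cfg[2].on_worklist = true;
      propagate(cfg, worklist);
      std::printf("live-out of block 0: %zu entries\n", cfg[0].live_out.size());
      std::printf("live-out of block 1: %zu entries\n", cfg[1].live_out.size());
      return 0;
    }

In the sketch, freeset()'s role of recycling the consumed delta is played by the swap into a local set.
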
    93.1 --- a/src/share/vm/opto/matcher.cpp	Fri Aug 23 22:12:18 2013 +0100
    93.2 +++ b/src/share/vm/opto/matcher.cpp	Fri Aug 30 09:50:49 2013 +0100
    93.3 @@ -67,8 +67,8 @@
    93.4  const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;
    93.5  
    93.6  //---------------------------Matcher-------------------------------------------
    93.7 -Matcher::Matcher( Node_List &proj_list ) :
    93.8 -  PhaseTransform( Phase::Ins_Select ),
    93.9 +Matcher::Matcher()
   93.10 +: PhaseTransform( Phase::Ins_Select ),
   93.11  #ifdef ASSERT
   93.12    _old2new_map(C->comp_arena()),
   93.13    _new2old_map(C->comp_arena()),
   93.14 @@ -78,7 +78,7 @@
   93.15    _swallowed(swallowed),
   93.16    _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
   93.17    _end_inst_chain_rule(_END_INST_CHAIN_RULE),
   93.18 -  _must_clone(must_clone), _proj_list(proj_list),
   93.19 +  _must_clone(must_clone),
   93.20    _register_save_policy(register_save_policy),
   93.21    _c_reg_save_policy(c_reg_save_policy),
   93.22    _register_save_type(register_save_type),
   93.23 @@ -1304,8 +1304,9 @@
   93.24        for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
   93.25          proj->_rout.Insert(OptoReg::Name(i));
   93.26      }
   93.27 -    if( proj->_rout.is_NotEmpty() )
   93.28 -      _proj_list.push(proj);
   93.29 +    if (proj->_rout.is_NotEmpty()) {
   93.30 +      push_projection(proj);
   93.31 +    }
   93.32    }
   93.33    // Transfer the safepoint information from the call to the mcall
   93.34    // Move the JVMState list
   93.35 @@ -1685,14 +1686,15 @@
   93.36    }
   93.37  
   93.38    // If the _leaf is an AddP, insert the base edge
   93.39 -  if( leaf->is_AddP() )
   93.40 +  if (leaf->is_AddP()) {
   93.41      mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
   93.42 +  }
   93.43  
   93.44 -  uint num_proj = _proj_list.size();
   93.45 +  uint number_of_projections_prior = number_of_projections();
   93.46  
   93.47    // Perform any 1-to-many expansions required
   93.48 -  MachNode *ex = mach->Expand(s,_proj_list, mem);
   93.49 -  if( ex != mach ) {
   93.50 +  MachNode *ex = mach->Expand(s, _projection_list, mem);
   93.51 +  if (ex != mach) {
   93.52      assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
   93.53      if( ex->in(1)->is_Con() )
   93.54        ex->in(1)->set_req(0, C->root());
   93.55 @@ -1713,7 +1715,7 @@
   93.56    // generated belatedly during spill code generation.
   93.57    if (_allocation_started) {
   93.58      guarantee(ex == mach, "no expand rules during spill generation");
   93.59 -    guarantee(_proj_list.size() == num_proj, "no allocation during spill generation");
   93.60 +    guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
   93.61    }
   93.62  
   93.63    if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
    94.1 --- a/src/share/vm/opto/matcher.hpp	Fri Aug 23 22:12:18 2013 +0100
    94.2 +++ b/src/share/vm/opto/matcher.hpp	Fri Aug 30 09:50:49 2013 +0100
    94.3 @@ -88,7 +88,7 @@
    94.4  
    94.5    Node *transform( Node *dummy );
    94.6  
    94.7 -  Node_List &_proj_list;        // For Machine nodes killing many values
    94.8 +  Node_List _projection_list;        // For Machine nodes killing many values
    94.9  
   94.10    Node_Array _shared_nodes;
   94.11  
   94.12 @@ -183,10 +183,30 @@
   94.13    void collect_null_checks( Node *proj, Node *orig_proj );
   94.14    void validate_null_checks( );
   94.15  
   94.16 -  Matcher( Node_List &proj_list );
   94.17 +  Matcher();
   94.18 +
   94.19 +  // Get a projection node at position pos
   94.20 +  Node* get_projection(uint pos) {
   94.21 +    return _projection_list[pos];
   94.22 +  }
   94.23 +
   94.24 +  // Push a projection node onto the projection list
   94.25 +  void push_projection(Node* node) {
   94.26 +    _projection_list.push(node);
   94.27 +  }
   94.28 +
   94.29 +  Node* pop_projection() {
   94.30 +    return _projection_list.pop();
   94.31 +  }
   94.32 +
   94.33 +  // Number of nodes in the projection list
   94.34 +  uint number_of_projections() const {
   94.35 +    return _projection_list.size();
   94.36 +  }
   94.37  
   94.38    // Select instructions for entire method
   94.39 -  void  match( );
   94.40 +  void match();
   94.41 +
   94.42    // Helper for match
   94.43    OptoReg::Name warp_incoming_stk_arg( VMReg reg );
   94.44  
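
The matcher.hpp change above replaces the externally supplied Node_List reference with an owned _projection_list plus four small accessors. A sketch of the same encapsulation pattern follows, with a placeholder Node type and std::vector standing in for Node_List (illustrative only, not the HotSpot types):

    // Sketch: owning a growable projection list behind narrow accessors,
    // mirroring get/push/pop/number_of_projections (placeholder types only).
    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Node { int id; };  // stand-in for the real Node class

    class ProjectionOwner {
      std::vector<Node*> _projection_list;  // owned, no external reference needed
     public:
      Node* get_projection(std::size_t pos) const { return _projection_list[pos]; }
      void push_projection(Node* n)               { _projection_list.push_back(n); }
      Node* pop_projection() {
        Node* n = _projection_list.back();
        _projection_list.pop_back();
        return n;
      }
      std::size_t number_of_projections() const   { return _projection_list.size(); }
    };

    int main() {
      ProjectionOwner m;
      Node a{1}, b{2};
      m.push_projection(&a);
      m.push_projection(&b);
      assert(m.number_of_projections() == 2);
      assert(m.pop_projection() == &b);
      assert(m.get_projection(0) == &a);
      return 0;
    }

With the list owned by the class, the matcher.cpp guarantee that spill-time expansion adds no projections reduces to comparing number_of_projections() before and after Expand().
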
    95.1 --- a/src/share/vm/opto/output.cpp	Fri Aug 23 22:12:18 2013 +0100
    95.2 +++ b/src/share/vm/opto/output.cpp	Fri Aug 30 09:50:49 2013 +0100
    95.3 @@ -54,11 +54,10 @@
    95.4  extern int emit_exception_handler(CodeBuffer &cbuf);
    95.5  extern int emit_deopt_handler(CodeBuffer &cbuf);
    95.6  
    95.7 -//------------------------------Output-----------------------------------------
    95.8  // Convert Nodes to instruction bits and pass off to the VM
    95.9  void Compile::Output() {
   95.10    // RootNode goes
   95.11 -  assert( _cfg->_broot->_nodes.size() == 0, "" );
   95.12 +  assert( _cfg->get_root_block()->_nodes.size() == 0, "" );
   95.13  
   95.14    // The number of new nodes (mostly MachNop) is proportional to
   95.15    // the number of java calls and inner loops which are aligned.
   95.16 @@ -68,8 +67,8 @@
   95.17      return;
   95.18    }
   95.19    // Make sure I can find the Start Node
   95.20 -  Block *entry = _cfg->_blocks[1];
   95.21 -  Block *broot = _cfg->_broot;
   95.22 +  Block *entry = _cfg->get_block(1);
   95.23 +  Block *broot = _cfg->get_root_block();
   95.24  
   95.25    const StartNode *start = entry->_nodes[0]->as_Start();
   95.26  
   95.27 @@ -109,40 +108,44 @@
   95.28    }
   95.29  
   95.30    // Insert epilogs before every return
   95.31 -  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
   95.32 -    Block *b = _cfg->_blocks[i];
   95.33 -    if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point?
   95.34 -      Node *m = b->end();
   95.35 -      if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
   95.36 -        MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
   95.37 -        b->add_inst( epilog );
   95.38 -        _cfg->map_node_to_block(epilog, b);
   95.39 +  for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
   95.40 +    Block* block = _cfg->get_block(i);
   95.41 +    if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
   95.42 +      Node* m = block->end();
   95.43 +      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
   95.44 +        MachEpilogNode* epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
   95.45 +        block->add_inst(epilog);
   95.46 +        _cfg->map_node_to_block(epilog, block);
   95.47        }
   95.48      }
   95.49    }
   95.50  
   95.51  # ifdef ENABLE_ZAP_DEAD_LOCALS
   95.52 -  if ( ZapDeadCompiledLocals )  Insert_zap_nodes();
   95.53 +  if (ZapDeadCompiledLocals) {
   95.54 +    Insert_zap_nodes();
   95.55 +  }
   95.56  # endif
   95.57  
   95.58 -  uint* blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1);
   95.59 -  blk_starts[0]    = 0;
   95.60 +  uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
   95.61 +  blk_starts[0] = 0;
   95.62  
   95.63    // Initialize code buffer and process short branches.
   95.64    CodeBuffer* cb = init_buffer(blk_starts);
   95.65  
   95.66 -  if (cb == NULL || failing())  return;
   95.67 +  if (cb == NULL || failing()) {
   95.68 +    return;
   95.69 +  }
   95.70  
   95.71    ScheduleAndBundle();
   95.72  
   95.73  #ifndef PRODUCT
   95.74    if (trace_opto_output()) {
   95.75      tty->print("\n---- After ScheduleAndBundle ----\n");
   95.76 -    for (uint i = 0; i < _cfg->_num_blocks; i++) {
   95.77 +    for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
   95.78        tty->print("\nBB#%03d:\n", i);
   95.79 -      Block *bb = _cfg->_blocks[i];
   95.80 -      for (uint j = 0; j < bb->_nodes.size(); j++) {
   95.81 -        Node *n = bb->_nodes[j];
   95.82 +      Block* block = _cfg->get_block(i);
   95.83 +      for (uint j = 0; j < block->_nodes.size(); j++) {
   95.84 +        Node* n = block->_nodes[j];
   95.85          OptoReg::Name reg = _regalloc->get_reg_first(n);
   95.86          tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
   95.87          n->dump();
   95.88 @@ -151,11 +154,15 @@
   95.89    }
   95.90  #endif
   95.91  
   95.92 -  if (failing())  return;
   95.93 +  if (failing()) {
   95.94 +    return;
   95.95 +  }
   95.96  
   95.97    BuildOopMaps();
   95.98  
   95.99 -  if (failing())  return;
  95.100 +  if (failing()) {
  95.101 +    return;
  95.102 +  }
  95.103  
  95.104    fill_buffer(cb, blk_starts);
  95.105  }
  95.106 @@ -217,8 +224,8 @@
  95.107     return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care
  95.108  
  95.109    // Insert call to zap runtime stub before every node with an oop map
  95.110 -  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
  95.111 -    Block *b = _cfg->_blocks[i];
  95.112 +  for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
  95.113 +    Block *b = _cfg->get_block(i);
  95.114      for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
  95.115        Node *n = b->_nodes[j];
  95.116  
  95.117 @@ -275,7 +282,6 @@
  95.118    return _matcher->match_sfpt(ideal_node);
  95.119  }
  95.120  
  95.121 -//------------------------------is_node_getting_a_safepoint--------------------
  95.122  bool Compile::is_node_getting_a_safepoint( Node* n) {
  95.123    // This code duplicates the logic prior to the call of add_safepoint
  95.124    // below in this file.
  95.125 @@ -285,7 +291,6 @@
  95.126  
  95.127  # endif // ENABLE_ZAP_DEAD_LOCALS
  95.128  
  95.129 -//------------------------------compute_loop_first_inst_sizes------------------
  95.130  // Compute the size of first NumberOfLoopInstrToAlign instructions at the top
  95.131  // of a loop. When aligning a loop we need to provide enough instructions
  95.132  // in cpu's fetch buffer to feed decoders. The loop alignment could be
  95.133 @@ -302,42 +307,39 @@
  95.134    // or alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
  95.135    // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
  95.136    // equal to 11 bytes which is the largest address NOP instruction.
  95.137 -  if( MaxLoopPad < OptoLoopAlignment-1 ) {
  95.138 -    uint last_block = _cfg->_num_blocks-1;
  95.139 -    for( uint i=1; i <= last_block; i++ ) {
  95.140 -      Block *b = _cfg->_blocks[i];
  95.141 +  if (MaxLoopPad < OptoLoopAlignment - 1) {
  95.142 +    uint last_block = _cfg->number_of_blocks() - 1;
  95.143 +    for (uint i = 1; i <= last_block; i++) {
  95.144 +      Block* block = _cfg->get_block(i);
  95.145        // Check the first loop's block which requires an alignment.
  95.146 -      if( b->loop_alignment() > (uint)relocInfo::addr_unit() ) {
  95.147 +      if (block->loop_alignment() > (uint)relocInfo::addr_unit()) {
  95.148          uint sum_size = 0;
  95.149          uint inst_cnt = NumberOfLoopInstrToAlign;
  95.150 -        inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
  95.151 +        inst_cnt = block->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
  95.152  
  95.153          // Check subsequent fallthrough blocks if the loop's first
  95.154          // block(s) does not have enough instructions.
  95.155 -        Block *nb = b;
  95.156 -        while( inst_cnt > 0 &&
  95.157 -               i < last_block &&
  95.158 -               !_cfg->_blocks[i+1]->has_loop_alignment() &&
  95.159 -               !nb->has_successor(b) ) {
  95.160 +        Block *nb = block;
  95.161 +        while (inst_cnt > 0 &&
  95.162 +               i < last_block &&
  95.163 +               !_cfg->get_block(i + 1)->has_loop_alignment() &&
  95.164 +               !nb->has_successor(block)) {
  95.165            i++;
  95.166 -          nb = _cfg->_blocks[i];
  95.167 +          nb = _cfg->get_block(i);
  95.168            inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
  95.169          } // while( inst_cnt > 0 && i < last_block  )
  95.170  
  95.171 -        b->set_first_inst_size(sum_size);
  95.172 +        block->set_first_inst_size(sum_size);
  95.173        } // if( b->head()->is_Loop() )
  95.174      } // for( i <= last_block )
  95.175    } // if( MaxLoopPad < OptoLoopAlignment-1 )
  95.176  }
  95.177  
  95.178 -//----------------------shorten_branches---------------------------------------
  95.179  // The architecture description provides short branch variants for some long
  95.180  // branch instructions. Replace eligible long branches with short branches.
  95.181  void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) {
  95.182 -
  95.183 -  // ------------------
  95.184    // Compute size of each block, method size, and relocation information size
  95.185 -  uint nblocks  = _cfg->_num_blocks;
  95.186 +  uint nblocks  = _cfg->number_of_blocks();
  95.187  
  95.188    uint*      jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
  95.189    uint*      jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
  95.190 @@ -364,7 +366,7 @@
  95.191    uint last_avoid_back_to_back_adr = max_uint;
  95.192    uint nop_size = (new (this) MachNopNode())->size(_regalloc);
  95.193    for (uint i = 0; i < nblocks; i++) { // For all blocks
  95.194 -    Block *b = _cfg->_blocks[i];
  95.195 +    Block* block = _cfg->get_block(i);
  95.196  
  95.197      // During short branch replacement, we store the relative (to blk_starts)
  95.198      // offset of jump in jmp_offset, rather than the absolute offset of jump.
  95.199 @@ -377,10 +379,10 @@
  95.200      DEBUG_ONLY( jmp_rule[i]   = 0; )
  95.201  
  95.202      // Sum all instruction sizes to compute block size
  95.203 -    uint last_inst = b->_nodes.size();
  95.204 +    uint last_inst = block->_nodes.size();
  95.205      uint blk_size = 0;
  95.206      for (uint j = 0; j < last_inst; j++) {
  95.207 -      Node* nj = b->_nodes[j];
  95.208 +      Node* nj = block->_nodes[j];
  95.209        // Handle machine instruction nodes
  95.210        if (nj->is_Mach()) {
  95.211          MachNode *mach = nj->as_Mach();
  95.212 @@ -441,8 +443,8 @@
  95.213      // When the next block starts a loop, we may insert pad NOP
  95.214      // instructions.  Since we cannot know our future alignment,
  95.215      // assume the worst.
  95.216 -    if (i< nblocks-1) {
  95.217 -      Block *nb = _cfg->_blocks[i+1];
  95.218 +    if (i < nblocks - 1) {
  95.219 +      Block* nb = _cfg->get_block(i + 1);
  95.220        int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
  95.221        if (max_loop_pad > 0) {
  95.222          assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
  95.223 @@ -473,26 +475,26 @@
  95.224      has_short_branch_candidate = false;
  95.225      int adjust_block_start = 0;
  95.226      for (uint i = 0; i < nblocks; i++) {
  95.227 -      Block *b = _cfg->_blocks[i];
  95.228 +      Block* block = _cfg->get_block(i);
  95.229        int idx = jmp_nidx[i];
  95.230 -      MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
  95.231 +      MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach();
  95.232        if (mach != NULL && mach->may_be_short_branch()) {
  95.233  #ifdef ASSERT
  95.234          assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
  95.235          int j;
  95.236          // Find the branch; ignore trailing NOPs.
  95.237 -        for (j = b->_nodes.size()-1; j>=0; j--) {
  95.238 -          Node* n = b->_nodes[j];
  95.239 +        for (j = block->_nodes.size()-1; j>=0; j--) {
  95.240 +          Node* n = block->_nodes[j];
  95.241            if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
  95.242              break;
  95.243          }
  95.244 -        assert(j >= 0 && j == idx && b->_nodes[j] == (Node*)mach, "sanity");
  95.245 +        assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity");
  95.246  #endif
  95.247          int br_size = jmp_size[i];
  95.248          int br_offs = blk_starts[i] + jmp_offset[i];
  95.249  
  95.250          // This requires the TRUE branch target be in succs[0]
  95.251 -        uint bnum = b->non_connector_successor(0)->_pre_order;
  95.252 +        uint bnum = block->non_connector_successor(0)->_pre_order;
  95.253          int offset = blk_starts[bnum] - br_offs;
  95.254          if (bnum > i) { // adjust following block's offset
  95.255            offset -= adjust_block_start;
  95.256 @@ -520,7 +522,7 @@
  95.257              diff -= nop_size;
  95.258            }
  95.259            adjust_block_start += diff;
  95.260 -          b->_nodes.map(idx, replacement);
  95.261 +          block->_nodes.map(idx, replacement);
  95.262            mach->subsume_by(replacement, C);
  95.263            mach = replacement;
  95.264            progress = true;
  95.265 @@ -1083,8 +1085,8 @@
  95.266    if (has_mach_constant_base_node()) {
  95.267      // Fill the constant table.
  95.268      // Note:  This must happen before shorten_branches.
  95.269 -    for (uint i = 0; i < _cfg->_num_blocks; i++) {
  95.270 -      Block* b = _cfg->_blocks[i];
  95.271 +    for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
  95.272 +      Block* b = _cfg->get_block(i);
  95.273  
  95.274        for (uint j = 0; j < b->_nodes.size(); j++) {
  95.275          Node* n = b->_nodes[j];
  95.276 @@ -1170,7 +1172,7 @@
  95.277    // !!!!! This preserves old handling of oopmaps for now
  95.278    debug_info()->set_oopmaps(_oop_map_set);
  95.279  
  95.280 -  uint nblocks  = _cfg->_num_blocks;
  95.281 +  uint nblocks  = _cfg->number_of_blocks();
  95.282    // Count and start of implicit null check instructions
  95.283    uint inct_cnt = 0;
  95.284    uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
  95.285 @@ -1218,21 +1220,21 @@
  95.286    // Now fill in the code buffer
  95.287    Node *delay_slot = NULL;
  95.288  
  95.289 -  for (uint i=0; i < nblocks; i++) {
  95.290 -    Block *b = _cfg->_blocks[i];
  95.291 -
  95.292 -    Node *head = b->head();
  95.293 +  for (uint i = 0; i < nblocks; i++) {
  95.294 +    Block* block = _cfg->get_block(i);
  95.295 +    Node* head = block->head();
  95.296  
  95.297      // If this block needs to start aligned (i.e, can be reached other
  95.298      // than by falling-thru from the previous block), then force the
  95.299      // start of a new bundle.
  95.300 -    if (Pipeline::requires_bundling() && starts_bundle(head))
  95.301 +    if (Pipeline::requires_bundling() && starts_bundle(head)) {
  95.302        cb->flush_bundle(true);
  95.303 +    }
  95.304  
  95.305  #ifdef ASSERT
  95.306 -    if (!b->is_connector()) {
  95.307 +    if (!block->is_connector()) {
  95.308        stringStream st;
  95.309 -      b->dump_head(_cfg, &st);
  95.310 +      block->dump_head(_cfg, &st);
  95.311        MacroAssembler(cb).block_comment(st.as_string());
  95.312      }
  95.313      jmp_target[i] = 0;
  95.314 @@ -1243,16 +1245,16 @@
  95.315      int blk_offset = current_offset;
  95.316  
  95.317      // Define the label at the beginning of the basic block
  95.318 -    MacroAssembler(cb).bind(blk_labels[b->_pre_order]);
  95.319 -
  95.320 -    uint last_inst = b->_nodes.size();
  95.321 +    MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
  95.322 +
  95.323 +    uint last_inst = block->_nodes.size();
  95.324  
  95.325      // Emit block normally, except for last instruction.
  95.326      // Emit means "dump code bits into code buffer".
  95.327      for (uint j = 0; j<last_inst; j++) {
  95.328  
  95.329        // Get the node
  95.330 -      Node* n = b->_nodes[j];
  95.331 +      Node* n = block->_nodes[j];
  95.332  
  95.333        // See if delay slots are supported
  95.334        if (valid_bundle_info(n) &&
  95.335 @@ -1306,9 +1308,9 @@
  95.336            assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
  95.337            int nops_cnt = padding / nop_size;
  95.338            MachNode *nop = new (this) MachNopNode(nops_cnt);
  95.339 -          b->_nodes.insert(j++, nop);
  95.340 +          block->_nodes.insert(j++, nop);
  95.341            last_inst++;
  95.342 -          _cfg->map_node_to_block(nop, b);
  95.343 +          _cfg->map_node_to_block(nop, block);
  95.344            nop->emit(*cb, _regalloc);
  95.345            cb->flush_bundle(true);
  95.346            current_offset = cb->insts_size();
  95.347 @@ -1322,7 +1324,7 @@
  95.348            mcall->method_set((intptr_t)mcall->entry_point());
  95.349  
  95.350            // Save the return address
  95.351 -          call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
  95.352 +          call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
  95.353  
  95.354            if (mcall->is_MachCallLeaf()) {
  95.355              is_mcall = false;
  95.356 @@ -1359,7 +1361,7 @@
  95.357          // If this is a branch, then fill in the label with the target BB's label
  95.358          else if (mach->is_MachBranch()) {
  95.359            // This requires the TRUE branch target be in succs[0]
  95.360 -          uint block_num = b->non_connector_successor(0)->_pre_order;
  95.361 +          uint block_num = block->non_connector_successor(0)->_pre_order;
  95.362  
  95.363            // Try to replace long branch if delay slot is not used,
  95.364            // it is mostly for back branches since forward branch's
  95.365 @@ -1392,8 +1394,8 @@
  95.366                // Insert padding between avoid_back_to_back branches.
  95.367                if (needs_padding && replacement->avoid_back_to_back()) {
  95.368                  MachNode *nop = new (this) MachNopNode();
  95.369 -                b->_nodes.insert(j++, nop);
  95.370 -                _cfg->map_node_to_block(nop, b);
  95.371 +                block->_nodes.insert(j++, nop);
  95.372 +                _cfg->map_node_to_block(nop, block);
  95.373                  last_inst++;
  95.374                  nop->emit(*cb, _regalloc);
  95.375                  cb->flush_bundle(true);
  95.376 @@ -1405,7 +1407,7 @@
  95.377                jmp_size[i]   = new_size;
  95.378                jmp_rule[i]   = mach->rule();
  95.379  #endif
  95.380 -              b->_nodes.map(j, replacement);
  95.381 +              block->_nodes.map(j, replacement);
  95.382                mach->subsume_by(replacement, C);
  95.383                n    = replacement;
  95.384                mach = replacement;
  95.385 @@ -1413,8 +1415,8 @@
  95.386            }
  95.387            mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
  95.388          } else if (mach->ideal_Opcode() == Op_Jump) {
  95.389 -          for (uint h = 0; h < b->_num_succs; h++) {
  95.390 -            Block* succs_block = b->_succs[h];
  95.391 +          for (uint h = 0; h < block->_num_succs; h++) {
  95.392 +            Block* succs_block = block->_succs[h];
  95.393              for (uint j = 1; j < succs_block->num_preds(); j++) {
  95.394                Node* jpn = succs_block->pred(j);
  95.395                if (jpn->is_JumpProj() && jpn->in(0) == mach) {
  95.396 @@ -1425,7 +1427,6 @@
  95.397              }
  95.398            }
  95.399          }
  95.400 -
  95.401  #ifdef ASSERT
  95.402          // Check that oop-store precedes the card-mark
  95.403          else if (mach->ideal_Opcode() == Op_StoreCM) {
  95.404 @@ -1436,17 +1437,18 @@
  95.405              if (oop_store == NULL) continue;
  95.406              count++;
  95.407              uint i4;
  95.408 -            for( i4 = 0; i4 < last_inst; ++i4 ) {
  95.409 -              if( b->_nodes[i4] == oop_store ) break;
  95.410 +            for (i4 = 0; i4 < last_inst; ++i4) {
  95.411 +              if (block->_nodes[i4] == oop_store) {
  95.412 +                break;
  95.413 +              }
  95.414              }
  95.415              // Note: This test can provide a false failure if other precedence
  95.416              // edges have been added to the storeCMNode.
  95.417 -            assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
  95.418 +            assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
  95.419            }
  95.420            assert(count > 0, "storeCM expects at least one precedence edge");
  95.421          }
  95.422  #endif
  95.423 -
  95.424          else if (!n->is_Proj()) {
  95.425            // Remember the beginning of the previous instruction, in case
  95.426            // it's followed by a flag-kill and a null-check.  Happens on
  95.427 @@ -1542,12 +1544,12 @@
  95.428      // If the next block is the top of a loop, pad this block out to align
  95.429      // the loop top a little. Helps prevent pipe stalls at loop back branches.
  95.430      if (i < nblocks-1) {
  95.431 -      Block *nb = _cfg->_blocks[i+1];
  95.432 +      Block *nb = _cfg->get_block(i + 1);
  95.433        int padding = nb->alignment_padding(current_offset);
  95.434        if( padding > 0 ) {
  95.435          MachNode *nop = new (this) MachNopNode(padding / nop_size);
  95.436 -        b->_nodes.insert( b->_nodes.size(), nop );
  95.437 -        _cfg->map_node_to_block(nop, b);
  95.438 +        block->_nodes.insert(block->_nodes.size(), nop);
  95.439 +        _cfg->map_node_to_block(nop, block);
  95.440          nop->emit(*cb, _regalloc);
  95.441          current_offset = cb->insts_size();
  95.442        }
  95.443 @@ -1587,8 +1589,6 @@
  95.444    }
  95.445  #endif
  95.446  
  95.447 -  // ------------------
  95.448 -
  95.449  #ifndef PRODUCT
  95.450    // Information on the size of the method, without the extraneous code
  95.451    Scheduling::increment_method_size(cb->insts_size());
  95.452 @@ -1649,52 +1649,55 @@
  95.453    _inc_table.set_size(cnt);
  95.454  
  95.455    uint inct_cnt = 0;
  95.456 -  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
  95.457 -    Block *b = _cfg->_blocks[i];
  95.458 +  for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
  95.459 +    Block* block = _cfg->get_block(i);
  95.460      Node *n = NULL;
  95.461      int j;
  95.462  
  95.463      // Find the branch; ignore trailing NOPs.
  95.464 -    for( j = b->_nodes.size()-1; j>=0; j-- ) {
  95.465 -      n = b->_nodes[j];
  95.466 -      if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
  95.467 +    for (j = block->_nodes.size() - 1; j >= 0; j--) {
  95.468 +      n = block->_nodes[j];
  95.469 +      if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
  95.470          break;
  95.471 +      }
  95.472      }
  95.473  
  95.474      // If we didn't find anything, continue
  95.475 -    if( j < 0 ) continue;
  95.476 +    if (j < 0) {
  95.477 +      continue;
  95.478 +    }
  95.479  
  95.480      // Compute ExceptionHandlerTable subtable entry and add it
  95.481      // (skip empty blocks)
  95.482 -    if( n->is_Catch() ) {
  95.483 +    if (n->is_Catch()) {
  95.484  
  95.485        // Get the offset of the return from the call
  95.486 -      uint call_return = call_returns[b->_pre_order];
  95.487 +      uint call_return = call_returns[block->_pre_order];
  95.488  #ifdef ASSERT
  95.489        assert( call_return > 0, "no call seen for this basic block" );
  95.490 -      while( b->_nodes[--j]->is_MachProj() ) ;
  95.491 -      assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
  95.492 +      while (block->_nodes[--j]->is_MachProj()) ;
  95.493 +      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
  95.494  #endif
  95.495        // last instruction is a CatchNode, find its CatchProjNodes
  95.496 -      int nof_succs = b->_num_succs;
  95.497 +      int nof_succs = block->_num_succs;
  95.498        // allocate space
  95.499        GrowableArray<intptr_t> handler_bcis(nof_succs);
  95.500        GrowableArray<intptr_t> handler_pcos(nof_succs);
  95.501        // iterate through all successors
  95.502        for (int j = 0; j < nof_succs; j++) {
  95.503 -        Block* s = b->_succs[j];
  95.504 +        Block* s = block->_succs[j];
  95.505          bool found_p = false;
  95.506 -        for( uint k = 1; k < s->num_preds(); k++ ) {
  95.507 -          Node *pk = s->pred(k);
  95.508 -          if( pk->is_CatchProj() && pk->in(0) == n ) {
  95.509 +        for (uint k = 1; k < s->num_preds(); k++) {
  95.510 +          Node* pk = s->pred(k);
  95.511 +          if (pk->is_CatchProj() && pk->in(0) == n) {
  95.512              const CatchProjNode* p = pk->as_CatchProj();
  95.513              found_p = true;
  95.514              // add the corresponding handler bci & pco information
  95.515 -            if( p->_con != CatchProjNode::fall_through_index ) {
  95.516 +            if (p->_con != CatchProjNode::fall_through_index) {
  95.517                // p leads to an exception handler (and is not fall through)
  95.518 -              assert(s == _cfg->_blocks[s->_pre_order],"bad numbering");
  95.519 +              assert(s == _cfg->get_block(s->_pre_order), "bad numbering");
  95.520                // no duplicates, please
  95.521 -              if( !handler_bcis.contains(p->handler_bci()) ) {
  95.522 +              if (!handler_bcis.contains(p->handler_bci())) {
  95.523                  uint block_num = s->non_connector()->_pre_order;
  95.524                  handler_bcis.append(p->handler_bci());
  95.525                  handler_pcos.append(blk_labels[block_num].loc_pos());
  95.526 @@ -1713,9 +1716,9 @@
  95.527      }
  95.528  
  95.529      // Handle implicit null exception table updates
  95.530 -    if( n->is_MachNullCheck() ) {
  95.531 -      uint block_num = b->non_connector_successor(0)->_pre_order;
  95.532 -      _inc_table.append( inct_starts[inct_cnt++], blk_labels[block_num].loc_pos() );
  95.533 +    if (n->is_MachNullCheck()) {
  95.534 +      uint block_num = block->non_connector_successor(0)->_pre_order;
  95.535 +      _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
  95.536        continue;
  95.537      }
  95.538    } // End of for all blocks fill in exception table entries
  95.539 @@ -1774,14 +1777,12 @@
  95.540    memset(_current_latency,    0, node_max * sizeof(unsigned short));
  95.541  
  95.542    // Clear the bundling information
  95.543 -  memcpy(_bundle_use_elements,
  95.544 -    Pipeline_Use::elaborated_elements,
  95.545 -    sizeof(Pipeline_Use::elaborated_elements));
  95.546 +  memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
  95.547  
  95.548    // Get the last node
  95.549 -  Block *bb = _cfg->_blocks[_cfg->_blocks.size()-1];
  95.550 -
  95.551 -  _next_node = bb->_nodes[bb->_nodes.size()-1];
  95.552 +  Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
  95.553 +
  95.554 +  _next_node = block->_nodes[block->_nodes.size() - 1];
  95.555  }
  95.556  
  95.557  #ifndef PRODUCT
  95.558 @@ -1831,7 +1832,6 @@
  95.559      sizeof(Pipeline_Use::elaborated_elements));
  95.560  }
  95.561  
  95.562 -//------------------------------ScheduleAndBundle------------------------------
  95.563  // Perform instruction scheduling and bundling over the sequence of
  95.564  // instructions in backwards order.
  95.565  void Compile::ScheduleAndBundle() {
  95.566 @@ -1858,7 +1858,6 @@
  95.567    scheduling.DoScheduling();
  95.568  }
  95.569  
  95.570 -//------------------------------ComputeLocalLatenciesForward-------------------
  95.571  // Compute the latency of all the instructions.  This is fairly simple,
  95.572  // because we already have a legal ordering.  Walk over the instructions
  95.573  // from first to last, and compute the latency of the instruction based
  95.574 @@ -2028,7 +2027,6 @@
  95.575    return _available[0];
  95.576  }
  95.577  
  95.578 -//------------------------------AddNodeToAvailableList-------------------------
  95.579  void Scheduling::AddNodeToAvailableList(Node *n) {
  95.580    assert( !n->is_Proj(), "projections never directly made available" );
  95.581  #ifndef PRODUCT
  95.582 @@ -2074,7 +2072,6 @@
  95.583  #endif
  95.584  }
  95.585  
  95.586 -//------------------------------DecrementUseCounts-----------------------------
  95.587  void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
  95.588    for ( uint i=0; i < n->len(); i++ ) {
  95.589      Node *def = n->in(i);
  95.590 @@ -2097,7 +2094,6 @@
  95.591    }
  95.592  }
  95.593  
  95.594 -//------------------------------AddNodeToBundle--------------------------------
  95.595  void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
  95.596  #ifndef PRODUCT
  95.597    if (_cfg->C->trace_opto_output()) {
  95.598 @@ -2312,7 +2308,6 @@
  95.599    DecrementUseCounts(n,bb);
  95.600  }
  95.601  
  95.602 -//------------------------------ComputeUseCount--------------------------------
  95.603  // This method sets the use count within a basic block.  We will ignore all
  95.604  // uses outside the current basic block.  As we are doing a backwards walk,
  95.605  // any node we reach that has a use count of 0 may be scheduled.  This also
  95.606 @@ -2397,20 +2392,22 @@
  95.607    Block *bb;
  95.608  
  95.609    // Walk over all the basic blocks in reverse order
  95.610 -  for( int i=_cfg->_num_blocks-1; i >= 0; succ_bb = bb, i-- ) {
  95.611 -    bb = _cfg->_blocks[i];
  95.612 +  for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) {
  95.613 +    bb = _cfg->get_block(i);
  95.614  
  95.615  #ifndef PRODUCT
  95.616      if (_cfg->C->trace_opto_output()) {
  95.617        tty->print("#  Schedule BB#%03d (initial)\n", i);
  95.618 -      for (uint j = 0; j < bb->_nodes.size(); j++)
  95.619 +      for (uint j = 0; j < bb->_nodes.size(); j++) {
  95.620          bb->_nodes[j]->dump();
  95.621 +      }
  95.622      }
  95.623  #endif
  95.624  
  95.625      // On the head node, skip processing
  95.626 -    if( bb == _cfg->_broot )
  95.627 +    if (bb == _cfg->get_root_block()) {
  95.628        continue;
  95.629 +    }
  95.630  
  95.631      // Skip empty, connector blocks
  95.632      if (bb->is_connector())
  95.633 @@ -2547,7 +2544,6 @@
  95.634  
  95.635  } // end DoScheduling
  95.636  
  95.637 -//------------------------------verify_good_schedule---------------------------
  95.638  // Verify that no live-range used in the block is killed in the block by a
  95.639  // wrong DEF.  This doesn't verify live-ranges that span blocks.
  95.640  
  95.641 @@ -2560,7 +2556,6 @@
  95.642  }
  95.643  
  95.644  #ifdef ASSERT
  95.645 -//------------------------------verify_do_def----------------------------------
  95.646  void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
  95.647    // Check for bad kills
  95.648    if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
  95.649 @@ -2576,7 +2571,6 @@
  95.650    }
  95.651  }
  95.652  
  95.653 -//------------------------------verify_good_schedule---------------------------
  95.654  void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
  95.655  
  95.656    // Zap to something reasonable for the verify code
  95.657 @@ -2636,7 +2630,6 @@
  95.658      from->add_prec(to);
  95.659  }
  95.660  
  95.661 -//------------------------------anti_do_def------------------------------------
  95.662  void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
  95.663    if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
  95.664      return;
  95.665 @@ -2706,7 +2699,6 @@
  95.666    add_prec_edge_from_to(kill,pinch);
  95.667  }
  95.668  
  95.669 -//------------------------------anti_do_use------------------------------------
  95.670  void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
  95.671    if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
  95.672      return;
  95.673 @@ -2727,7 +2719,6 @@
  95.674    }
  95.675  }
  95.676  
  95.677 -//------------------------------ComputeRegisterAntidependences-----------------
  95.678  // We insert antidependences between the reads and following write of
  95.679  // allocated registers to prevent illegal code motion. Hopefully, the
  95.680  // number of added references should be fairly small, especially as we
  95.681 @@ -2861,8 +2852,6 @@
  95.682    }
  95.683  }
  95.684  
  95.685 -//------------------------------garbage_collect_pinch_nodes-------------------------------
  95.686 -
  95.687  // Garbage collect pinch nodes for reuse by other blocks.
  95.688  //
  95.689  // The block scheduler's insertion of anti-dependence
  95.690 @@ -2937,7 +2926,6 @@
  95.691    pinch->set_req(0, NULL);
  95.692  }
  95.693  
  95.694 -//------------------------------print_statistics-------------------------------
  95.695  #ifndef PRODUCT
  95.696  
  95.697  void Scheduling::dump_available() const {
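
As background for the shorten_branches() and fill_buffer() hunks above: short-branch replacement is a fixpoint loop. Block start offsets are recomputed from the current instruction sizes, any long branch whose displacement now fits the short encoding is shrunk, and the loop repeats because shrinking one branch can bring another target into range. The sketch below shows only that fixpoint idea; the block sizes, the 60-byte reach, and the 5/2-byte encodings are invented numbers, and none of the real padding, alignment, or safety margins are modeled.

    // Sketch: iterative long->short branch shrinking over block start offsets.
    // Branch reach, sizes and layout are invented for illustration only.
    #include <cstdio>
    #include <vector>

    struct Branch {
      int block;        // block the branch terminates
      int target_block; // destination block
      int size;         // current encoding size in bytes
      bool is_short;
    };

    int main() {
      // Block body sizes excluding the terminating branch.
      std::vector<int> body = {12, 40, 8, 20};
      std::vector<Branch> branches = {
        {0, 3, 5, false},   // long forms are 5 bytes, short forms 2 bytes
        {1, 0, 5, false},
        {2, 3, 5, false},
      };
      const int kShortReach = 60;   // assumed max displacement of a short branch

      bool progress = true;
      while (progress) {
        progress = false;
        // Recompute block start offsets from current branch sizes.
        std::vector<int> start(body.size() + 1, 0);
        for (std::size_t i = 0; i < body.size(); i++) {
          int branch_size = 0;
          for (const Branch& br : branches) {
            if (br.block == (int)i) branch_size = br.size;
          }
          start[i + 1] = start[i] + body[i] + branch_size;
        }
        // Shrink any long branch whose displacement now fits the short form.
        for (Branch& br : branches) {
          if (br.is_short) continue;
          int from = start[br.block + 1];          // end of the branch's block
          int to = start[br.target_block];
          int displacement = to - from;
          if (displacement < 0) displacement = -displacement;
          if (displacement <= kShortReach) {
            br.size = 2;
            br.is_short = true;
            progress = true;                       // offsets moved; go again
          }
        }
      }
      for (const Branch& br : branches) {
        std::printf("branch in block %d: %s\n", br.block, br.is_short ? "short" : "long");
      }
      return 0;
    }
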
    96.1 --- a/src/share/vm/opto/phaseX.cpp	Fri Aug 23 22:12:18 2013 +0100
    96.2 +++ b/src/share/vm/opto/phaseX.cpp	Fri Aug 30 09:50:49 2013 +0100
    96.3 @@ -1643,8 +1643,8 @@
    96.4    bool method_name_not_printed = true;
    96.5  
    96.6    // Examine each basic block
    96.7 -  for( uint block_number = 1; block_number < _cfg._num_blocks; ++block_number ) {
    96.8 -    Block *block = _cfg._blocks[block_number];
    96.9 +  for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) {
   96.10 +    Block* block = _cfg.get_block(block_number);
   96.11      bool block_not_printed = true;
   96.12  
   96.13      // and each instruction within a block
    97.1 --- a/src/share/vm/opto/postaloc.cpp	Fri Aug 23 22:12:18 2013 +0100
    97.2 +++ b/src/share/vm/opto/postaloc.cpp	Fri Aug 30 09:50:49 2013 +0100
    97.3 @@ -405,28 +405,29 @@
    97.4  
    97.5    // Need a mapping from basic block Node_Lists.  We need a Node_List to
    97.6    // map from register number to value-producing Node.
    97.7 -  Node_List **blk2value = NEW_RESOURCE_ARRAY( Node_List *, _cfg._num_blocks+1);
    97.8 -  memset( blk2value, 0, sizeof(Node_List*)*(_cfg._num_blocks+1) );
    97.9 +  Node_List **blk2value = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1);
   97.10 +  memset(blk2value, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1));
   97.11    // Need a mapping from basic block Node_Lists.  We need a Node_List to
   97.12    // map from register number to register-defining Node.
   97.13 -  Node_List **blk2regnd = NEW_RESOURCE_ARRAY( Node_List *, _cfg._num_blocks+1);
   97.14 -  memset( blk2regnd, 0, sizeof(Node_List*)*(_cfg._num_blocks+1) );
   97.15 +  Node_List **blk2regnd = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1);
   97.16 +  memset(blk2regnd, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1));
   97.17  
   97.18    // We keep unused Node_Lists on a free_list to avoid wasting
   97.19    // memory.
   97.20    GrowableArray<Node_List*> free_list = GrowableArray<Node_List*>(16);
   97.21  
   97.22    // For all blocks
   97.23 -  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
   97.24 +  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
   97.25      uint j;
   97.26 -    Block *b = _cfg._blocks[i];
   97.27 +    Block* block = _cfg.get_block(i);
   97.28  
   97.29      // Count of Phis in block
   97.30      uint phi_dex;
   97.31 -    for( phi_dex = 1; phi_dex < b->_nodes.size(); phi_dex++ ) {
   97.32 -      Node *phi = b->_nodes[phi_dex];
   97.33 -      if( !phi->is_Phi() )
   97.34 +    for (phi_dex = 1; phi_dex < block->_nodes.size(); phi_dex++) {
   97.35 +      Node* phi = block->_nodes[phi_dex];
   97.36 +      if (!phi->is_Phi()) {
   97.37          break;
   97.38 +      }
   97.39      }
   97.40  
   97.41      // If any predecessor has not been visited, we do not know the state
   97.42 @@ -434,21 +435,23 @@
   97.43      // along Phi input edges
   97.44      bool missing_some_inputs = false;
   97.45      Block *freed = NULL;
   97.46 -    for( j = 1; j < b->num_preds(); j++ ) {
   97.47 -      Block *pb = _cfg.get_block_for_node(b->pred(j));
   97.48 +    for (j = 1; j < block->num_preds(); j++) {
   97.49 +      Block* pb = _cfg.get_block_for_node(block->pred(j));
   97.50        // Remove copies along phi edges
   97.51 -      for( uint k=1; k<phi_dex; k++ )
   97.52 -        elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false );
   97.53 -      if( blk2value[pb->_pre_order] ) { // Have a mapping on this edge?
   97.54 +      for (uint k = 1; k < phi_dex; k++) {
   97.55 +        elide_copy(block->_nodes[k], j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
   97.56 +      }
   97.57 +      if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
   97.58          // See if this predecessor's mappings have been used by everybody
   97.59          // who wants them.  If so, free 'em.
   97.60          uint k;
   97.61 -        for( k=0; k<pb->_num_succs; k++ ) {
   97.62 -          Block *pbsucc = pb->_succs[k];
   97.63 -          if( !blk2value[pbsucc->_pre_order] && pbsucc != b )
   97.64 +        for (k = 0; k < pb->_num_succs; k++) {
   97.65 +          Block* pbsucc = pb->_succs[k];
   97.66 +          if (!blk2value[pbsucc->_pre_order] && pbsucc != block) {
   97.67              break;              // Found a future user
   97.68 +          }
   97.69          }
   97.70 -        if( k >= pb->_num_succs ) { // No more uses, free!
   97.71 +        if (k >= pb->_num_succs) { // No more uses, free!
   97.72            freed = pb;           // Record last block freed
   97.73            free_list.push(blk2value[pb->_pre_order]);
   97.74            free_list.push(blk2regnd[pb->_pre_order]);
   97.75 @@ -467,20 +470,20 @@
   97.76      value.map(_max_reg,NULL);
   97.77      regnd.map(_max_reg,NULL);
   97.78      // Set mappings as OUR mappings
   97.79 -    blk2value[b->_pre_order] = &value;
   97.80 -    blk2regnd[b->_pre_order] = &regnd;
   97.81 +    blk2value[block->_pre_order] = &value;
   97.82 +    blk2regnd[block->_pre_order] = &regnd;
   97.83  
   97.84      // Initialize value & regnd for this block
   97.85 -    if( missing_some_inputs ) {
   97.86 +    if (missing_some_inputs) {
   97.87        // Some predecessor has not yet been visited; zap map to empty
   97.88 -      for( uint k = 0; k < (uint)_max_reg; k++ ) {
   97.89 +      for (uint k = 0; k < (uint)_max_reg; k++) {
   97.90          value.map(k,NULL);
   97.91          regnd.map(k,NULL);
   97.92        }
   97.93      } else {
   97.94        if( !freed ) {            // Didn't get a freebie prior block
   97.95          // Must clone some data
   97.96 -        freed = _cfg.get_block_for_node(b->pred(1));
   97.97 +        freed = _cfg.get_block_for_node(block->pred(1));
   97.98          Node_List &f_value = *blk2value[freed->_pre_order];
   97.99          Node_List &f_regnd = *blk2regnd[freed->_pre_order];
  97.100          for( uint k = 0; k < (uint)_max_reg; k++ ) {
  97.101 @@ -489,9 +492,11 @@
  97.102          }
  97.103        }
  97.104        // Merge all inputs together, setting to NULL any conflicts.
  97.105 -      for( j = 1; j < b->num_preds(); j++ ) {
  97.106 -        Block *pb = _cfg.get_block_for_node(b->pred(j));
  97.107 -        if( pb == freed ) continue; // Did self already via freelist
  97.108 +      for (j = 1; j < block->num_preds(); j++) {
  97.109 +        Block* pb = _cfg.get_block_for_node(block->pred(j));
  97.110 +        if (pb == freed) {
  97.111 +          continue; // Did self already via freelist
  97.112 +        }
  97.113          Node_List &p_regnd = *blk2regnd[pb->_pre_order];
  97.114          for( uint k = 0; k < (uint)_max_reg; k++ ) {
  97.115            if( regnd[k] != p_regnd[k] ) { // Conflict on reaching defs?
  97.116 @@ -503,9 +508,9 @@
  97.117      }
  97.118  
  97.119      // For all Phi's
  97.120 -    for( j = 1; j < phi_dex; j++ ) {
  97.121 +    for (j = 1; j < phi_dex; j++) {
  97.122        uint k;
  97.123 -      Node *phi = b->_nodes[j];
  97.124 +      Node *phi = block->_nodes[j];
  97.125        uint pidx = _lrg_map.live_range_id(phi);
  97.126        OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
  97.127  
  97.128 @@ -516,8 +521,8 @@
  97.129          if( phi != x && u != x ) // Found a different input
  97.130            u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
  97.131        }
  97.132 -      if( u != NodeSentinel ) {    // Junk Phi.  Remove
  97.133 -        b->_nodes.remove(j--);
  97.134 +      if (u != NodeSentinel) {    // Junk Phi.  Remove
  97.135 +        block->_nodes.remove(j--);
  97.136          phi_dex--;
  97.137          _cfg.unmap_node_from_block(phi);
  97.138          phi->replace_by(u);
  97.139 @@ -547,13 +552,13 @@
  97.140      }
  97.141  
  97.142      // For all remaining instructions
  97.143 -    for( j = phi_dex; j < b->_nodes.size(); j++ ) {
  97.144 -      Node *n = b->_nodes[j];
  97.145 +    for (j = phi_dex; j < block->_nodes.size(); j++) {
  97.146 +      Node* n = block->_nodes[j];
  97.147  
  97.148 -      if( n->outcnt() == 0 &&   // Dead?
  97.149 -          n != C->top() &&      // (ignore TOP, it has no du info)
  97.150 -          !n->is_Proj() ) {     // fat-proj kills
  97.151 -        j -= yank_if_dead(n,b,&value,&regnd);
  97.152 +      if (n->outcnt() == 0 &&   // Dead?
  97.153 +          n != C->top() &&      // (ignore TOP, it has no du info)
  97.154 +          !n->is_Proj()) {      // fat-proj kills
  97.155 +        j -= yank_if_dead(n, block, &value, &regnd);
  97.156          continue;
  97.157        }
  97.158  
  97.159 @@ -598,8 +603,9 @@
  97.160        const uint two_adr = n->is_Mach() ? n->as_Mach()->two_adr() : 0;
  97.161  
  97.162        // Remove copies along input edges
  97.163 -      for( k = 1; k < n->req(); k++ )
  97.164 -        j -= elide_copy( n, k, b, value, regnd, two_adr!=k );
  97.165 +      for (k = 1; k < n->req(); k++) {
  97.166 +        j -= elide_copy(n, k, block, value, regnd, two_adr != k);
  97.167 +      }
  97.168  
  97.169        // Unallocated Nodes define no registers
  97.170        uint lidx = _lrg_map.live_range_id(n);
  97.171 @@ -630,8 +636,8 @@
  97.172          // then 'n' is a useless copy.  Do not update the register->node
  97.173          // mapping so 'n' will go dead.
  97.174          if( value[nreg] != val ) {
  97.175 -          if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, OptoReg::Bad)) {
  97.176 -            j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
  97.177 +          if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, OptoReg::Bad)) {
  97.178 +            j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
  97.179            } else {
  97.180              // Update the mapping: record new Node defined by the register
  97.181              regnd.map(nreg,n);
  97.182 @@ -640,8 +646,8 @@
  97.183              value.map(nreg,val);
  97.184            }
  97.185          } else if( !may_be_copy_of_callee(n) ) {
  97.186 -          assert( n->is_Copy(), "" );
  97.187 -          j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
  97.188 +          assert(n->is_Copy(), "");
  97.189 +          j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
  97.190          }
  97.191        } else if (RegMask::is_vector(n_ideal_reg)) {
  97.192          // If Node 'n' does not change the value mapped by the register,
  97.193 @@ -660,7 +666,7 @@
  97.194            }
  97.195          } else if (n->is_Copy()) {
  97.196           // Note: vector can't be constant and can't be copy of callee.
  97.197 -          j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
  97.198 +          j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
  97.199          }
  97.200        } else {
  97.201          // If the value occupies a register pair, record same info
  97.202 @@ -674,18 +680,18 @@
  97.203            tmp.Remove(nreg);
  97.204            nreg_lo = tmp.find_first_elem();
  97.205          }
  97.206 -        if( value[nreg] != val || value[nreg_lo] != val ) {
  97.207 -          if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, nreg_lo)) {
  97.208 -            j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
  97.209 +        if (value[nreg] != val || value[nreg_lo] != val) {
  97.210 +          if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, nreg_lo)) {
  97.211 +            j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
  97.212            } else {
  97.213              regnd.map(nreg   , n );
  97.214              regnd.map(nreg_lo, n );
  97.215              value.map(nreg   ,val);
  97.216              value.map(nreg_lo,val);
  97.217            }
  97.218 -        } else if( !may_be_copy_of_callee(n) ) {
  97.219 -          assert( n->is_Copy(), "" );
  97.220 -          j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
  97.221 +        } else if (!may_be_copy_of_callee(n)) {
  97.222 +          assert(n->is_Copy(), "");
  97.223 +          j -= replace_and_yank_if_dead(n, nreg, block, value, regnd);
  97.224          }
  97.225        }
  97.226  
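
The Phi loop in the postaloc.cpp hunk above folds a "junk Phi": u collects the single distinct input (ignoring self-references), NodeSentinel marks the two-distinct-inputs case, and a foldable Phi is removed and replaced by u. Here is a small stand-alone sketch of that unique-input test, using a placeholder Value type and nullptr in the NodeSentinel role (not the HotSpot Node machinery):

    // Sketch: detect a phi whose inputs all agree (ignoring self-references)
    // and report the unique value it can be folded to.
    #include <cassert>
    #include <vector>

    struct Value { int id; };

    // Returns the unique non-self input of the phi, or nullptr if the inputs
    // disagree (i.e. the phi is genuinely needed). 'self' stands for the phi.
    static const Value* unique_phi_input(const Value* self,
                                         const std::vector<const Value*>& inputs) {
      const Value* u = nullptr;                 // unique input seen so far
      for (const Value* x : inputs) {
        if (x == self || x == u) continue;      // ignore self-loops and repeats
        if (u != nullptr) return nullptr;       // second distinct input: keep phi
        u = x;                                  // first distinct input
      }
      return u;
    }

    int main() {
      Value a{1}, b{2}, phi{3};
      // phi(a, a, phi) folds to a; phi(a, b) does not fold.
      assert(unique_phi_input(&phi, {&a, &a, &phi}) == &a);
      assert(unique_phi_input(&phi, {&a, &b}) == nullptr);
      return 0;
    }
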
    98.1 --- a/src/share/vm/opto/reg_split.cpp	Fri Aug 23 22:12:18 2013 +0100
    98.2 +++ b/src/share/vm/opto/reg_split.cpp	Fri Aug 30 09:50:49 2013 +0100
    98.3 @@ -397,10 +397,15 @@
    98.4  #endif
    98.5    // See if the cloned def kills any flags, and copy those kills as well
    98.6    uint i = insidx+1;
    98.7 -  if( clone_projs( b, i, def, spill, maxlrg) ) {
    98.8 +  int found_projs = clone_projs( b, i, def, spill, maxlrg);
    98.9 +  if (found_projs > 0) {
   98.10      // Adjust the point where we go hi-pressure
   98.11 -    if( i <= b->_ihrp_index ) b->_ihrp_index++;
   98.12 -    if( i <= b->_fhrp_index ) b->_fhrp_index++;
   98.13 +    if (i <= b->_ihrp_index) {
   98.14 +      b->_ihrp_index += found_projs;
   98.15 +    }
   98.16 +    if (i <= b->_fhrp_index) {
   98.17 +      b->_fhrp_index += found_projs;
   98.18 +    }
   98.19    }
   98.20  
   98.21    return spill;
   98.22 @@ -529,13 +534,13 @@
   98.23    // a Def is UP or DOWN.  UP means that it should get a register (ie -
   98.24    // it is always in LRP regions), and DOWN means that it is probably
   98.25    // on the stack (ie - it crosses HRP regions).
   98.26 -  Node ***Reaches     = NEW_SPLIT_ARRAY( Node**, _cfg._num_blocks+1 );
   98.27 -  bool  **UP          = NEW_SPLIT_ARRAY( bool*, _cfg._num_blocks+1 );
   98.28 +  Node ***Reaches     = NEW_SPLIT_ARRAY( Node**, _cfg.number_of_blocks() + 1);
   98.29 +  bool  **UP          = NEW_SPLIT_ARRAY( bool*, _cfg.number_of_blocks() + 1);
   98.30    Node  **debug_defs  = NEW_SPLIT_ARRAY( Node*, spill_cnt );
   98.31    VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );
   98.32  
   98.33    // Initialize Reaches & UP
   98.34 -  for( bidx = 0; bidx < _cfg._num_blocks+1; bidx++ ) {
   98.35 +  for (bidx = 0; bidx < _cfg.number_of_blocks() + 1; bidx++) {
   98.36      Reaches[bidx]     = NEW_SPLIT_ARRAY( Node*, spill_cnt );
   98.37      UP[bidx]          = NEW_SPLIT_ARRAY( bool, spill_cnt );
   98.38      Node **Reachblock = Reaches[bidx];
   98.39 @@ -555,13 +560,13 @@
   98.40    //----------PASS 1----------
   98.41    //----------Propagation & Node Insertion Code----------
   98.42    // Walk the Blocks in RPO for DEF & USE info
   98.43 -  for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) {
   98.44 +  for( bidx = 0; bidx < _cfg.number_of_blocks(); bidx++ ) {
   98.45  
   98.46      if (C->check_node_count(spill_cnt, out_of_nodes)) {
   98.47        return 0;
   98.48      }
   98.49  
   98.50 -    b  = _cfg._blocks[bidx];
   98.51 +    b  = _cfg.get_block(bidx);
   98.52      // Reaches & UP arrays for this block
   98.53      Reachblock = Reaches[b->_pre_order];
   98.54      UPblock    = UP[b->_pre_order];
   98.55 @@ -1394,8 +1399,8 @@
   98.56    // DEBUG
   98.57  #ifdef ASSERT
   98.58    // Validate all live range index assignments
   98.59 -  for (bidx = 0; bidx < _cfg._num_blocks; bidx++) {
   98.60 -    b  = _cfg._blocks[bidx];
   98.61 +  for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
   98.62 +    b  = _cfg.get_block(bidx);
   98.63      for (insidx = 0; insidx <= b->end_idx(); insidx++) {
   98.64        Node *n = b->_nodes[insidx];
   98.65        uint defidx = _lrg_map.find(n);
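
The reg_split.cpp fix above makes clone_projs() report how many projections it cloned, and bumps the high-register-pressure indices by that count instead of by one. The sketch below shows the underlying invariant: recorded positions at or after an insertion point shift by the number of nodes inserted. The helper name is made up for illustration.

    // Sketch: after inserting 'found_projs' nodes at position 'insert_at', any
    // recorded index at or after that position must shift by the same amount.
    #include <cassert>

    static void shift_index(unsigned& idx, unsigned insert_at, int found_projs) {
      if (insert_at <= idx) {
        idx += found_projs;   // keep the recorded position pointing at the same node
      }
    }

    int main() {
      unsigned ihrp_index = 10, fhrp_index = 3;
      shift_index(ihrp_index, 5, 2);   // two projections cloned at position 5
      shift_index(fhrp_index, 5, 2);
      assert(ihrp_index == 12);        // was past the insertion point: shifted
      assert(fhrp_index == 3);         // was before it: unchanged
      return 0;
    }
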
    99.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Aug 23 22:12:18 2013 +0100
    99.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Aug 30 09:50:49 2013 +0100
    99.3 @@ -1554,6 +1554,20 @@
    99.4      return false;
    99.5    }
    99.6  
    99.7 +  // rewrite source file name index:
    99.8 +  u2 source_file_name_idx = scratch_class->source_file_name_index();
    99.9 +  if (source_file_name_idx != 0) {
   99.10 +    u2 new_source_file_name_idx = find_new_index(source_file_name_idx);
   99.11 +    scratch_class->set_source_file_name_index(new_source_file_name_idx);
   99.12 +  }
   99.13 +
   99.14 +  // rewrite class generic signature index:
   99.15 +  u2 generic_signature_index = scratch_class->generic_signature_index();
   99.16 +  if (generic_signature_index != 0) {
   99.17 +    u2 new_generic_signature_index = find_new_index(generic_signature_index);
   99.18 +    scratch_class->set_generic_signature_index(new_generic_signature_index);
   99.19 +  }
   99.20 +
   99.21    return true;
   99.22  } // end rewrite_cp_refs()
   99.23  
   99.24 @@ -3370,7 +3384,8 @@
   99.25    // Leave arrays of jmethodIDs and itable index cache unchanged
   99.26  
   99.27    // Copy the "source file name" attribute from new class version
   99.28 -  the_class->set_source_file_name(scratch_class->source_file_name());
   99.29 +  the_class->set_source_file_name_index(
   99.30 +    scratch_class->source_file_name_index());
   99.31  
   99.32    // Copy the "source debug extension" attribute from new class version
   99.33    the_class->set_source_debug_extension(
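
Both jvmtiRedefineClasses.cpp additions above follow the same shape: read a class-level constant-pool index, leave it alone if it is 0 (attribute absent), otherwise map it through find_new_index() into the merged pool. A tiny sketch of that step, where the old-to-new map and remap_index() are hypothetical stand-ins for the real bookkeeping:

    // Sketch: remap an attribute's constant-pool index into the merged pool.
    // 'old_to_new' and remap_index() are hypothetical stand-ins, not HotSpot APIs.
    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    using u2 = std::uint16_t;

    static u2 remap_index(u2 old_index, const std::unordered_map<u2, u2>& old_to_new) {
      if (old_index == 0) {
        return 0;                       // 0 means "attribute not present": leave as-is
      }
      auto it = old_to_new.find(old_index);
      assert(it != old_to_new.end());   // every referenced entry must have a new slot
      return it->second;
    }

    int main() {
      std::unordered_map<u2, u2> old_to_new = {{17, 42}, {23, 51}};
      u2 source_file_name_index = 17;
      u2 generic_signature_index = 0;   // class has no Signature attribute
      source_file_name_index = remap_index(source_file_name_index, old_to_new);
      generic_signature_index = remap_index(generic_signature_index, old_to_new);
      assert(source_file_name_index == 42);
      assert(generic_signature_index == 0);
      return 0;
    }
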
   100.1 --- a/src/share/vm/prims/whitebox.cpp	Fri Aug 23 22:12:18 2013 +0100
   100.2 +++ b/src/share/vm/prims/whitebox.cpp	Fri Aug 30 09:50:49 2013 +0100
   100.3 @@ -196,12 +196,22 @@
   100.4    VMThread::execute(&op);
   100.5  WB_END
   100.6  
   100.7 -WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method))
   100.8 +WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   100.9    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  100.10    MutexLockerEx mu(Compile_lock);
  100.11    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
  100.12    int result = 0;
  100.13 -  nmethod* code = mh->code();
  100.14 +  nmethod* code;
  100.15 +  if (is_osr) {
  100.16 +    int bci = InvocationEntryBci;
  100.17 +    while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
  100.18 +      code->mark_for_deoptimization();
  100.19 +      ++result;
  100.20 +      bci = code->osr_entry_bci() + 1;
  100.21 +    }
  100.22 +  } else {
  100.23 +    code = mh->code();
  100.24 +  }
  100.25    if (code != NULL) {
  100.26      code->mark_for_deoptimization();
  100.27      ++result;
  100.28 @@ -214,22 +224,26 @@
  100.29    return result;
  100.30  WB_END
  100.31  
  100.32 -WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method))
  100.33 +WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
  100.34    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  100.35    MutexLockerEx mu(Compile_lock);
  100.36    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
  100.37 -  nmethod* code = mh->code();
  100.38 +  nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
  100.39    if (code == NULL) {
  100.40      return JNI_FALSE;
  100.41    }
  100.42    return (code->is_alive() && !code->is_marked_for_deoptimization());
  100.43  WB_END
  100.44  
  100.45 -WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level))
  100.46 +WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level, jboolean is_osr))
  100.47    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  100.48    MutexLockerEx mu(Compile_lock);
  100.49    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
  100.50 -  return CompilationPolicy::can_be_compiled(mh, comp_level);
  100.51 +  if (is_osr) {
  100.52 +    return CompilationPolicy::can_be_osr_compiled(mh, comp_level);
  100.53 +  } else {
  100.54 +    return CompilationPolicy::can_be_compiled(mh, comp_level);
  100.55 +  }
  100.56  WB_END
  100.57  
  100.58  WB_ENTRY(jboolean, WB_IsMethodQueuedForCompilation(JNIEnv* env, jobject o, jobject method))
  100.59 @@ -239,18 +253,28 @@
  100.60    return mh->queued_for_compilation();
  100.61  WB_END
  100.62  
  100.63 -WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject method))
  100.64 +WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
  100.65    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  100.66    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
  100.67 -  nmethod* code = mh->code();
  100.68 +  nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
  100.69    return (code != NULL ? code->comp_level() : CompLevel_none);
  100.70  WB_END
  100.71  
  100.72 -
  100.73 -WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level))
  100.74 +WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level, jboolean is_osr))
  100.75    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  100.76    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
  100.77 -  mh->set_not_compilable(comp_level, true /* report */, "WhiteBox");
  100.78 +  if (is_osr) {
  100.79 +    mh->set_not_osr_compilable(comp_level, true /* report */, "WhiteBox");
  100.80 +  } else {
  100.81 +    mh->set_not_compilable(comp_level, true /* report */, "WhiteBox");
  100.82 +  }
  100.83 +WB_END
  100.84 +
  100.85 +WB_ENTRY(jint, WB_GetMethodEntryBci(JNIEnv* env, jobject o, jobject method))
  100.86 +  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  100.87 +  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
  100.88 +  nmethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
  100.89 +  return (code != NULL && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci);
  100.90  WB_END
  100.91  
  100.92  WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
  100.93 @@ -261,12 +285,15 @@
  100.94    return result;
  100.95  WB_END
  100.96  
  100.97 -WB_ENTRY(jint, WB_GetCompileQueuesSize(JNIEnv* env, jobject o))
  100.98 -  return CompileBroker::queue_size(CompLevel_full_optimization) /* C2 */ +
  100.99 -         CompileBroker::queue_size(CompLevel_full_profile) /* C1 */;
 100.100 +WB_ENTRY(jint, WB_GetCompileQueueSize(JNIEnv* env, jobject o, jint comp_level))
 100.101 +  if (comp_level == CompLevel_any) {
 100.102 +    return CompileBroker::queue_size(CompLevel_full_optimization) /* C2 */ +
 100.103 +        CompileBroker::queue_size(CompLevel_full_profile) /* C1 */;
 100.104 +  } else {
 100.105 +    return CompileBroker::queue_size(comp_level);
 100.106 +  }
 100.107  WB_END
 100.108  
 100.109 -
 100.110  WB_ENTRY(jboolean, WB_TestSetForceInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
 100.111    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
 100.112    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
 100.113 @@ -275,10 +302,10 @@
 100.114    return result;
 100.115  WB_END
 100.116  
 100.117 -WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobject method, jint comp_level))
 100.118 +WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobject method, jint comp_level, jint bci))
 100.119    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
 100.120    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
 100.121 -  nmethod* nm = CompileBroker::compile_method(mh, InvocationEntryBci, comp_level, mh, mh->invocation_count(), "WhiteBox", THREAD);
 100.122 +  nmethod* nm = CompileBroker::compile_method(mh, bci, comp_level, mh, mh->invocation_count(), "WhiteBox", THREAD);
 100.123    MutexLockerEx mu(Compile_lock);
 100.124    return (mh->queued_for_compilation() || nm != NULL);
 100.125  WB_END
 100.126 @@ -324,7 +351,6 @@
 100.127    return (StringTable::lookup(name, len) != NULL);
 100.128  WB_END
 100.129  
 100.130 -
 100.131  WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
 100.132    Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
 100.133    Universe::heap()->collect(GCCause::_last_ditch_collection);
 100.134 @@ -423,31 +449,32 @@
 100.135    {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
 100.136  #endif // INCLUDE_NMT
 100.137    {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
 100.138 -  {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;)I",
 100.139 +  {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;Z)I",
 100.140                                                        (void*)&WB_DeoptimizeMethod  },
 100.141 -  {CC"isMethodCompiled",   CC"(Ljava/lang/reflect/Executable;)Z",
 100.142 +  {CC"isMethodCompiled",   CC"(Ljava/lang/reflect/Executable;Z)Z",
 100.143                                                        (void*)&WB_IsMethodCompiled  },
 100.144 -  {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;I)Z",
 100.145 +  {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;IZ)Z",
 100.146                                                        (void*)&WB_IsMethodCompilable},
 100.147    {CC"isMethodQueuedForCompilation",
 100.148        CC"(Ljava/lang/reflect/Executable;)Z",          (void*)&WB_IsMethodQueuedForCompilation},
 100.149    {CC"makeMethodNotCompilable",
 100.150 -      CC"(Ljava/lang/reflect/Executable;I)V",         (void*)&WB_MakeMethodNotCompilable},
 100.151 +      CC"(Ljava/lang/reflect/Executable;IZ)V",        (void*)&WB_MakeMethodNotCompilable},
 100.152    {CC"testSetDontInlineMethod",
 100.153        CC"(Ljava/lang/reflect/Executable;Z)Z",         (void*)&WB_TestSetDontInlineMethod},
 100.154    {CC"getMethodCompilationLevel",
 100.155 -      CC"(Ljava/lang/reflect/Executable;)I",          (void*)&WB_GetMethodCompilationLevel},
 100.156 -  {CC"getCompileQueuesSize",
 100.157 -      CC"()I",                                        (void*)&WB_GetCompileQueuesSize},
 100.158 +      CC"(Ljava/lang/reflect/Executable;Z)I",         (void*)&WB_GetMethodCompilationLevel},
 100.159 +  {CC"getMethodEntryBci",
 100.160 +      CC"(Ljava/lang/reflect/Executable;)I",          (void*)&WB_GetMethodEntryBci},
 100.161 +  {CC"getCompileQueueSize",
 100.162 +      CC"(I)I",                                       (void*)&WB_GetCompileQueueSize},
 100.163    {CC"testSetForceInlineMethod",
 100.164        CC"(Ljava/lang/reflect/Executable;Z)Z",         (void*)&WB_TestSetForceInlineMethod},
 100.165    {CC"enqueueMethodForCompilation",
 100.166 -      CC"(Ljava/lang/reflect/Executable;I)Z",         (void*)&WB_EnqueueMethodForCompilation},
 100.167 +      CC"(Ljava/lang/reflect/Executable;II)Z",        (void*)&WB_EnqueueMethodForCompilation},
 100.168    {CC"clearMethodState",
 100.169        CC"(Ljava/lang/reflect/Executable;)V",          (void*)&WB_ClearMethodState},
 100.170    {CC"isInStringTable",   CC"(Ljava/lang/String;)Z",  (void*)&WB_IsInStringTable  },
 100.171    {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
 100.172 -
 100.173    {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
 100.174  };
 100.175  
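The whitebox.cpp changes above make the compiler test hooks OSR-aware: deoptimizeMethod, isMethodCompiled, isMethodCompilable, getMethodCompilationLevel and makeMethodNotCompilable gain a trailing is_osr flag, getMethodEntryBci and a per-level getCompileQueueSize are new, and enqueueMethodForCompilation now takes an explicit bci. The sketch below shows how a test might drive these entry points from Java. It is illustrative only: it assumes a sun.hotspot.WhiteBox wrapper whose Java signatures match the JNI descriptors registered above, a launch line like the jtreg tests in this changeset (-Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmixed), and a hypothetical hotLoop method whose trip count is large enough to trigger an OSR compilation.

    import java.lang.reflect.Method;
    import sun.hotspot.WhiteBox;

    // Sketch of exercising the OSR-aware WhiteBox hooks registered above.
    // Assumes the sun.hotspot.WhiteBox test-library wrapper exposes Java
    // methods matching the JNI descriptors, and that the VM was started with
    // -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI.
    public class OsrWhiteBoxSketch {
        private static final WhiteBox WB = WhiteBox.getWhiteBox();

        // Hypothetical hot loop; a long enough trip count should trigger an
        // OSR compilation at the loop backedge (threshold-dependent).
        static int hotLoop() {
            int sum = 0;
            for (long i = 0; i < 1_000_000L; ++i) {
                sum += (int) i;
            }
            return sum;
        }

        public static void main(String[] args) throws Exception {
            Method m = OsrWhiteBoxSketch.class.getDeclaredMethod("hotLoop");

            hotLoop();  // run once; the backedge counter should request OSR
            // The boolean argument selects the OSR nmethod, not the normal one.
            System.out.println("osr compiled: " + WB.isMethodCompiled(m, true));
            System.out.println("osr level:    " + WB.getMethodCompilationLevel(m, true));

            // Record the OSR entry bci, throw the code away, then recompile at
            // that bci through the broker (4 == CompLevel_full_optimization).
            int bci = WB.getMethodEntryBci(m);
            WB.deoptimizeMethod(m, true);   // OSR nmethods
            WB.deoptimizeMethod(m, false);  // and the normal nmethod, if any
            WB.enqueueMethodForCompilation(m, 4, bci);

            System.out.println("C2 queue size: " + WB.getCompileQueueSize(4));
        }
    }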
   101.1 --- a/src/share/vm/runtime/arguments.cpp	Fri Aug 23 22:12:18 2013 +0100
   101.2 +++ b/src/share/vm/runtime/arguments.cpp	Fri Aug 30 09:50:49 2013 +0100
   101.3 @@ -1393,10 +1393,8 @@
   101.4  
   101.5  inline uintx max_heap_for_compressed_oops() {
   101.6    // Avoid sign flip.
   101.7 -  if (OopEncodingHeapMax < ClassMetaspaceSize + os::vm_page_size()) {
   101.8 -    return 0;
   101.9 -  }
  101.10 -  LP64_ONLY(return OopEncodingHeapMax - ClassMetaspaceSize - os::vm_page_size());
  101.11 +  assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
  101.12 +  LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size());
  101.13    NOT_LP64(ShouldNotReachHere(); return 0);
  101.14  }
  101.15  
  101.16 @@ -1448,6 +1446,35 @@
  101.17  #endif // ZERO
  101.18  }
  101.19  
  101.20 +
  101.21 +// NOTE: set_use_compressed_klass_ptrs() must be called after calling
  101.22 +// set_use_compressed_oops().
  101.23 +void Arguments::set_use_compressed_klass_ptrs() {
  101.24 +#ifndef ZERO
  101.25 +#ifdef _LP64
  101.26 +  // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
  101.27 +  if (!UseCompressedOops) {
  101.28 +    if (UseCompressedKlassPointers) {
  101.29 +      warning("UseCompressedKlassPointers requires UseCompressedOops");
  101.30 +    }
  101.31 +    FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
  101.32 +  } else {
  101.33 +    // Turn on UseCompressedKlassPointers too
  101.34 +    if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
  101.35 +      FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
  101.36 +    }
  101.37 +    // Check the ClassMetaspaceSize to make sure we use compressed klass ptrs.
  101.38 +    if (UseCompressedKlassPointers) {
  101.39 +      if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
  101.40 +        warning("Class metaspace size is too large for UseCompressedKlassPointers");
  101.41 +        FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
  101.42 +      }
  101.43 +    }
  101.44 +  }
  101.45 +#endif // _LP64
  101.46 +#endif // !ZERO
  101.47 +}
  101.48 +
  101.49  void Arguments::set_ergonomics_flags() {
  101.50  
  101.51    if (os::is_server_class_machine()) {
  101.52 @@ -1470,7 +1497,8 @@
  101.53      // server performance.   On server class machines, keep the default
  101.54      // off unless it is asked for.  Future work: either add bytecode rewriting
  101.55      // at link time, or rewrite bytecodes in non-shared methods.
  101.56 -    if (!DumpSharedSpaces && !RequireSharedSpaces) {
  101.57 +    if (!DumpSharedSpaces && !RequireSharedSpaces &&
  101.58 +        (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
  101.59        no_shared_spaces();
  101.60      }
  101.61    }
  101.62 @@ -1478,33 +1506,11 @@
  101.63  #ifndef ZERO
  101.64  #ifdef _LP64
  101.65    set_use_compressed_oops();
  101.66 -  // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
  101.67 -  if (!UseCompressedOops) {
  101.68 -    if (UseCompressedKlassPointers) {
  101.69 -      warning("UseCompressedKlassPointers requires UseCompressedOops");
  101.70 -    }
  101.71 -    FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
  101.72 -  } else {
  101.73 -    // Turn on UseCompressedKlassPointers too
  101.74 -    if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
  101.75 -      FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
  101.76 -    }
  101.77 -    // Set the ClassMetaspaceSize to something that will not need to be
  101.78 -    // expanded, since it cannot be expanded.
  101.79 -    if (UseCompressedKlassPointers) {
  101.80 -      if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
  101.81 -        warning("Class metaspace size is too large for UseCompressedKlassPointers");
  101.82 -        FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
  101.83 -      } else if (FLAG_IS_DEFAULT(ClassMetaspaceSize)) {
  101.84 -        // 100,000 classes seems like a good size, so 100M assumes around 1K
  101.85 -        // per klass.   The vtable and oopMap is embedded so we don't have a fixed
  101.86 -        // size per klass.   Eventually, this will be parameterized because it
  101.87 -        // would also be useful to determine the optimal size of the
  101.88 -        // systemDictionary.
  101.89 -        FLAG_SET_ERGO(uintx, ClassMetaspaceSize, 100*M);
  101.90 -      }
  101.91 -    }
  101.92 -  }
  101.93 +
  101.94 +  // set_use_compressed_klass_ptrs() must be called after calling
  101.95 +  // set_use_compressed_oops().
  101.96 +  set_use_compressed_klass_ptrs();
  101.97 +
  101.98    // Also checks that certain machines are slower with compressed oops
  101.99    // in vm_version initialization code.
 101.100  #endif // _LP64
 101.101 @@ -2153,7 +2159,7 @@
 101.102  
 101.103    status = status && verify_object_alignment();
 101.104  
 101.105 -  status = status && verify_min_value(ClassMetaspaceSize, 1*M,
 101.106 +  status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G,
 101.107                                        "ClassMetaspaceSize");
 101.108  
 101.109    status = status && verify_interval(MarkStackSizeMax,
 101.110 @@ -3273,33 +3279,22 @@
 101.111  }
 101.112  
 101.113  void Arguments::set_shared_spaces_flags() {
 101.114 -#ifdef _LP64
 101.115 -    const bool must_share = DumpSharedSpaces || RequireSharedSpaces;
 101.116 -
 101.117 -    // CompressedOops cannot be used with CDS.  The offsets of oopmaps and
 101.118 -    // static fields are incorrect in the archive.  With some more clever
 101.119 -    // initialization, this restriction can probably be lifted.
 101.120 -    if (UseCompressedOops) {
 101.121 -      if (must_share) {
 101.122 -          warning("disabling compressed oops because of %s",
 101.123 -                  DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on");
 101.124 -          FLAG_SET_CMDLINE(bool, UseCompressedOops, false);
 101.125 -          FLAG_SET_CMDLINE(bool, UseCompressedKlassPointers, false);
 101.126 -      } else {
 101.127 -        // Prefer compressed oops to class data sharing
 101.128 -        if (UseSharedSpaces && Verbose) {
 101.129 -          warning("turning off use of shared archive because of compressed oops");
 101.130 -        }
 101.131 -        no_shared_spaces();
 101.132 -      }
 101.133 -    }
 101.134 -#endif
 101.135 -
 101.136    if (DumpSharedSpaces) {
 101.137      if (RequireSharedSpaces) {
 101.138        warning("cannot dump shared archive while using shared archive");
 101.139      }
 101.140      UseSharedSpaces = false;
 101.141 +#ifdef _LP64
 101.142 +    if (!UseCompressedOops || !UseCompressedKlassPointers) {
 101.143 +      vm_exit_during_initialization(
 101.144 +        "Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL);
 101.145 +    }
 101.146 +  } else {
 101.147 +    // UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces.
 101.148 +    if (!UseCompressedOops || !UseCompressedKlassPointers) {
 101.149 +      no_shared_spaces();
 101.150 +    }
 101.151 +#endif
 101.152    }
 101.153  }
 101.154  
   102.1 --- a/src/share/vm/runtime/arguments.hpp	Fri Aug 23 22:12:18 2013 +0100
   102.2 +++ b/src/share/vm/runtime/arguments.hpp	Fri Aug 30 09:50:49 2013 +0100
   102.3 @@ -309,6 +309,7 @@
   102.4    static void set_g1_gc_flags();
   102.5    // GC ergonomics
   102.6    static void set_use_compressed_oops();
   102.7 +  static void set_use_compressed_klass_ptrs();
   102.8    static void set_ergonomics_flags();
   102.9    static void set_shared_spaces_flags();
  102.10    // limits the given memory size by the maximum amount of memory this process is
   103.1 --- a/src/share/vm/runtime/compilationPolicy.cpp	Fri Aug 23 22:12:18 2013 +0100
   103.2 +++ b/src/share/vm/runtime/compilationPolicy.cpp	Fri Aug 30 09:50:49 2013 +0100
   103.3 @@ -138,6 +138,23 @@
   103.4    return false;
   103.5  }
   103.6  
   103.7 +// Returns true if m is allowed to be osr compiled
   103.8 +bool CompilationPolicy::can_be_osr_compiled(methodHandle m, int comp_level) {
   103.9 +  bool result = false;
  103.10 +  if (comp_level == CompLevel_all) {
  103.11 +    if (TieredCompilation) {
  103.12 +      // enough to be osr compilable at any level for tiered
  103.13 +      result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
  103.14 +    } else {
  103.15 +      // must be osr compilable at available level for non-tiered
  103.16 +      result = !m->is_not_osr_compilable(CompLevel_highest_tier);
  103.17 +    }
  103.18 +  } else if (is_compile(comp_level)) {
  103.19 +    result = !m->is_not_osr_compilable(comp_level);
  103.20 +  }
  103.21 +  return (result && can_be_compiled(m, comp_level));
  103.22 +}
  103.23 +
  103.24  bool CompilationPolicy::is_compilation_enabled() {
  103.25    // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  103.26    return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
  103.27 @@ -458,7 +475,7 @@
  103.28    const int hot_count = m->backedge_count();
  103.29    const char* comment = "backedge_count";
  103.30  
  103.31 -  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
  103.32 +  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
  103.33      CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
  103.34      NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  103.35    }
  103.36 @@ -514,7 +531,7 @@
  103.37    const int hot_count = m->backedge_count();
  103.38    const char* comment = "backedge_count";
  103.39  
  103.40 -  if (is_compilation_enabled() && !m->is_not_osr_compilable(comp_level) && can_be_compiled(m, comp_level)) {
  103.41 +  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
  103.42      CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
  103.43      NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
  103.44    }
   104.1 --- a/src/share/vm/runtime/compilationPolicy.hpp	Fri Aug 23 22:12:18 2013 +0100
   104.2 +++ b/src/share/vm/runtime/compilationPolicy.hpp	Fri Aug 30 09:50:49 2013 +0100
   104.3 @@ -52,6 +52,8 @@
   104.4    static bool must_be_compiled(methodHandle m, int comp_level = CompLevel_all);
   104.5    // m is allowed to be compiled
   104.6    static bool can_be_compiled(methodHandle m, int comp_level = CompLevel_all);
   104.7 +  // m is allowed to be osr compiled
   104.8 +  static bool can_be_osr_compiled(methodHandle m, int comp_level = CompLevel_all);
   104.9    static bool is_compilation_enabled();
  104.10    static void set_policy(CompilationPolicy* policy) { _policy = policy; }
  104.11    static CompilationPolicy* policy()                { return _policy; }
   105.1 --- a/src/share/vm/runtime/frame.cpp	Fri Aug 23 22:12:18 2013 +0100
   105.2 +++ b/src/share/vm/runtime/frame.cpp	Fri Aug 30 09:50:49 2013 +0100
   105.3 @@ -23,6 +23,7 @@
   105.4   */
   105.5  
   105.6  #include "precompiled.hpp"
   105.7 +#include "compiler/abstractCompiler.hpp"
   105.8  #include "compiler/disassembler.hpp"
   105.9  #include "gc_interface/collectedHeap.inline.hpp"
  105.10  #include "interpreter/interpreter.hpp"
  105.11 @@ -559,7 +560,7 @@
  105.12  
  105.13    st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
  105.14    if (sp() != NULL)
  105.15 -    st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());
  105.16 +    st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc());
  105.17  
  105.18    if (StubRoutines::contains(pc())) {
  105.19      st->print_cr(")");
  105.20 @@ -720,11 +721,14 @@
  105.21      } else if (_cb->is_buffer_blob()) {
  105.22        st->print("v  ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
  105.23      } else if (_cb->is_nmethod()) {
  105.24 -      Method* m = ((nmethod *)_cb)->method();
  105.25 +      nmethod* nm = (nmethod*)_cb;
  105.26 +      Method* m = nm->method();
  105.27        if (m != NULL) {
  105.28          m->name_and_sig_as_C_string(buf, buflen);
  105.29 -        st->print("J  %s @ " PTR_FORMAT " [" PTR_FORMAT "+" SIZE_FORMAT "]",
  105.30 -                  buf, _pc, _cb->code_begin(), _pc - _cb->code_begin());
  105.31 +        st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]",
  105.32 +                  nm->compile_id(), (nm->is_osr_method() ? "%" : ""),
  105.33 +                  ((nm->compiler() != NULL) ? nm->compiler()->name() : ""),
  105.34 +                  buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin());
  105.35        } else {
  105.36          st->print("J  " PTR_FORMAT, pc());
  105.37        }
   106.1 --- a/src/share/vm/runtime/globals.hpp	Fri Aug 23 22:12:18 2013 +0100
   106.2 +++ b/src/share/vm/runtime/globals.hpp	Fri Aug 30 09:50:49 2013 +0100
   106.3 @@ -3036,7 +3036,7 @@
   106.4    product(uintx, MaxMetaspaceSize, max_uintx,                               \
   106.5            "Maximum size of Metaspaces (in bytes)")                          \
   106.6                                                                              \
   106.7 -  product(uintx, ClassMetaspaceSize, 2*M,                                   \
   106.8 +  product(uintx, ClassMetaspaceSize, 1*G,                                   \
   106.9            "Maximum size of InstanceKlass area in Metaspace used for "       \
  106.10            "UseCompressedKlassPointers")                                     \
  106.11                                                                              \
   107.1 --- a/src/share/vm/runtime/init.cpp	Fri Aug 23 22:12:18 2013 +0100
   107.2 +++ b/src/share/vm/runtime/init.cpp	Fri Aug 30 09:50:49 2013 +0100
   107.3 @@ -1,5 +1,5 @@
   107.4  /*
   107.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   107.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   107.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   107.8   *
   107.9   * This code is free software; you can redistribute it and/or modify it
  107.10 @@ -95,7 +95,6 @@
  107.11    management_init();
  107.12    bytecodes_init();
  107.13    classLoader_init();
  107.14 -  Metaspace::global_initialize(); // must be before codeCache
  107.15    codeCache_init();
  107.16    VM_Version_init();
  107.17    os_init_globals();
   108.1 --- a/src/share/vm/runtime/sweeper.hpp	Fri Aug 23 22:12:18 2013 +0100
   108.2 +++ b/src/share/vm/runtime/sweeper.hpp	Fri Aug 30 09:50:49 2013 +0100
   108.3 @@ -83,6 +83,7 @@
   108.4    static jlong peak_disconnect_time()        { return _peak_disconnect_time; }
   108.5  
   108.6  #ifdef ASSERT
   108.7 +  static bool is_sweeping(nmethod* which) { return _current == which; }
   108.8    // Keep track of sweeper activity in the ring buffer
   108.9    static void record_sweep(nmethod* nm, int line);
  108.10    static void report_events(int id, address entry);
   109.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Aug 23 22:12:18 2013 +0100
   109.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Aug 30 09:50:49 2013 +0100
   109.3 @@ -294,7 +294,7 @@
   109.4    nonstatic_field(InstanceKlass,               _java_fields_count,                            u2)                                    \
   109.5    nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                         \
   109.6    nonstatic_field(InstanceKlass,               _class_loader_data,                            ClassLoaderData*)                      \
   109.7 -  nonstatic_field(InstanceKlass,               _source_file_name,                             Symbol*)                               \
   109.8 +  nonstatic_field(InstanceKlass,               _source_file_name_index,                            u2)                               \
   109.9    nonstatic_field(InstanceKlass,               _source_debug_extension,                       char*)                                 \
  109.10    nonstatic_field(InstanceKlass,               _inner_classes,                               Array<jushort>*)                       \
  109.11    nonstatic_field(InstanceKlass,               _nonstatic_field_size,                         int)                                   \
  109.12 @@ -313,7 +313,7 @@
  109.13    nonstatic_field(InstanceKlass,               _jni_ids,                                      JNIid*)                                \
  109.14    nonstatic_field(InstanceKlass,               _osr_nmethods_head,                            nmethod*)                              \
  109.15    nonstatic_field(InstanceKlass,               _breakpoints,                                  BreakpointInfo*)                       \
  109.16 -  nonstatic_field(InstanceKlass,               _generic_signature,                            Symbol*)                               \
  109.17 +  nonstatic_field(InstanceKlass,               _generic_signature_index,                           u2)                               \
  109.18    nonstatic_field(InstanceKlass,               _methods_jmethod_ids,                          jmethodID*)                            \
  109.19    nonstatic_field(InstanceKlass,               _methods_cached_itable_indices,                int*)                                  \
  109.20    volatile_nonstatic_field(InstanceKlass,      _idnum_allocated_count,                        u2)                                    \
  109.21 @@ -1096,10 +1096,10 @@
  109.22                                                                                                                                       \
  109.23    c2_nonstatic_field(MachCallRuntimeNode,  _name,                  const char*)                                                      \
  109.24                                                                                                                                       \
  109.25 -  c2_nonstatic_field(PhaseCFG,           _num_blocks,              uint)                                                             \
  109.26 +  c2_nonstatic_field(PhaseCFG,           _number_of_blocks,        uint)                                                             \
  109.27    c2_nonstatic_field(PhaseCFG,           _blocks,                  Block_List)                                                       \
  109.28    c2_nonstatic_field(PhaseCFG,           _node_to_block_mapping,   Block_Array)                                                      \
  109.29 -  c2_nonstatic_field(PhaseCFG,           _broot,                   Block*)                                                           \
  109.30 +  c2_nonstatic_field(PhaseCFG,           _root_block,              Block*)                                                           \
  109.31                                                                                                                                       \
  109.32    c2_nonstatic_field(PhaseRegAlloc,      _node_regs,               OptoRegPair*)                                                     \
  109.33    c2_nonstatic_field(PhaseRegAlloc,      _node_regs_max_index,     uint)                                                             \
   110.1 --- a/src/share/vm/runtime/vm_version.cpp	Fri Aug 23 22:12:18 2013 +0100
   110.2 +++ b/src/share/vm/runtime/vm_version.cpp	Fri Aug 30 09:50:49 2013 +0100
   110.3 @@ -231,6 +231,8 @@
   110.4          #define HOTSPOT_BUILD_COMPILER "Workshop 5.9"
   110.5        #elif __SUNPRO_CC == 0x5100
   110.6          #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u1"
   110.7 +      #elif __SUNPRO_CC == 0x5120
   110.8 +        #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u3"
   110.9        #else
  110.10          #define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC)
  110.11        #endif
   111.1 --- a/src/share/vm/services/memoryPool.cpp	Fri Aug 23 22:12:18 2013 +0100
   111.2 +++ b/src/share/vm/services/memoryPool.cpp	Fri Aug 30 09:50:49 2013 +0100
   111.3 @@ -268,11 +268,11 @@
   111.4  }
   111.5  
   111.6  size_t MetaspacePool::used_in_bytes() {
   111.7 -  return MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType);
   111.8 +  return MetaspaceAux::allocated_used_bytes();
   111.9  }
  111.10  
  111.11  size_t MetaspacePool::capacity_in_bytes() const {
  111.12 -  return MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType);
  111.13 +  return MetaspaceAux::allocated_capacity_bytes();
  111.14  }
  111.15  
  111.16  size_t MetaspacePool::calculate_max_size() const {
   112.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 23 22:12:18 2013 +0100
   112.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 30 09:50:49 2013 +0100
   112.3 @@ -362,6 +362,8 @@
   112.4  // Klass encoding metaspace max size
   112.5  const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes;
   112.6  
   112.7 +const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000));  // 32*G
   112.8 +
   112.9  // Machine dependent stuff
  112.10  
  112.11  #ifdef TARGET_ARCH_x86
   113.1 --- a/src/share/vm/utilities/growableArray.hpp	Fri Aug 23 22:12:18 2013 +0100
   113.2 +++ b/src/share/vm/utilities/growableArray.hpp	Fri Aug 30 09:50:49 2013 +0100
   113.3 @@ -194,6 +194,7 @@
   113.4  
   113.5    void  clear()                 { _len = 0; }
   113.6    int   length() const          { return _len; }
   113.7 +  int   max_length() const      { return _max; }
   113.8    void  trunc_to(int l)         { assert(l <= _len,"cannot increase length"); _len = l; }
   113.9    bool  is_empty() const        { return _len == 0; }
  113.10    bool  is_nonempty() const     { return _len != 0; }
   114.1 --- a/src/share/vm/utilities/taskqueue.hpp	Fri Aug 23 22:12:18 2013 +0100
   114.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Fri Aug 30 09:50:49 2013 +0100
   114.3 @@ -1,5 +1,5 @@
   114.4  /*
   114.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   114.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   114.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   114.8   *
   114.9   * This code is free software; you can redistribute it and/or modify it
  114.10 @@ -132,6 +132,8 @@
  114.11  }
  114.12  #endif // TASKQUEUE_STATS
  114.13  
  114.14 +// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.
  114.15 +
  114.16  template <unsigned int N, MEMFLAGS F>
  114.17  class TaskQueueSuper: public CHeapObj<F> {
  114.18  protected:
  114.19 @@ -249,7 +251,36 @@
  114.20    TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
  114.21  };
  114.22  
  114.23 -
  114.24 +//
  114.25 +// GenericTaskQueue implements an ABP, Aurora-Blumofe-Plaxton, double-
  114.26 +// ended-queue (deque), intended for use in work stealing. Queue operations
  114.27 +// are non-blocking.
  114.28 +//
  114.29 +// A queue owner thread performs push() and pop_local() operations on one end
  114.30 +// of the queue, while other threads may steal work using the pop_global()
  114.31 +// method.
  114.32 +//
  114.33 +// The main difference to the original algorithm is that this
  114.34 +// implementation allows wrap-around at the end of its allocated
  114.35 +// storage, which is an array.
  114.36 +//
  114.37 +// The original paper is:
  114.38 +//
  114.39 +// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
  114.40 +// Thread scheduling for multiprogrammed multiprocessors.
  114.41 +// Theory of Computing Systems 34, 2 (2001), 115-144.
  114.42 +//
  114.43 +// The following paper provides a correctness proof and an
  114.44 +// implementation for weakly ordered memory models including (pseudo-)
  114.45 +// code containing memory barriers for a Chase-Lev deque. Chase-Lev is
  114.46 +// similar to ABP, with the main difference that it allows resizing of the
  114.47 +// underlying storage:
  114.48 +//
  114.49 +// Le, N. M., Pop, A., Cohen, A., and Zappa Nardelli, F.
  114.50 +// Correct and efficient work-stealing for weak memory models
  114.51 +// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
  114.52 +// practice of parallel programming (PPoPP 2013), 69-80
  114.53 +//
  114.54  
  114.55  template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
  114.56  class GenericTaskQueue: public TaskQueueSuper<N, F> {
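The comment block above describes GenericTaskQueue's ABP-style deque in prose. As a rough, executable illustration of the push / pop_local / pop_global protocol it refers to, here is a minimal bounded work-stealing deque in Java with the same wrap-around indexing over a fixed array. The class and method names are invented for this sketch; it is not the HotSpot implementation, and it leans on Java's volatile/atomic ordering guarantees instead of the explicit memory barriers a C++ version needs on weakly ordered hardware.

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Minimal ABP-style bounded work-stealing deque (sketch, not the HotSpot code).
    // The owner thread uses push()/popLocal() at the bottom; other threads steal
    // from the top with popGlobal(). Indices grow monotonically and are mapped
    // into the fixed-size array with a power-of-two mask, i.e. wrap-around.
    final class WorkStealingDeque<E> {
        private final AtomicReferenceArray<E> buffer;
        private final int mask;                          // capacity - 1, capacity a power of two
        private volatile long bottom = 0;                // owner end, written only by the owner
        private final AtomicLong top = new AtomicLong(); // thief end, advanced by CAS

        WorkStealingDeque(int capacityPowerOfTwo) {
            buffer = new AtomicReferenceArray<>(capacityPowerOfTwo);
            mask = capacityPowerOfTwo - 1;
        }

        // Owner only: returns false instead of growing when the deque is full.
        boolean push(E task) {
            long b = bottom, t = top.get();
            if (b - t >= buffer.length()) {
                return false;                            // full
            }
            buffer.set((int) (b & mask), task);          // wrap-around store
            bottom = b + 1;                              // publish the new element
            return true;
        }

        // Owner only: take from the bottom; races with thieves only on the last element.
        E popLocal() {
            long b = bottom - 1;
            bottom = b;                                  // claim the slot before checking top
            long t = top.get();
            if (b < t) {                                 // deque was already empty
                bottom = t;
                return null;
            }
            E task = buffer.get((int) (b & mask));
            if (b > t) {
                return task;                             // more than one element: no race possible
            }
            // Exactly one element left: resolve the race with thieves via CAS on top.
            if (!top.compareAndSet(t, t + 1)) {
                task = null;                             // a thief won
            }
            bottom = t + 1;
            return task;
        }

        // Any other thread: steal from the top; a CAS failure means try another queue.
        E popGlobal() {
            long t = top.get();
            long b = bottom;
            if (t >= b) {
                return null;                             // empty (or transiently so)
            }
            E task = buffer.get((int) (t & mask));
            return top.compareAndSet(t, t + 1) ? task : null;
        }
    }

Because the indices only ever increase, the CAS on top cannot suffer from ABA even though array slots are reused once the indices wrap around the buffer.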
   115.1 --- a/src/share/vm/utilities/vmError.cpp	Fri Aug 23 22:12:18 2013 +0100
   115.2 +++ b/src/share/vm/utilities/vmError.cpp	Fri Aug 30 09:50:49 2013 +0100
   115.3 @@ -586,6 +586,13 @@
   115.4            while (count++ < StackPrintLimit) {
   115.5               fr.print_on_error(st, buf, sizeof(buf));
   115.6               st->cr();
   115.7 +             // Compiled code may use EBP register on x86 so it looks like
   115.8 +             // non-walkable C frame. Use frame.sender() for java frames.
   115.9 +             if (_thread && _thread->is_Java_thread() && fr.is_java_frame()) {
  115.10 +               RegisterMap map((JavaThread*)_thread, false); // No update
  115.11 +               fr = fr.sender(&map);
  115.12 +               continue;
  115.13 +             }
  115.14               if (os::is_first_C_frame(&fr)) break;
  115.15               fr = os::get_sender_for_C_frame(&fr);
  115.16            }
   116.1 --- a/test/Makefile	Fri Aug 23 22:12:18 2013 +0100
   116.2 +++ b/test/Makefile	Fri Aug 30 09:50:49 2013 +0100
   116.3 @@ -210,9 +210,7 @@
   116.4  	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help
   116.5  	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X
   116.6  	$(RM) $(PRODUCT_HOME)/jre/lib/*/client/classes.jsa
   116.7 -	$(RM) $(PRODUCT_HOME)/jre/lib/*/client/classes_g.jsa
   116.8  	$(RM) $(PRODUCT_HOME)/jre/bin/client/classes.jsa
   116.9 -	$(RM) $(PRODUCT_HOME)/jre/bin/client/classes_g.jsa
  116.10  	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -Xshare:dump
  116.11  
  116.12  PHONY_LIST += clienttest
   117.1 --- a/test/compiler/ciReplay/common.sh	Fri Aug 23 22:12:18 2013 +0100
   117.2 +++ b/test/compiler/ciReplay/common.sh	Fri Aug 30 09:50:49 2013 +0100
   117.3 @@ -89,7 +89,10 @@
   117.4  # $1 - initial error_code
   117.5  common_tests() {
   117.6      positive_test $1 "COMMON :: THE SAME FLAGS"
   117.7 -    positive_test `expr $1 + 1` "COMMON :: TIERED" -XX:+TieredCompilation
   117.8 +    if [ $tiered_available -eq 1 ]
   117.9 +    then
  117.10 +        positive_test `expr $1 + 1` "COMMON :: TIERED" -XX:+TieredCompilation
  117.11 +    fi
  117.12  }
  117.13  
  117.14  # $1 - initial error_code
  117.15 @@ -115,8 +118,11 @@
  117.16      then
  117.17          negative_test $1 "SERVER :: NON-TIERED" -XX:-TieredCompilation \
  117.18                  -server
  117.19 -        positive_test `expr $1 + 1` "SERVER :: TIERED" -XX:+TieredCompilation \
  117.20 -                -server
  117.21 +        if [ $tiered_available -eq 1 ]
  117.22 +        then
  117.23 +            positive_test `expr $1 + 1` "SERVER :: TIERED" -XX:+TieredCompilation \
  117.24 +                    -server
  117.25 +        fi
  117.26      fi
  117.27      nontiered_tests `expr $1 + 2` $client_level 
  117.28  }
  117.29 @@ -167,6 +173,9 @@
  117.30          grep -c Client`
  117.31  server_available=`${JAVA} ${TESTVMOPTS} -server -Xinternalversion 2>&1 | \
  117.32          grep -c Server`
  117.33 +tiered_available=`${JAVA} ${TESTVMOPTS} -XX:+TieredCompilation -XX:+PrintFlagsFinal -version | \
  117.34 +        grep TieredCompilation | \
  117.35 +        grep -c true`
  117.36  is_tiered=`${JAVA} ${TESTVMOPTS} -XX:+PrintFlagsFinal -version | \
  117.37          grep TieredCompilation | \
  117.38          grep -c true`
  117.39 @@ -177,6 +186,7 @@
  117.40  
  117.41  echo "client_available=$client_available"
  117.42  echo "server_available=$server_available"
  117.43 +echo "tiered_available=$tiered_available"
  117.44  echo "is_tiered=$is_tiered"
  117.45  
  117.46  # crash vm in compiler thread with generation replay data and 'small' dump-file
  117.47 @@ -186,6 +196,11 @@
  117.48      then
  117.49          # enable core dump
  117.50          ulimit -c unlimited
  117.51 +
  117.52 +        if [ $VM_OS = "solaris" ]
  117.53 +        then
  117.54 +            coreadm -p core $$
  117.55 +        fi
  117.56      fi
  117.57  
  117.58      cmd="${JAVA} ${TESTVMOPTS} $@ \
   118.1 --- a/test/compiler/whitebox/ClearMethodStateTest.java	Fri Aug 23 22:12:18 2013 +0100
   118.2 +++ b/test/compiler/whitebox/ClearMethodStateTest.java	Fri Aug 30 09:50:49 2013 +0100
   118.3 @@ -23,6 +23,7 @@
   118.4  
   118.5  /*
   118.6   * @test ClearMethodStateTest
   118.7 + * @bug 8006683 8007288 8022832
   118.8   * @library /testlibrary /testlibrary/whitebox
   118.9   * @build ClearMethodStateTest
  118.10   * @run main ClassFileInstaller sun.hotspot.WhiteBox
  118.11 @@ -59,16 +60,19 @@
  118.12          WHITE_BOX.clearMethodState(method);
  118.13          checkCompiled();
  118.14          WHITE_BOX.clearMethodState(method);
  118.15 -        WHITE_BOX.deoptimizeMethod(method);
  118.16 +        deoptimize();
  118.17          checkNotCompiled();
  118.18  
  118.19 -
  118.20 +        if (testCase.isOsr) {
  118.21 +            // this part of the test isn't applicable to the OSR test case
  118.22 +            return;
  118.23 +        }
  118.24          if (!TIERED_COMPILATION) {
  118.25              WHITE_BOX.clearMethodState(method);
  118.26              compile(COMPILE_THRESHOLD);
  118.27              checkCompiled();
  118.28  
  118.29 -            WHITE_BOX.deoptimizeMethod(method);
  118.30 +            deoptimize();
  118.31              checkNotCompiled();
  118.32              WHITE_BOX.clearMethodState(method);
  118.33  
   119.1 --- a/test/compiler/whitebox/CompilerWhiteBoxTest.java	Fri Aug 23 22:12:18 2013 +0100
   119.2 +++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java	Fri Aug 30 09:50:49 2013 +0100
   119.3 @@ -44,8 +44,14 @@
   119.4      protected static int COMP_LEVEL_ANY = -1;
   119.5      /** {@code CompLevel::CompLevel_simple} -- C1 */
   119.6      protected static int COMP_LEVEL_SIMPLE = 1;
   119.7 +    /** {@code CompLevel::CompLevel_limited_profile} -- C1, invocation &amp; backedge counters */
   119.8 +    protected static int COMP_LEVEL_LIMITED_PROFILE = 2;
   119.9 +    /** {@code CompLevel::CompLevel_full_profile} -- C1, invocation &amp; backedge counters + mdo */
  119.10 +    protected static int COMP_LEVEL_FULL_PROFILE = 3;
  119.11      /** {@code CompLevel::CompLevel_full_optimization} -- C2 or Shark */
  119.12      protected static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
  119.13 +    /** Maximal value for CompLevel */
  119.14 +    protected static int COMP_LEVEL_MAX = COMP_LEVEL_FULL_OPTIMIZATION;
  119.15  
  119.16      /** Instance of WhiteBox */
  119.17      protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
  119.18 @@ -64,6 +70,21 @@
  119.19      /** Flag for verbose output, true if {@code -Dverbose} specified */
  119.20      protected static final boolean IS_VERBOSE
  119.21              = System.getProperty("verbose") != null;
  119.22 +    /** count of invocations to trigger compilation */
  119.23 +    protected static final int THRESHOLD;
  119.24 +    /** count of invocations to trigger OSR compilation */
  119.25 +    protected static final long BACKEDGE_THRESHOLD;
  119.26 +
  119.27 +    static {
  119.28 +        if (TIERED_COMPILATION) {
  119.29 +            THRESHOLD = 150000;
  119.30 +            BACKEDGE_THRESHOLD = 0xFFFFFFFFL;
  119.31 +        } else {
  119.32 +            THRESHOLD = COMPILE_THRESHOLD;
  119.33 +            BACKEDGE_THRESHOLD = COMPILE_THRESHOLD * Long.parseLong(getVMOption(
  119.34 +                    "OnStackReplacePercentage"));
  119.35 +        }
  119.36 +    }
  119.37  
  119.38      /**
  119.39       * Returns value of VM option.
  119.40 @@ -112,7 +133,7 @@
  119.41  
  119.42      /** tested method */
  119.43      protected final Executable method;
  119.44 -    private final Callable<Integer> callable;
  119.45 +    protected final TestCase testCase;
  119.46  
  119.47      /**
  119.48       * Constructor.
  119.49 @@ -123,7 +144,7 @@
  119.50          Objects.requireNonNull(testCase);
  119.51          System.out.println("TEST CASE:" + testCase.name());
  119.52          method = testCase.executable;
  119.53 -        callable = testCase.callable;
  119.54 +        this.testCase = testCase;
  119.55      }
  119.56  
  119.57      /**
  119.58 @@ -169,12 +190,18 @@
  119.59          if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
  119.60              throw new RuntimeException(method + " must not be in queue");
  119.61          }
  119.62 -        if (WHITE_BOX.isMethodCompiled(method)) {
  119.63 +        if (WHITE_BOX.isMethodCompiled(method, false)) {
  119.64              throw new RuntimeException(method + " must be not compiled");
  119.65          }
  119.66 -        if (WHITE_BOX.getMethodCompilationLevel(method) != 0) {
  119.67 +        if (WHITE_BOX.getMethodCompilationLevel(method, false) != 0) {
  119.68              throw new RuntimeException(method + " comp_level must be == 0");
  119.69          }
  119.70 +        if (WHITE_BOX.isMethodCompiled(method, true)) {
  119.71 +            throw new RuntimeException(method + " must be not osr_compiled");
  119.72 +        }
  119.73 +        if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) {
  119.74 +            throw new RuntimeException(method + " osr_comp_level must be == 0");
  119.75 +        }
  119.76      }
  119.77  
  119.78      /**
  119.79 @@ -192,14 +219,46 @@
  119.80                      method, System.currentTimeMillis() - start);
  119.81              return;
  119.82          }
  119.83 -        if (!WHITE_BOX.isMethodCompiled(method)) {
  119.84 -            throw new RuntimeException(method + " must be compiled");
  119.85 +        if (!WHITE_BOX.isMethodCompiled(method, testCase.isOsr)) {
  119.86 +            throw new RuntimeException(method + " must be "
  119.87 +                    + (testCase.isOsr ? "osr_" : "") + "compiled");
  119.88          }
  119.89 -        if (WHITE_BOX.getMethodCompilationLevel(method) == 0) {
  119.90 -            throw new RuntimeException(method + " comp_level must be != 0");
  119.91 +        if (WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr) == 0) {
  119.92 +            throw new RuntimeException(method
  119.93 +                    + (testCase.isOsr ? " osr_" : " ")
  119.94 +                    + "comp_level must be != 0");
  119.95          }
  119.96      }
  119.97  
  119.98 +    protected final void deoptimize() {
  119.99 +        WHITE_BOX.deoptimizeMethod(method, testCase.isOsr);
 119.100 +        if (testCase.isOsr) {
 119.101 +            WHITE_BOX.deoptimizeMethod(method, false);
 119.102 +        }
 119.103 +    }
 119.104 +
 119.105 +    protected final int getCompLevel() {
 119.106 +        return WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr);
 119.107 +    }
 119.108 +
 119.109 +    protected final boolean isCompilable() {
 119.110 +        return WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY,
 119.111 +                testCase.isOsr);
 119.112 +    }
 119.113 +
 119.114 +    protected final boolean isCompilable(int compLevel) {
 119.115 +        return WHITE_BOX.isMethodCompilable(method, compLevel, testCase.isOsr);
 119.116 +    }
 119.117 +
 119.118 +    protected final void makeNotCompilable() {
 119.119 +        WHITE_BOX.makeMethodNotCompilable(method, COMP_LEVEL_ANY,
 119.120 +                testCase.isOsr);
 119.121 +    }
 119.122 +
 119.123 +    protected final void makeNotCompilable(int compLevel) {
 119.124 +        WHITE_BOX.makeMethodNotCompilable(method, compLevel, testCase.isOsr);
 119.125 +    }
 119.126 +
 119.127      /**
 119.128       * Waits for completion of background compilation of {@linkplain #method}.
 119.129       */
 119.130 @@ -226,12 +285,18 @@
 119.131      protected final void printInfo() {
 119.132          System.out.printf("%n%s:%n", method);
 119.133          System.out.printf("\tcompilable:\t%b%n",
 119.134 -                WHITE_BOX.isMethodCompilable(method));
 119.135 +                WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY, false));
 119.136          System.out.printf("\tcompiled:\t%b%n",
 119.137 -                WHITE_BOX.isMethodCompiled(method));
 119.138 +                WHITE_BOX.isMethodCompiled(method, false));
 119.139          System.out.printf("\tcomp_level:\t%d%n",
 119.140 -                WHITE_BOX.getMethodCompilationLevel(method));
 119.141 -        System.out.printf("\tin_queue:\t%b%n",
 119.142 +                WHITE_BOX.getMethodCompilationLevel(method, false));
 119.143 +        System.out.printf("\tosr_compilable:\t%b%n",
 119.144 +                WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY, true));
 119.145 +        System.out.printf("\tosr_compiled:\t%b%n",
 119.146 +                WHITE_BOX.isMethodCompiled(method, true));
 119.147 +        System.out.printf("\tosr_comp_level:\t%d%n",
 119.148 +                WHITE_BOX.getMethodCompilationLevel(method, true));
 119.149 +         System.out.printf("\tin_queue:\t%b%n",
 119.150                  WHITE_BOX.isMethodQueuedForCompilation(method));
 119.151          System.out.printf("compile_queues_size:\t%d%n%n",
 119.152                  WHITE_BOX.getCompileQueuesSize());
 119.153 @@ -244,18 +309,22 @@
 119.154  
 119.155      /**
 119.156       * Tries to trigger compilation of {@linkplain #method} by call
 119.157 -     * {@linkplain #callable} enough times.
 119.158 +     * {@linkplain #testCase.callable} enough times.
 119.159       *
 119.160       * @return accumulated result
 119.161       * @see #compile(int)
 119.162       */
 119.163      protected final int compile() {
 119.164 -        return compile(Math.max(COMPILE_THRESHOLD, 150000));
 119.165 +        if (testCase.isOsr) {
 119.166 +            return compile(1);
 119.167 +        } else {
 119.168 +            return compile(THRESHOLD);
 119.169 +        }
 119.170      }
 119.171  
 119.172      /**
 119.173       * Tries to trigger compilation of {@linkplain #method} by call
 119.174 -     * {@linkplain #callable} specified times.
 119.175 +     * {@linkplain #testCase.callable} specified times.
 119.176       *
 119.177       * @param count invocation count
 119.178       * @return accumulated result
 119.179 @@ -265,7 +334,7 @@
 119.180          Integer tmp;
 119.181          for (int i = 0; i < count; ++i) {
 119.182              try {
 119.183 -                tmp = callable.call();
 119.184 +                tmp = testCase.callable.call();
 119.185              } catch (Exception e) {
 119.186                  tmp = null;
 119.187              }
 119.188 @@ -283,23 +352,36 @@
 119.189   */
 119.190  enum TestCase {
 119.191      /** constructor test case */
 119.192 -    CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE),
 119.193 +    CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE, false),
 119.194      /** method test case */
 119.195 -    METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE),
 119.196 +    METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE, false),
 119.197      /** static method test case */
 119.198 -    STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE);
 119.199 +    STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE, false),
 119.200 +
 119.201 +    /** OSR constructor test case */
 119.202 +    OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR,
 119.203 +            Helper.OSR_CONSTRUCTOR_CALLABLE, true),
 119.204 +     /** OSR method test case */
 119.205 +    OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true),
 119.206 +    /** OSR static method test case */
 119.207 +    OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true);
 119.208  
 119.209      /** tested method */
 119.210      final Executable executable;
 119.211      /** object to invoke {@linkplain #executable} */
 119.212      final Callable<Integer> callable;
 119.213 +    /** flag for OSR test case */
 119.214 +    final boolean isOsr;
 119.215  
 119.216 -    private TestCase(Executable executable, Callable<Integer> callable) {
 119.217 +    private TestCase(Executable executable, Callable<Integer> callable,
 119.218 +            boolean isOsr) {
 119.219          this.executable = executable;
 119.220          this.callable = callable;
 119.221 +        this.isOsr = isOsr;
 119.222      }
 119.223  
 119.224      private static class Helper {
 119.225 +
 119.226          private static final Callable<Integer> CONSTRUCTOR_CALLABLE
 119.227                  = new Callable<Integer>() {
 119.228              @Override
 119.229 @@ -326,9 +408,39 @@
 119.230              }
 119.231          };
 119.232  
 119.233 +        private static final Callable<Integer> OSR_CONSTRUCTOR_CALLABLE
 119.234 +                = new Callable<Integer>() {
 119.235 +            @Override
 119.236 +            public Integer call() throws Exception {
 119.237 +                return new Helper(null).hashCode();
 119.238 +            }
 119.239 +        };
 119.240 +
 119.241 +        private static final Callable<Integer> OSR_METHOD_CALLABLE
 119.242 +                = new Callable<Integer>() {
 119.243 +            private final Helper helper = new Helper();
 119.244 +
 119.245 +            @Override
 119.246 +            public Integer call() throws Exception {
 119.247 +                return helper.osrMethod();
 119.248 +            }
 119.249 +        };
 119.250 +
 119.251 +        private static final Callable<Integer> OSR_STATIC_CALLABLE
 119.252 +                = new Callable<Integer>() {
 119.253 +            @Override
 119.254 +            public Integer call() throws Exception {
 119.255 +                return osrStaticMethod();
 119.256 +            }
 119.257 +        };
 119.258 +
 119.259 +
 119.260          private static final Constructor CONSTRUCTOR;
 119.261 +        private static final Constructor OSR_CONSTRUCTOR;
 119.262          private static final Method METHOD;
 119.263          private static final Method STATIC;
 119.264 +        private static final Method OSR_METHOD;
 119.265 +        private static final Method OSR_STATIC;
 119.266  
 119.267          static {
 119.268              try {
 119.269 @@ -338,17 +450,26 @@
 119.270                          "exception on getting method Helper.<init>(int)", e);
 119.271              }
 119.272              try {
 119.273 -                METHOD = Helper.class.getDeclaredMethod("method");
 119.274 +                OSR_CONSTRUCTOR = Helper.class.getDeclaredConstructor(
 119.275 +                        Object.class);
 119.276              } catch (NoSuchMethodException | SecurityException e) {
 119.277                  throw new RuntimeException(
 119.278 -                        "exception on getting method Helper.method()", e);
 119.279 +                        "exception on getting method Helper.<init>(Object)", e);
 119.280              }
 119.281 +            METHOD = getMethod("method");
 119.282 +            STATIC = getMethod("staticMethod");
 119.283 +            OSR_METHOD = getMethod("osrMethod");
 119.284 +            OSR_STATIC = getMethod("osrStaticMethod");
 119.285 +        }
 119.286 +
 119.287 +        private static Method getMethod(String name) {
 119.288              try {
 119.289 -                STATIC = Helper.class.getDeclaredMethod("staticMethod");
 119.290 +                return Helper.class.getDeclaredMethod(name);
 119.291              } catch (NoSuchMethodException | SecurityException e) {
 119.292                  throw new RuntimeException(
 119.293 -                        "exception on getting method Helper.staticMethod()", e);
 119.294 +                        "exception on getting method Helper." + name, e);
 119.295              }
 119.296 +
 119.297          }
 119.298  
 119.299          private static int staticMethod() {
 119.300 @@ -359,12 +480,39 @@
 119.301              return 42;
 119.302          }
 119.303  
 119.304 +        private static int osrStaticMethod() {
 119.305 +            int result = 0;
 119.306 +            for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) {
 119.307 +                result += staticMethod();
 119.308 +            }
 119.309 +            return result;
 119.310 +        }
 119.311 +
 119.312 +        private int osrMethod() {
 119.313 +            int result = 0;
 119.314 +            for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) {
 119.315 +                result += method();
 119.316 +            }
 119.317 +            return result;
 119.318 +        }
 119.319 +
 119.320          private final int x;
 119.321  
 119.322 +        // for method and OSR method test case
 119.323          public Helper() {
 119.324              x = 0;
 119.325          }
 119.326  
 119.327 +        // for OSR constructor test case
 119.328 +        private Helper(Object o) {
 119.329 +            int result = 0;
 119.330 +            for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) {
 119.331 +                result += method();
 119.332 +            }
 119.333 +            x = result;
 119.334 +        }
 119.335 +
 119.336 +        // for constructor test case
 119.337          private Helper(int x) {
 119.338              this.x = x;
 119.339          }
   120.1 --- a/test/compiler/whitebox/DeoptimizeAllTest.java	Fri Aug 23 22:12:18 2013 +0100
   120.2 +++ b/test/compiler/whitebox/DeoptimizeAllTest.java	Fri Aug 30 09:50:49 2013 +0100
   120.3 @@ -23,6 +23,7 @@
   120.4  
   120.5  /*
   120.6   * @test DeoptimizeAllTest
   120.7 + * @bug 8006683 8007288 8022832
   120.8   * @library /testlibrary /testlibrary/whitebox
   120.9   * @build DeoptimizeAllTest
  120.10   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   121.1 --- a/test/compiler/whitebox/DeoptimizeMethodTest.java	Fri Aug 23 22:12:18 2013 +0100
   121.2 +++ b/test/compiler/whitebox/DeoptimizeMethodTest.java	Fri Aug 30 09:50:49 2013 +0100
   121.3 @@ -23,6 +23,7 @@
   121.4  
   121.5  /*
   121.6   * @test DeoptimizeMethodTest
   121.7 + * @bug 8006683 8007288 8022832
   121.8   * @library /testlibrary /testlibrary/whitebox
   121.9   * @build DeoptimizeMethodTest
  121.10   * @run main ClassFileInstaller sun.hotspot.WhiteBox
  121.11 @@ -54,7 +55,7 @@
  121.12      protected void test() throws Exception {
  121.13          compile();
  121.14          checkCompiled();
  121.15 -        WHITE_BOX.deoptimizeMethod(method);
  121.16 +        deoptimize();
  121.17          checkNotCompiled();
  121.18      }
  121.19  }
   122.1 --- a/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Fri Aug 23 22:12:18 2013 +0100
   122.2 +++ b/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Fri Aug 30 09:50:49 2013 +0100
   122.3 @@ -23,10 +23,11 @@
   122.4  
   122.5  /*
   122.6   * @test EnqueueMethodForCompilationTest
   122.7 + * @bug 8006683 8007288 8022832
   122.8   * @library /testlibrary /testlibrary/whitebox
   122.9   * @build EnqueueMethodForCompilationTest
  122.10   * @run main ClassFileInstaller sun.hotspot.WhiteBox
  122.11 - * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* EnqueueMethodForCompilationTest
  122.12 + * @run main/othervm/timeout=600 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* EnqueueMethodForCompilationTest
  122.13   * @summary testing of WB::enqueueMethodForCompilation()
  122.14   * @author igor.ignatyev@oracle.com
  122.15   */
  122.16 @@ -50,7 +51,7 @@
  122.17  
  122.18          // method can not be compiled on level 'none'
  122.19          WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_NONE);
  122.20 -        if (WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_NONE)) {
  122.21 +        if (isCompilable(COMP_LEVEL_NONE)) {
  122.22              throw new RuntimeException(method
  122.23                      + " is compilable at level COMP_LEVEL_NONE");
  122.24          }
  122.25 @@ -60,27 +61,29 @@
  122.26          WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_ANY);
  122.27          checkNotCompiled();
  122.28  
  122.29 -        WHITE_BOX.enqueueMethodForCompilation(method, 5);
  122.30 -        if (!WHITE_BOX.isMethodCompilable(method, 5)) {
  122.31 -            checkNotCompiled();
  122.32 -            compile();
  122.33 -            checkCompiled();
  122.34 -        } else {
  122.35 -            checkCompiled();
  122.36 -        }
  122.37 -
  122.38 -        int compLevel = WHITE_BOX.getMethodCompilationLevel(method);
  122.39 -        WHITE_BOX.deoptimizeMethod(method);
  122.40 -        checkNotCompiled();
  122.41 -
  122.42 -        WHITE_BOX.enqueueMethodForCompilation(method, compLevel);
  122.43 -        checkCompiled();
  122.44 -        WHITE_BOX.deoptimizeMethod(method);
  122.45 +        // non-existent compilation level
  122.46 +        WHITE_BOX.enqueueMethodForCompilation(method, 42);
  122.47          checkNotCompiled();
  122.48  
  122.49          compile();
  122.50          checkCompiled();
  122.51 -        WHITE_BOX.deoptimizeMethod(method);
  122.52 +
  122.53 +        int compLevel = getCompLevel();
  122.54 +        int bci = WHITE_BOX.getMethodEntryBci(method);
  122.55 +        System.out.println("bci = " + bci);
  122.56 +        printInfo();
  122.57 +        deoptimize();
  122.58 +        printInfo();
  122.59 +        checkNotCompiled();
  122.60 +        printInfo();
  122.61 +        WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci);
  122.62 +        checkCompiled();
  122.63 +        deoptimize();
  122.64 +        checkNotCompiled();
  122.65 +
  122.66 +        compile();
  122.67 +        checkCompiled();
  122.68 +        deoptimize();
  122.69          checkNotCompiled();
  122.70      }
  122.71  }
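
EnqueueMethodForCompilationTest now records the compilation level and the entry bci of the compiled method before deoptimizing, and then re-enqueues exactly that (level, bci) pair. A condensed sketch of that flow using only the WhiteBox calls visible in this diff (it still needs the usual -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI setup to run):

    import java.lang.reflect.Method;
    import sun.hotspot.WhiteBox;

    // Sketch of the record-then-re-enqueue flow used by the updated test.
    public class ReEnqueueSketch {
        private static final WhiteBox WB = WhiteBox.getWhiteBox();

        static void reEnqueueAtSameLevel(Method method) {
            int compLevel = WB.getMethodCompilationLevel(method); // level of the current nmethod
            int bci = WB.getMethodEntryBci(method);               // entry bci of that compilation
            WB.deoptimizeMethod(method);
            // Ask the compile broker for the same (level, bci) compilation again.
            WB.enqueueMethodForCompilation(method, compLevel, bci);
        }
    }
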
   123.1 --- a/test/compiler/whitebox/IsMethodCompilableTest.java	Fri Aug 23 22:12:18 2013 +0100
   123.2 +++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Fri Aug 30 09:50:49 2013 +0100
   123.3 @@ -23,11 +23,11 @@
   123.4  
   123.5  /*
   123.6   * @test IsMethodCompilableTest
   123.7 - * @bug 8007270
   123.8 + * @bug 8007270 8006683 8007288 8022832
   123.9   * @library /testlibrary /testlibrary/whitebox
  123.10   * @build IsMethodCompilableTest
  123.11   * @run main ClassFileInstaller sun.hotspot.WhiteBox
  123.12 - * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* IsMethodCompilableTest
  123.13 + * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* IsMethodCompilableTest
  123.14   * @summary testing of WB::isMethodCompilable()
  123.15   * @author igor.ignatyev@oracle.com
  123.16   */
  123.17 @@ -68,7 +68,7 @@
  123.18       */
  123.19      @Override
  123.20      protected void test() throws Exception {
  123.21 -        if (!WHITE_BOX.isMethodCompilable(method)) {
  123.22 +        if (!isCompilable()) {
  123.23              throw new RuntimeException(method + " must be compilable");
  123.24          }
  123.25          System.out.println("PerMethodRecompilationCutoff = "
  123.26 @@ -83,7 +83,8 @@
  123.27          for (long i = 0L, n = PER_METHOD_RECOMPILATION_CUTOFF - 1; i < n; ++i) {
  123.28              compileAndDeoptimize();
  123.29          }
  123.30 -        if (!WHITE_BOX.isMethodCompilable(method)) {
  123.31 +        if (!testCase.isOsr && !isCompilable()) {
  123.32 +            // in the OSR test case the deoptimization count may exceed the iteration count
  123.33              throw new RuntimeException(method + " is not compilable after "
  123.34                      + (PER_METHOD_RECOMPILATION_CUTOFF - 1) + " iterations");
  123.35          }
  123.36 @@ -92,15 +93,16 @@
  123.37          // deoptimize 'PerMethodRecompilationCutoff' + 1 times
  123.38          long i;
  123.39          for (i = 0L; i < PER_METHOD_RECOMPILATION_CUTOFF
  123.40 -                && WHITE_BOX.isMethodCompilable(method); ++i) {
  123.41 +                && isCompilable(); ++i) {
  123.42              compileAndDeoptimize();
  123.43          }
  123.44 -        if (i != PER_METHOD_RECOMPILATION_CUTOFF) {
  123.45 +        if (!testCase.isOsr && i != PER_METHOD_RECOMPILATION_CUTOFF) {
  123.46 +            // in the OSR test case the deoptimization count may exceed the iteration count
  123.47              throw new RuntimeException(method + " is not compilable after "
  123.48                      + i + " iterations, but must only after "
  123.49                      + PER_METHOD_RECOMPILATION_CUTOFF);
  123.50          }
  123.51 -        if (WHITE_BOX.isMethodCompilable(method)) {
  123.52 +        if (isCompilable()) {
  123.53              throw new RuntimeException(method + " is still compilable after "
  123.54                      + PER_METHOD_RECOMPILATION_CUTOFF + " iterations");
  123.55          }
  123.56 @@ -109,7 +111,7 @@
  123.57  
  123.58          // WB.clearMethodState() must reset no-compilable flags
  123.59          WHITE_BOX.clearMethodState(method);
  123.60 -        if (!WHITE_BOX.isMethodCompilable(method)) {
  123.61 +        if (!isCompilable()) {
  123.62              throw new RuntimeException(method
  123.63                      + " is not compilable after clearMethodState()");
  123.64          }
  123.65 @@ -120,6 +122,6 @@
  123.66      private void compileAndDeoptimize() throws Exception {
  123.67          compile();
  123.68          waitBackgroundCompilation();
  123.69 -        WHITE_BOX.deoptimizeMethod(method);
  123.70 +        deoptimize();
  123.71      }
  123.72  }
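
The heart of IsMethodCompilableTest is a counting loop: compile and deoptimize until isMethodCompilable() flips to false, then compare the iteration count against -XX:PerMethodRecompilationCutoff (the OSR cases are exempt because they can accumulate extra deoptimizations). A schematic sketch of that loop, with compileAndDeoptimize() standing in for the test's own helper:

    import java.lang.reflect.Method;
    import sun.hotspot.WhiteBox;

    // Schematic sketch of the cutoff-counting loop; not a drop-in replacement
    // for the test, which drives compilation through its shared base class.
    public class RecompilationCutoffSketch {
        private static final WhiteBox WB = WhiteBox.getWhiteBox();

        static long deoptimizationsUntilNotCompilable(Method method, long cutoff)
                throws Exception {
            long i;
            for (i = 0; i < cutoff && WB.isMethodCompilable(method); ++i) {
                compileAndDeoptimize(method);
            }
            return i; // expected to equal the cutoff in the non-OSR cases
        }

        private static void compileAndDeoptimize(Method method) throws Exception {
            // Placeholder: the real test triggers a compilation, waits for the
            // background compiler, and then deoptimizes the resulting nmethod.
            WB.deoptimizeMethod(method);
        }
    }
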
   124.1 --- a/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Fri Aug 23 22:12:18 2013 +0100
   124.2 +++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Fri Aug 30 09:50:49 2013 +0100
   124.3 @@ -23,16 +23,16 @@
   124.4  
   124.5  /*
   124.6   * @test MakeMethodNotCompilableTest
   124.7 - * @bug 8012322
   124.8 + * @bug 8012322 8006683 8007288 8022832
   124.9   * @library /testlibrary /testlibrary/whitebox
  124.10   * @build MakeMethodNotCompilableTest
  124.11   * @run main ClassFileInstaller sun.hotspot.WhiteBox
  124.12 - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* MakeMethodNotCompilableTest
  124.13 + * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* MakeMethodNotCompilableTest
  124.14   * @summary testing of WB::makeMethodNotCompilable()
  124.15   * @author igor.ignatyev@oracle.com
  124.16   */
  124.17  public class MakeMethodNotCompilableTest extends CompilerWhiteBoxTest {
  124.18 -
  124.19 +    private int bci;
  124.20      public static void main(String[] args) throws Exception {
  124.21          if (args.length == 0) {
  124.22              for (TestCase test : TestCase.values()) {
  124.23 @@ -63,25 +63,27 @@
  124.24      @Override
  124.25      protected void test() throws Exception {
  124.26          checkNotCompiled();
  124.27 -        if (!WHITE_BOX.isMethodCompilable(method)) {
  124.28 +        if (!isCompilable()) {
  124.29              throw new RuntimeException(method + " must be compilable");
  124.30          }
  124.31  
  124.32 +        bci = getBci();
  124.33 +
  124.34          if (TIERED_COMPILATION) {
  124.35              final int tierLimit = TIERED_STOP_AT_LEVEL + 1;
  124.36              for (int testedTier = 1; testedTier < tierLimit; ++testedTier) {
  124.37                  testTier(testedTier);
  124.38              }
  124.39              for (int testedTier = 1; testedTier < tierLimit; ++testedTier) {
  124.40 -                WHITE_BOX.makeMethodNotCompilable(method, testedTier);
  124.41 -                if (WHITE_BOX.isMethodCompilable(method, testedTier)) {
  124.42 +                makeNotCompilable(testedTier);
  124.43 +                if (isCompilable(testedTier)) {
  124.44                      throw new RuntimeException(method
  124.45                              + " must be not compilable at level " + testedTier);
  124.46                  }
  124.47 -                WHITE_BOX.enqueueMethodForCompilation(method, testedTier);
  124.48 +                WHITE_BOX.enqueueMethodForCompilation(method, testedTier, bci);
  124.49                  checkNotCompiled();
  124.50  
  124.51 -                if (!WHITE_BOX.isMethodCompilable(method)) {
  124.52 +                if (!isCompilable()) {
  124.53                      System.out.println(method
  124.54                              + " is not compilable after level " + testedTier);
  124.55                  }
  124.56 @@ -89,15 +91,20 @@
  124.57          } else {
  124.58              compile();
  124.59              checkCompiled();
  124.60 -            int compLevel = WHITE_BOX.getMethodCompilationLevel(method);
  124.61 -            WHITE_BOX.deoptimizeMethod(method);
  124.62 -            WHITE_BOX.makeMethodNotCompilable(method, compLevel);
  124.63 -            if (WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY)) {
  124.64 +            int compLevel = getCompLevel();
  124.65 +            deoptimize();
  124.66 +            makeNotCompilable(compLevel);
  124.67 +            if (isCompilable(COMP_LEVEL_ANY)) {
  124.68                  throw new RuntimeException(method
  124.69                          + " must be not compilable at CompLevel::CompLevel_any,"
  124.70                          + " after it is not compilable at " + compLevel);
  124.71              }
  124.72 +
  124.73              WHITE_BOX.clearMethodState(method);
  124.74 +            if (!isCompilable()) {
  124.75 +                throw new RuntimeException(method
  124.76 +                        + " is not compilable after clearMethodState()");
  124.77 +            }
  124.78  
  124.79              // making the method not compilable at the opposite level must have no effect
  124.80              int oppositeLevel;
  124.81 @@ -106,16 +113,16 @@
  124.82              } else {
  124.83                oppositeLevel = COMP_LEVEL_SIMPLE;
  124.84              }
  124.85 -            WHITE_BOX.makeMethodNotCompilable(method, oppositeLevel);
  124.86 +            makeNotCompilable(oppositeLevel);
  124.87  
  124.88 -            if (!WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY)) {
  124.89 +            if (!isCompilable(COMP_LEVEL_ANY)) {
  124.90                    throw new RuntimeException(method
  124.91                          + " must be compilable at CompLevel::CompLevel_any,"
  124.92                          + " even if it is not compilable at opposite level ["
  124.93                          + compLevel + "]");
  124.94              }
  124.95  
  124.96 -            if (!WHITE_BOX.isMethodCompilable(method, compLevel)) {
  124.97 +            if (!isCompilable(compLevel)) {
  124.98                    throw new RuntimeException(method
  124.99                          + " must be compilable at level " + compLevel
  124.100                        + ", even if it is not compilable at opposite level ["
 124.101 @@ -126,24 +133,24 @@
 124.102          // clearing after tiered/non-tiered tests
 124.103          // WB.clearMethodState() must reset no-compilable flags
 124.104          WHITE_BOX.clearMethodState(method);
 124.105 -        if (!WHITE_BOX.isMethodCompilable(method)) {
 124.106 +        if (!isCompilable()) {
 124.107              throw new RuntimeException(method
 124.108                      + " is not compilable after clearMethodState()");
 124.109          }
 124.110  
 124.111 -        WHITE_BOX.makeMethodNotCompilable(method);
 124.112 -        if (WHITE_BOX.isMethodCompilable(method)) {
 124.113 +        makeNotCompilable();
 124.114 +        if (isCompilable()) {
 124.115              throw new RuntimeException(method + " must be not compilable");
 124.116          }
 124.117  
 124.118          compile();
 124.119          checkNotCompiled();
 124.120 -        if (WHITE_BOX.isMethodCompilable(method)) {
 124.121 +        if (isCompilable()) {
 124.122              throw new RuntimeException(method + " must be not compilable");
 124.123          }
 124.124          // WB.clearMethodState() must reset no-compilable flags
 124.125          WHITE_BOX.clearMethodState(method);
 124.126 -        if (!WHITE_BOX.isMethodCompilable(method)) {
 124.127 +        if (!isCompilable()) {
 124.128              throw new RuntimeException(method
 124.129                      + " is not compilable after clearMethodState()");
 124.130          }
 124.131 @@ -153,24 +160,23 @@
 124.132  
 124.133      // separately tests each tier
 124.134      private void testTier(int testedTier) {
 124.135 -        if (!WHITE_BOX.isMethodCompilable(method, testedTier)) {
 124.136 +        if (!isCompilable(testedTier)) {
 124.137              throw new RuntimeException(method
 124.138                      + " is not compilable on start");
 124.139          }
 124.140 -        WHITE_BOX.makeMethodNotCompilable(method, testedTier);
 124.141 +        makeNotCompilable(testedTier);
 124.142  
 124.143          // tests for all other tiers
 124.144          for (int anotherTier = 1, tierLimit = TIERED_STOP_AT_LEVEL + 1;
 124.145                      anotherTier < tierLimit; ++anotherTier) {
 124.146 -            boolean isCompilable = WHITE_BOX.isMethodCompilable(method,
 124.147 -                    anotherTier);
 124.148 +            boolean isCompilable = isCompilable(anotherTier);
 124.149              if (sameCompile(testedTier, anotherTier)) {
 124.150                  if (isCompilable) {
 124.151                      throw new RuntimeException(method
 124.152                              + " must be not compilable at level " + anotherTier
 124.153                              + ", if it is not compilable at " + testedTier);
 124.154                  }
 124.155 -                WHITE_BOX.enqueueMethodForCompilation(method, anotherTier);
 124.156 +                WHITE_BOX.enqueueMethodForCompilation(method, anotherTier, bci);
 124.157                  checkNotCompiled();
 124.158              } else {
 124.159                  if (!isCompilable) {
 124.160 @@ -179,12 +185,12 @@
 124.161                              + ", even if it is not compilable at "
 124.162                              + testedTier);
 124.163                  }
 124.164 -                WHITE_BOX.enqueueMethodForCompilation(method, anotherTier);
 124.165 +                WHITE_BOX.enqueueMethodForCompilation(method, anotherTier, bci);
 124.166                  checkCompiled();
 124.167 -                WHITE_BOX.deoptimizeMethod(method);
 124.168 +                deoptimize();
 124.169              }
 124.170  
 124.171 -            if (!WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY)) {
 124.172 +            if (!isCompilable(COMP_LEVEL_ANY)) {
 124.173                  throw new RuntimeException(method
 124.174                          + " must be compilable at 'CompLevel::CompLevel_any'"
 124.175                          + ", if it is not compilable only at " + testedTier);
 124.176 @@ -193,7 +199,7 @@
 124.177  
 124.178          // clear state after test
 124.179          WHITE_BOX.clearMethodState(method);
 124.180 -        if (!WHITE_BOX.isMethodCompilable(method, testedTier)) {
 124.181 +        if (!isCompilable(testedTier)) {
 124.182              throw new RuntimeException(method
 124.183                      + " is not compilable after clearMethodState()");
 124.184          }
 124.185 @@ -211,4 +217,13 @@
 124.186          }
 124.187          return false;
 124.188      }
 124.189 +
 124.190 +    private int getBci() {
 124.191 +        compile();
 124.192 +        checkCompiled();
 124.193 +        int result = WHITE_BOX.getMethodEntryBci(method);
 124.194 +        deoptimize();
 124.195 +        WHITE_BOX.clearMethodState(method);
 124.196 +        return result;
 124.197 +    }
 124.198  }
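
The new getBci() helper captures the entry bci of a freshly compiled nmethod so that the later enqueueMethodForCompilation(method, tier, bci) calls request the same kind of compilation. For a standard compilation the entry bci is HotSpot's InvocationEntryBci sentinel (-1), while an OSR compilation reports the bci of the compiled loop. A tiny sketch of that distinction (the sentinel value is stated from HotSpot convention, not from this diff):

    // Sketch: classifying a compilation by the entry bci reported by the
    // WhiteBox API. -1 is HotSpot's InvocationEntryBci sentinel; any real
    // bytecode index indicates an on-stack-replacement (OSR) compilation.
    public class EntryBciSketch {
        static boolean isOsrCompilation(int entryBci) {
            return entryBci >= 0;
        }

        public static void main(String[] args) {
            System.out.println(isOsrCompilation(-1)); // standard compilation -> false
            System.out.println(isOsrCompilation(17)); // OSR at bci 17 -> true
        }
    }
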
   125.1 --- a/test/compiler/whitebox/SetDontInlineMethodTest.java	Fri Aug 23 22:12:18 2013 +0100
   125.2 +++ b/test/compiler/whitebox/SetDontInlineMethodTest.java	Fri Aug 30 09:50:49 2013 +0100
   125.3 @@ -23,6 +23,7 @@
   125.4  
   125.5  /*
   125.6   * @test SetDontInlineMethodTest
   125.7 + * @bug 8006683 8007288 8022832
   125.8   * @library /testlibrary /testlibrary/whitebox
   125.9   * @build SetDontInlineMethodTest
  125.10   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   126.1 --- a/test/compiler/whitebox/SetForceInlineMethodTest.java	Fri Aug 23 22:12:18 2013 +0100
   126.2 +++ b/test/compiler/whitebox/SetForceInlineMethodTest.java	Fri Aug 30 09:50:49 2013 +0100
   126.3 @@ -23,6 +23,7 @@
   126.4  
   126.5  /*
   126.6   * @test SetForceInlineMethodTest
   126.7 + * @bug 8006683 8007288 8022832
   126.8   * @library /testlibrary /testlibrary/whitebox
   126.9   * @build SetForceInlineMethodTest
  126.10   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   127.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   127.2 +++ b/test/gc/metaspace/TestMetaspacePerfCounters.java	Fri Aug 30 09:50:49 2013 +0100
   127.3 @@ -0,0 +1,104 @@
   127.4 +/*
   127.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   127.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   127.7 + *
   127.8 + * This code is free software; you can redistribute it and/or modify it
   127.9 + * under the terms of the GNU General Public License version 2 only, as
  127.10 + * published by the Free Software Foundation.
  127.11 + *
  127.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  127.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  127.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  127.15 + * version 2 for more details (a copy is included in the LICENSE file that
  127.16 + * accompanied this code).
  127.17 + *
  127.18 + * You should have received a copy of the GNU General Public License version
  127.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  127.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  127.21 + *
  127.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  127.23 + * or visit www.oracle.com if you need additional information or have any
  127.24 + * questions.
  127.25 + */
  127.26 +
  127.27 +import java.util.List;
  127.28 +import java.util.ArrayList;
  127.29 +
  127.30 +import com.oracle.java.testlibrary.*;
  127.31 +import static com.oracle.java.testlibrary.Asserts.*;
  127.32 +
  127.33 +/* @test TestMetaspacePerfCounters
  127.34 + * @bug 8014659
  127.35 + * @library /testlibrary
  127.36 + * @summary Tests that performance counters for metaspace and compressed class
  127.37 + *          space exist and work.
  127.38 + *
  127.39 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
  127.40 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
  127.41 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  127.42 + *
  127.43 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
  127.44 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
  127.45 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  127.46 + */
  127.47 +public class TestMetaspacePerfCounters {
  127.48 +    public static Class fooClass = null;
  127.49 +    private static final String[] counterNames = {"minCapacity", "maxCapacity", "capacity", "used"};
  127.50 +
  127.51 +    public static void main(String[] args) throws Exception {
  127.52 +        String metaspace = "sun.gc.metaspace";
  127.53 +        String ccs = "sun.gc.compressedclassspace";
  127.54 +
  127.55 +        checkPerfCounters(metaspace);
  127.56 +
  127.57 +        if (isUsingCompressedClassPointers()) {
  127.58 +            checkPerfCounters(ccs);
  127.59 +            checkUsedIncreasesWhenLoadingClass(ccs);
  127.60 +        } else {
  127.61 +            checkEmptyPerfCounters(ccs);
  127.62 +            checkUsedIncreasesWhenLoadingClass(metaspace);
  127.63 +        }
  127.64 +    }
  127.65 +
  127.66 +    private static void checkPerfCounters(String ns) throws Exception {
  127.67 +        for (PerfCounter counter : countersInNamespace(ns)) {
  127.68 +            String msg = "Expected " + counter.getName() + " to be larger than 0";
  127.69 +            assertGT(counter.longValue(), 0L, msg);
  127.70 +        }
  127.71 +    }
  127.72 +
  127.73 +    private static void checkEmptyPerfCounters(String ns) throws Exception {
  127.74 +        for (PerfCounter counter : countersInNamespace(ns)) {
  127.75 +            String msg = "Expected " + counter.getName() + " to equal 0";
  127.76 +            assertEQ(counter.longValue(), 0L, msg);
  127.77 +        }
  127.78 +    }
  127.79 +
  127.80 +    private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception {
  127.81 +        PerfCounter used = PerfCounters.findByName(ns + ".used");
  127.82 +
  127.83 +        long before = used.longValue();
  127.84 +        fooClass = compileAndLoad("Foo", "public class Foo { }");
  127.85 +        System.gc();
  127.86 +        long after = used.longValue();
  127.87 +
  127.88 +        assertGT(after, before);
  127.89 +    }
  127.90 +
  127.91 +    private static List<PerfCounter> countersInNamespace(String ns) throws Exception {
  127.92 +        List<PerfCounter> counters = new ArrayList<>();
  127.93 +        for (String name : counterNames) {
  127.94 +            counters.add(PerfCounters.findByName(ns + "." + name));
  127.95 +        }
  127.96 +        return counters;
  127.97 +    }
  127.98 +
  127.99 +    private static Class<?> compileAndLoad(String name, String source) throws Exception {
 127.100 +        byte[] byteCode = InMemoryJavaCompiler.compile(name, source);
 127.101 +        return ByteCodeLoader.load(name, byteCode);
 127.102 +    }
 127.103 +
 127.104 +    private static boolean isUsingCompressedClassPointers() {
 127.105 +        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedKlassPointers");
 127.106 +    }
 127.107 +}
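
TestMetaspacePerfCounters reads the sun.gc.metaspace.* and sun.gc.compressedclassspace.* performance counters through the test library's PerfCounters helper. A related, purely in-process view of the same quantities is available through the memory-pool MXBeans; a small sketch (pool names as exposed by HotSpot, and not the PerfCounter API the test itself uses):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryPoolMXBean;

    // Sketch: a rough in-process analogue of the sun.gc.metaspace.* counters,
    // reading usage from the Metaspace / Compressed Class Space memory pools.
    public class MetaspaceUsageSketch {
        public static void main(String[] args) {
            for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
                String name = pool.getName();
                if (name.equals("Metaspace") || name.equals("Compressed Class Space")) {
                    System.out.println(name + " used = " + pool.getUsage().getUsed() + " bytes");
                }
            }
        }
    }
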
   128.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   128.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Fri Aug 30 09:50:49 2013 +0100
   128.3 @@ -0,0 +1,61 @@
   128.4 +/*
   128.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   128.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   128.7 + *
   128.8 + * This code is free software; you can redistribute it and/or modify it
   128.9 + * under the terms of the GNU General Public License version 2 only, as
  128.10 + * published by the Free Software Foundation.
  128.11 + *
  128.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  128.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  128.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  128.15 + * version 2 for more details (a copy is included in the LICENSE file that
  128.16 + * accompanied this code).
  128.17 + *
  128.18 + * You should have received a copy of the GNU General Public License version
  128.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  128.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  128.21 + *
  128.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  128.23 + * or visit www.oracle.com if you need additional information or have any
  128.24 + * questions.
  128.25 + */
  128.26 +
  128.27 +/*
  128.28 + * @test
  128.29 + * @bug 8003424
  128.30 + * @summary Testing UseCompressedKlassPointers with CDS
  128.31 + * @library /testlibrary
  128.32 + * @run main CDSCompressedKPtrs
  128.33 + */
  128.34 +
  128.35 +import com.oracle.java.testlibrary.*;
  128.36 +
  128.37 +public class CDSCompressedKPtrs {
  128.38 +  public static void main(String[] args) throws Exception {
  128.39 +    ProcessBuilder pb;
  128.40 +    if (Platform.is64bit()) {
  128.41 +      pb = ProcessTools.createJavaProcessBuilder(
  128.42 +        "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
  128.43 +        "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  128.44 +      OutputAnalyzer output = new OutputAnalyzer(pb.start());
  128.45 +      try {
  128.46 +        output.shouldContain("Loading classes to share");
  128.47 +        output.shouldHaveExitValue(0);
  128.48 +
  128.49 +        pb = ProcessTools.createJavaProcessBuilder(
  128.50 +          "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
  128.51 +          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  128.52 +        output = new OutputAnalyzer(pb.start());
  128.53 +        output.shouldContain("sharing");
  128.54 +        output.shouldHaveExitValue(0);
  128.55 +
  128.56 +      } catch (RuntimeException e) {
  128.57 +        // Report 'passed' if CDS was turned off because we could not allocate
  128.58 +        // the klass metaspace at an address that would work with CDS.
  128.59 +        output.shouldContain("Could not allocate metaspace at a compatible address");
  128.60 +        output.shouldHaveExitValue(1);
  128.61 +      }
  128.62 +    }
  128.63 +  }
  128.64 +}
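
The CDS tests all follow the same two-step pattern: run one JVM with -Xshare:dump to create the archive, then run a second JVM with -Xshare:on against that archive and inspect its output. A condensed sketch of that pattern using the testlibrary calls shown above (archive path and expected strings taken from the test; the fallback handling for incompatible metaspace addresses is omitted):

    import com.oracle.java.testlibrary.*;

    // Sketch of the dump-then-run CDS pattern shared by these tests.
    public class CdsDumpAndRunSketch {
        public static void main(String[] args) throws Exception {
            ProcessBuilder dump = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
                "-Xshare:dump");
            OutputAnalyzer output = new OutputAnalyzer(dump.start());
            output.shouldContain("Loading classes to share");
            output.shouldHaveExitValue(0);

            ProcessBuilder run = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
                "-Xshare:on", "-version");
            output = new OutputAnalyzer(run.start());
            output.shouldContain("sharing");
            output.shouldHaveExitValue(0);
        }
    }
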
   129.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   129.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Fri Aug 30 09:50:49 2013 +0100
   129.3 @@ -0,0 +1,93 @@
   129.4 +/*
   129.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   129.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   129.7 + *
   129.8 + * This code is free software; you can redistribute it and/or modify it
   129.9 + * under the terms of the GNU General Public License version 2 only, as
  129.10 + * published by the Free Software Foundation.
  129.11 + *
  129.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  129.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  129.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  129.15 + * version 2 for more details (a copy is included in the LICENSE file that
  129.16 + * accompanied this code).
  129.17 + *
  129.18 + * You should have received a copy of the GNU General Public License version
  129.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  129.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  129.21 + *
  129.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  129.23 + * or visit www.oracle.com if you need additional information or have any
  129.24 + * questions.
  129.25 + */
  129.26 +
  129.27 +/*
  129.28 + * @test
  129.29 + * @bug 8003424
  129.30 + * @summary Test that CDS cannot be used if UseCompressedKlassPointers is turned off.
  129.31 + * @library /testlibrary
  129.32 + * @run main CDSCompressedKPtrsError
  129.33 + */
  129.34 +
  129.35 +import com.oracle.java.testlibrary.*;
  129.36 +
  129.37 +public class CDSCompressedKPtrsError {
  129.38 +  public static void main(String[] args) throws Exception {
  129.39 +    ProcessBuilder pb;
  129.40 +    if (Platform.is64bit()) {
  129.41 +      pb = ProcessTools.createJavaProcessBuilder(
  129.42 +        "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  129.43 +        "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  129.44 +      OutputAnalyzer output = new OutputAnalyzer(pb.start());
  129.45 +      try {
  129.46 +        output.shouldContain("Loading classes to share");
  129.47 +        output.shouldHaveExitValue(0);
  129.48 +
  129.49 +        pb = ProcessTools.createJavaProcessBuilder(
  129.50 +          "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops",
  129.51 +          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  129.52 +        output = new OutputAnalyzer(pb.start());
  129.53 +        output.shouldContain("Unable to use shared archive");
  129.54 +        output.shouldHaveExitValue(0);
  129.55 +
  129.56 +        pb = ProcessTools.createJavaProcessBuilder(
  129.57 +          "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops",
  129.58 +          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  129.59 +        output = new OutputAnalyzer(pb.start());
  129.60 +        output.shouldContain("Unable to use shared archive");
  129.61 +        output.shouldHaveExitValue(0);
  129.62 +
  129.63 +        pb = ProcessTools.createJavaProcessBuilder(
  129.64 +          "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops",
  129.65 +          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  129.66 +        output = new OutputAnalyzer(pb.start());
  129.67 +        output.shouldContain("Unable to use shared archive");
  129.68 +        output.shouldHaveExitValue(0);
  129.69 +
  129.70 +      } catch (RuntimeException e) {
  129.71 +        output.shouldContain("Unable to use shared archive");
  129.72 +        output.shouldHaveExitValue(1);
  129.73 +      }
  129.74 +
  129.75 +      // Test bad options with -Xshare:dump.
  129.76 +      pb = ProcessTools.createJavaProcessBuilder(
  129.77 +        "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  129.78 +        "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  129.79 +      output = new OutputAnalyzer(pb.start());
  129.80 +      output.shouldContain("Cannot dump shared archive");
  129.81 +
  129.82 +      pb = ProcessTools.createJavaProcessBuilder(
  129.83 +        "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  129.84 +        "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  129.85 +      output = new OutputAnalyzer(pb.start());
  129.86 +      output.shouldContain("Cannot dump shared archive");
  129.87 +
  129.88 +      pb = ProcessTools.createJavaProcessBuilder(
  129.89 +        "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  129.90 +        "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  129.91 +      output = new OutputAnalyzer(pb.start());
  129.92 +      output.shouldContain("Cannot dump shared archive");
  129.93 +
  129.94 +    }
  129.95 +  }
  129.96 +}
   130.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   130.2 +++ b/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Fri Aug 30 09:50:49 2013 +0100
   130.3 @@ -0,0 +1,76 @@
   130.4 +/*
   130.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   130.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   130.7 + *
   130.8 + * This code is free software; you can redistribute it and/or modify it
   130.9 + * under the terms of the GNU General Public License version 2 only, as
  130.10 + * published by the Free Software Foundation.
  130.11 + *
  130.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  130.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  130.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  130.15 + * version 2 for more details (a copy is included in the LICENSE file that
  130.16 + * accompanied this code).
  130.17 + *
  130.18 + * You should have received a copy of the GNU General Public License version
  130.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  130.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  130.21 + *
  130.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  130.23 + * or visit www.oracle.com if you need additional information or have any
  130.24 + * questions.
  130.25 + */
  130.26 +
  130.27 +/*
  130.28 + * @test
  130.29 + * @bug 8005933
  130.30 + * @summary Test that -Xshare:auto uses CDS when explicitly specified with -server.
  130.31 + * @library /testlibrary
  130.32 + * @run main XShareAuto
  130.33 + */
  130.34 +
  130.35 +import com.oracle.java.testlibrary.*;
  130.36 +
  130.37 +public class XShareAuto {
  130.38 +    public static void main(String[] args) throws Exception {
  130.39 +        if (!Platform.is64bit()) {
  130.40 +            System.out.println("ObjectAlignmentInBytes for CDS is only " +
  130.41 +                "supported on 64bit platforms; this platform is " +
  130.42 +                System.getProperty("sun.arch.data.model"));
  130.43 +            System.out.println("Skipping the test");
  130.44 +            return;
  130.45 +        }
  130.46 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  130.47 +            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
  130.48 +            "-Xshare:dump");
  130.49 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  130.50 +        output.shouldContain("Loading classes to share");
  130.51 +        output.shouldHaveExitValue(0);
  130.52 +
  130.53 +        pb = ProcessTools.createJavaProcessBuilder(
  130.54 +            "-server", "-XX:+UnlockDiagnosticVMOptions",
  130.55 +            "-XX:SharedArchiveFile=./sample.jsa", "-version");
  130.56 +        output = new OutputAnalyzer(pb.start());
  130.57 +        output.shouldNotContain("sharing");
  130.58 +        output.shouldHaveExitValue(0);
  130.59 +
  130.60 +        pb = ProcessTools.createJavaProcessBuilder(
  130.61 +            "-server", "-Xshare:auto", "-XX:+UnlockDiagnosticVMOptions",
  130.62 +            "-XX:SharedArchiveFile=./sample.jsa", "-version");
  130.63 +        output = new OutputAnalyzer(pb.start());
  130.64 +        try {
  130.65 +            output.shouldContain("sharing");
  130.66 +            output.shouldHaveExitValue(0);
  130.67 +        } catch (RuntimeException e) {
  130.68 +            // If this failed then check that it would also be unable
  130.69 +            // to share even if -Xshare:on is specified.  If so, then
  130.70 +            // return a success status.
  130.71 +            pb = ProcessTools.createJavaProcessBuilder(
  130.72 +                "-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions",
  130.73 +                "-XX:SharedArchiveFile=./sample.jsa", "-version");
  130.74 +            output = new OutputAnalyzer(pb.start());
  130.75 +            output.shouldContain("Could not allocate metaspace at a compatible address");
  130.76 +            output.shouldHaveExitValue(1);
  130.77 +        }
  130.78 +    }
  130.79 +}
   131.1 --- a/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Fri Aug 23 22:12:18 2013 +0100
   131.2 +++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Fri Aug 30 09:50:49 2013 +0100
   131.3 @@ -84,8 +84,7 @@
   131.4              // there is a chance such reservation will fail
   131.5              // If it does, it is NOT considered a failure of the feature,
   131.6              // rather a possible expected outcome, though not likely
   131.7 -            output.shouldContain(
   131.8 -                "Unable to reserve shared space at required address");
   131.9 +            output.shouldContain("Could not allocate metaspace at a compatible address");
  131.10              output.shouldHaveExitValue(1);
  131.11          }
  131.12      }
   132.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   132.2 +++ b/test/testlibrary/AssertsTest.java	Fri Aug 30 09:50:49 2013 +0100
   132.3 @@ -0,0 +1,237 @@
   132.4 +/*
   132.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   132.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   132.7 + *
   132.8 + * This code is free software; you can redistribute it and/or modify it
   132.9 + * under the terms of the GNU General Public License version 2 only, as
  132.10 + * published by the Free Software Foundation.
  132.11 + *
  132.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  132.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  132.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  132.15 + * version 2 for more details (a copy is included in the LICENSE file that
  132.16 + * accompanied this code).
  132.17 + *
  132.18 + * You should have received a copy of the GNU General Public License version
  132.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  132.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  132.21 + *
  132.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  132.23 + * or visit www.oracle.com if you need additional information or have any
  132.24 + * questions.
  132.25 + */
  132.26 +
  132.27 +import static com.oracle.java.testlibrary.Asserts.*;
  132.28 +
  132.29 +/* @test
  132.30 + * @summary Tests the different assertions in the Assert class
  132.31 + * @library /testlibrary
  132.32 + */
  132.33 +public class AssertsTest {
  132.34 +    private static class Foo implements Comparable<Foo> {
  132.35 +        final int id;
  132.36 +        public Foo(int id) {
  132.37 +            this.id = id;
  132.38 +        }
  132.39 +
  132.40 +        public int compareTo(Foo f) {
  132.41 +            return new Integer(id).compareTo(new Integer(f.id));
  132.42 +        }
  132.43 +    }
  132.44 +
  132.45 +    public static void main(String[] args) throws Exception {
  132.46 +        testLessThan();
  132.47 +        testLessThanOrEqual();
  132.48 +        testEquals();
  132.49 +        testGreaterThanOrEqual();
  132.50 +        testGreaterThan();
  132.51 +        testNotEquals();
  132.52 +        testNull();
  132.53 +        testNotNull();
  132.54 +        testTrue();
  132.55 +        testFalse();
  132.56 +    }
  132.57 +
  132.58 +    private static void testLessThan() throws Exception {
  132.59 +        expectPass(Assertion.LT, 1, 2);
  132.60 +
  132.61 +        expectFail(Assertion.LT, 2, 2);
  132.62 +        expectFail(Assertion.LT, 2, 1);
  132.63 +        expectFail(Assertion.LT, null, 2);
  132.64 +        expectFail(Assertion.LT, 2, null);
  132.65 +    }
  132.66 +
  132.67 +    private static void testLessThanOrEqual() throws Exception {
  132.68 +        expectPass(Assertion.LTE, 1, 2);
  132.69 +        expectPass(Assertion.LTE, 2, 2);
  132.70 +
  132.71 +        expectFail(Assertion.LTE, 3, 2);
  132.72 +        expectFail(Assertion.LTE, null, 2);
  132.73 +        expectFail(Assertion.LTE, 2, null);
  132.74 +    }
  132.75 +
  132.76 +    private static void testEquals() throws Exception {
  132.77 +        expectPass(Assertion.EQ, 1, 1);
  132.78 +        expectPass(Assertion.EQ, null, null);
  132.79 +
  132.80 +        Foo f1 = new Foo(1);
  132.81 +        expectPass(Assertion.EQ, f1, f1);
  132.82 +
  132.83 +        Foo f2 = new Foo(1);
  132.84 +        expectFail(Assertion.EQ, f1, f2);
  132.85 +        expectFail(Assertion.EQ, null, 2);
  132.86 +        expectFail(Assertion.EQ, 2, null);
  132.87 +    }
  132.88 +
  132.89 +    private static void testGreaterThanOrEqual() throws Exception {
  132.90 +        expectPass(Assertion.GTE, 1, 1);
  132.91 +        expectPass(Assertion.GTE, 2, 1);
  132.92 +
  132.93 +        expectFail(Assertion.GTE, 1, 2);
  132.94 +        expectFail(Assertion.GTE, null, 2);
  132.95 +        expectFail(Assertion.GTE, 2, null);
  132.96 +    }
  132.97 +
  132.98 +    private static void testGreaterThan() throws Exception {
  132.99 +        expectPass(Assertion.GT, 2, 1);
 132.100 +
 132.101 +        expectFail(Assertion.GT, 1, 1);
 132.102 +        expectFail(Assertion.GT, 1, 2);
 132.103 +        expectFail(Assertion.GT, null, 2);
 132.104 +        expectFail(Assertion.GT, 2, null);
 132.105 +    }
 132.106 +
 132.107 +    private static void testNotEquals() throws Exception {
 132.108 +        expectPass(Assertion.NE, null, 1);
 132.109 +        expectPass(Assertion.NE, 1, null);
 132.110 +
 132.111 +        Foo f1 = new Foo(1);
 132.112 +        Foo f2 = new Foo(1);
 132.113 +        expectPass(Assertion.NE, f1, f2);
 132.114 +
 132.115 +        expectFail(Assertion.NE, null, null);
 132.116 +        expectFail(Assertion.NE, f1, f1);
 132.117 +        expectFail(Assertion.NE, 1, 1);
 132.118 +    }
 132.119 +
 132.120 +    private static void testNull() throws Exception {
 132.121 +        expectPass(Assertion.NULL, null);
 132.122 +
 132.123 +        expectFail(Assertion.NULL, 1);
 132.124 +    }
 132.125 +
 132.126 +    private static void testNotNull() throws Exception {
 132.127 +        expectPass(Assertion.NOTNULL, 1);
 132.128 +
 132.129 +        expectFail(Assertion.NOTNULL, null);
 132.130 +    }
 132.131 +
 132.132 +    private static void testTrue() throws Exception {
 132.133 +        expectPass(Assertion.TRUE, true);
 132.134 +
 132.135 +        expectFail(Assertion.TRUE, false);
 132.136 +    }
 132.137 +
 132.138 +    private static void testFalse() throws Exception {
 132.139 +        expectPass(Assertion.FALSE, false);
 132.140 +
 132.141 +        expectFail(Assertion.FALSE, true);
 132.142 +    }
 132.143 +
 132.144 +    private static <T extends Comparable<T>> void expectPass(Assertion assertion, T ... args)
 132.145 +        throws Exception {
 132.146 +        Assertion.run(assertion, args);
 132.147 +    }
 132.148 +
 132.149 +    private static <T extends Comparable<T>> void expectFail(Assertion assertion, T ... args)
 132.150 +        throws Exception {
 132.151 +        try {
 132.152 +            Assertion.run(assertion, args);
 132.153 +        } catch (RuntimeException e) {
 132.154 +            return;
 132.155 +        }
 132.156 +        throw new Exception("Expected " + Assertion.format(assertion, (Object[]) args) +
 132.157 +                            " to throw a RuntimeException");
 132.158 +    }
 132.159 +
 132.160 +}
 132.161 +
 132.162 +enum Assertion {
 132.163 +    LT, LTE, EQ, GTE, GT, NE, NULL, NOTNULL, FALSE, TRUE;
 132.164 +
 132.165 +    public static <T extends Comparable<T>> void run(Assertion assertion, T ... args) {
 132.166 +        String msg = "Expected " + format(assertion, args) + " to pass";
 132.167 +        switch (assertion) {
 132.168 +            case LT:
 132.169 +                assertLessThan(args[0], args[1], msg);
 132.170 +                break;
 132.171 +            case LTE:
 132.172 +                assertLessThanOrEqual(args[0], args[1], msg);
 132.173 +                break;
 132.174 +            case EQ:
 132.175 +                assertEquals(args[0], args[1], msg);
 132.176 +                break;
 132.177 +            case GTE:
 132.178 +                assertGreaterThanOrEqual(args[0], args[1], msg);
 132.179 +                break;
 132.180 +            case GT:
 132.181 +                assertGreaterThan(args[0], args[1], msg);
 132.182 +                break;
 132.183 +            case NE:
 132.184 +                assertNotEquals(args[0], args[1], msg);
 132.185 +                break;
 132.186 +            case NULL:
 132.187 +                assertNull(args == null ? args : args[0], msg);
 132.188 +                break;
 132.189 +            case NOTNULL:
 132.190 +                assertNotNull(args == null ? args : args[0], msg);
 132.191 +                break;
 132.192 +            case FALSE:
 132.193 +                assertFalse((Boolean) args[0], msg);
 132.194 +                break;
 132.195 +            case TRUE:
 132.196 +                assertTrue((Boolean) args[0], msg);
 132.197 +                break;
 132.198 +            default:
 132.199 +                // do nothing
 132.200 +        }
 132.201 +    }
 132.202 +
 132.203 +    public static String format(Assertion assertion, Object ... args) {
 132.204 +        switch (assertion) {
 132.205 +            case LT:
 132.206 +                return asString("assertLessThan", args);
 132.207 +            case LTE:
 132.208 +                return asString("assertLessThanOrEqual", args);
 132.209 +            case EQ:
 132.210 +                return asString("assertEquals", args);
 132.211 +            case GTE:
 132.212 +                return asString("assertGreaterThanOrEquals", args);
 132.213 +            case GT:
 132.214 +                return asString("assertGreaterThan", args);
 132.215 +            case NE:
 132.216 +                return asString("assertNotEquals", args);
 132.217 +            case NULL:
 132.218 +                return asString("assertNull", args);
 132.219 +            case NOTNULL:
 132.220 +                return asString("assertNotNull", args);
 132.221 +            case FALSE:
 132.222 +                return asString("assertFalse", args);
 132.223 +            case TRUE:
 132.224 +                return asString("assertTrue", args);
 132.225 +            default:
 132.226 +                return "";
 132.227 +        }
 132.228 +    }
 132.229 +
 132.230 +    private static String asString(String assertion, Object ... args) {
 132.231 +        if (args == null) {
 132.232 +            return String.format("%s(null)", assertion);
 132.233 +        }
 132.234 +        if (args.length == 1) {
 132.235 +            return String.format("%s(%s)", assertion, args[0]);
 132.236 +        } else {
 132.237 +            return String.format("%s(%s, %s)", assertion, args[0], args[1]);
 132.238 +        }
 132.239 +    }
 132.240 +}
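
AssertsTest exercises the new Asserts class (added below in com/oracle/java/testlibrary/Asserts.java) through its Assertion enum; ordinary test code uses the helpers directly via a static import, with the optional message becoming the text of the RuntimeException on failure. A short usage sketch (class and variable names are illustrative only):

    import static com.oracle.java.testlibrary.Asserts.*;

    // Sketch: typical direct use of the new Asserts helpers in a jtreg test.
    public class AssertsUsageSketch {
        public static void main(String[] args) {
            int answer = 42;
            assertGT(answer, 0, "expected a positive value");
            assertEQ(answer, 42, "expected the answer to be 42");
            assertNotNull(args, "args array should never be null");
        }
    }
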
   133.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   133.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/Asserts.java	Fri Aug 30 09:50:49 2013 +0100
   133.3 @@ -0,0 +1,395 @@
   133.4 +/*
   133.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   133.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   133.7 + *
   133.8 + * This code is free software; you can redistribute it and/or modify it
   133.9 + * under the terms of the GNU General Public License version 2 only, as
  133.10 + * published by the Free Software Foundation.
  133.11 + *
  133.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  133.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  133.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  133.15 + * version 2 for more details (a copy is included in the LICENSE file that
  133.16 + * accompanied this code).
  133.17 + *
  133.18 + * You should have received a copy of the GNU General Public License version
  133.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  133.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  133.21 + *
  133.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  133.23 + * or visit www.oracle.com if you need additional information or have any
  133.24 + * questions.
  133.25 + */
  133.26 +
  133.27 +package com.oracle.java.testlibrary;
  133.28 +
  133.29 +/**
  133.30 + * Asserts that can be used for verifying assumptions in tests.
  133.31 + *
  133.32 + * An assertion will throw a {@link RuntimeException} if the assertion isn't
  133.33 + * valid.  All the asserts can be imported into a test by using a static
  133.34 + * import:
  133.35 + *
  133.36 + * <pre>
  133.37 + * {@code
  133.38 + * import static com.oracle.java.testlibrary.Asserts.*;
  133.39 + * }
  133.40 + * </pre>
  133.41 + *
  133.42 + * Always provide a message describing the assumption if the line number of the
  133.43 + * failing assertion isn't enough to understand why the assumption failed. For
  133.44 + * example, if the assertion is in a loop or in a method that is called
  133.45 + * multiple times, then the line number won't provide enough context to
  133.46 + * understand the failure.
  133.47 + */
  133.48 +public class Asserts {
  133.49 +
  133.50 +    /**
  133.51 +     * Shorthand for {@link #assertLessThan(T, T)}.
  133.52 +     *
  133.53 +     * @see #assertLessThan(T, T)
  133.54 +     */
  133.55 +    public static <T extends Comparable<T>> void assertLT(T lhs, T rhs) {
  133.56 +        assertLessThan(lhs, rhs);
  133.57 +    }
  133.58 +
  133.59 +    /**
  133.60 +     * Shorthand for {@link #assertLessThan(T, T, String)}.
  133.61 +     *
  133.62 +     * @see #assertLessThan(T, T, String)
  133.63 +     */
  133.64 +    public static <T extends Comparable<T>> void assertLT(T lhs, T rhs, String msg) {
  133.65 +        assertLessThan(lhs, rhs, msg);
  133.66 +    }
  133.67 +
  133.68 +    /**
  133.69 +     * Calls {@link #assertLessThan(T, T, String)} with a default message.
  133.70 +     *
  133.71 +     * @see #assertLessThan(T, T, String)
  133.72 +     */
  133.73 +    public static <T extends Comparable<T>> void assertLessThan(T lhs, T rhs) {
  133.74 +        String msg = "Expected that " + format(lhs) + " < " + format(rhs);
  133.75 +        assertLessThan(lhs, rhs, msg);
  133.76 +    }
  133.77 +
  133.78 +    /**
  133.79 +     * Asserts that {@code lhs} is less than {@code rhs}.
  133.80 +     *
  133.81 +     * @param lhs The left hand side of the comparison.
  133.82 +     * @param rhs The right hand side of the comparison.
  133.83 +     * @param msg A description of the assumption.
  133.84 +     * @throws RuntimeException if the assertion isn't valid.
  133.85 +     */
  133.86 +    public static <T extends Comparable<T>> void assertLessThan(T lhs, T rhs, String msg) {
  133.87 +        assertTrue(compare(lhs, rhs, msg) < 0, msg);
  133.88 +    }
  133.89 +
  133.90 +    /**
  133.91 +     * Shorthand for {@link #assertLessThanOrEqual(T, T)}.
  133.92 +     *
  133.93 +     * @see #assertLessThanOrEqual(T, T)
  133.94 +     */
  133.95 +    public static <T extends Comparable<T>> void assertLTE(T lhs, T rhs) {
  133.96 +        assertLessThanOrEqual(lhs, rhs);
  133.97 +    }
  133.98 +
  133.99 +    /**
 133.100 +     * Shorthand for {@link #assertLessThanOrEqual(T, T, String)}.
 133.101 +     *
 133.102 +     * @see #assertLessThanOrEqual(T, T, String)
 133.103 +     */
 133.104 +    public static <T extends Comparable<T>> void assertLTE(T lhs, T rhs, String msg) {
 133.105 +        assertLessThanOrEqual(lhs, rhs, msg);
 133.106 +    }
 133.107 +
 133.108 +    /**
 133.109 +     * Calls {@link #assertLessThanOrEqual(T, T, String)} with a default message.
 133.110 +     *
 133.111 +     * @see #assertLessThanOrEqual(T, T, String)
 133.112 +     */
 133.113 +    public static <T extends Comparable<T>> void assertLessThanOrEqual(T lhs, T rhs) {
 133.114 +        String msg = "Expected that " + format(lhs) + " <= " + format(rhs);
 133.115 +        assertLessThanOrEqual(lhs, rhs, msg);
 133.116 +    }
 133.117 +
 133.118 +    /**
 133.119 +     * Asserts that {@code lhs} is less than or equal to {@code rhs}.
 133.120 +     *
 133.121 +     * @param lhs The left hand side of the comparison.
 133.122 +     * @param rhs The right hand side of the comparison.
 133.123 +     * @param msg A description of the assumption.
 133.124 +     * @throws RuntimeException if the assertion isn't valid.
 133.125 +     */
 133.126 +    public static <T extends Comparable<T>> void assertLessThanOrEqual(T lhs, T rhs, String msg) {
 133.127 +        assertTrue(compare(lhs, rhs, msg) <= 0, msg);
 133.128 +    }
 133.129 +
 133.130 +    /**
 133.131 +     * Shorthand for {@link #assertEquals(T, T)}.
 133.132 +     *
 133.133 +     * @see #assertEquals(T, T)
 133.134 +     */
 133.135 +    public static void assertEQ(Object lhs, Object rhs) {
 133.136 +        assertEquals(lhs, rhs);
 133.137 +    }
 133.138 +
 133.139 +    /**
 133.140 +     * Shorthand for {@link #assertEquals(T, T, String)}.
 133.141 +     *
 133.142 +     * @see #assertEquals(T, T, String)
 133.143 +     */
 133.144 +    public static void assertEQ(Object lhs, Object rhs, String msg) {
 133.145 +        assertEquals(lhs, rhs, msg);
 133.146 +    }
 133.147 +
 133.148 +    /**
 133.149 +     * Calls {@link #assertEquals(T, T, String)} with a default message.
 133.150 +     *
 133.151 +     * @see #assertEquals(T, T, String)
 133.152 +     */
 133.153 +    public static void assertEquals(Object lhs, Object rhs) {
 133.154 +        String msg = "Expected " + format(lhs) + " to equal " + format(rhs);
 133.155 +        assertEquals(lhs, rhs, msg);
 133.156 +    }
 133.157 +
 133.158 +    /**
 133.159 +     * Asserts that {@code lhs} is equal to {@code rhs}.
 133.160 +     *
 133.161 +     * @param lhs The left hand side of the comparison.
 133.162 +     * @param rhs The right hand side of the comparison.
 133.163 +     * @param msg A description of the assumption.
 133.164 +     * @throws RuntimeException if the assertion isn't valid.
 133.165 +     */
 133.166 +    public static void assertEquals(Object lhs, Object rhs, String msg) {
 133.167 +        if (lhs == null) {
 133.168 +            if (rhs != null) {
 133.169 +                error(msg);
 133.170 +            }
 133.171 +        } else {
 133.172 +            assertTrue(lhs.equals(rhs), msg);
 133.173 +        }
 133.174 +    }
 133.175 +
 133.176 +    /**
 133.177 +     * Shorthand for {@link #assertGreaterThanOrEqual(T, T)}.
 133.178 +     *
 133.179 +     * @see #assertGreaterThanOrEqual(T, T)
 133.180 +     */
 133.181 +    public static <T extends Comparable<T>> void assertGTE(T lhs, T rhs) {
 133.182 +        assertGreaterThanOrEqual(lhs, rhs);
 133.183 +    }
 133.184 +
 133.185 +    /**
 133.186 +     * Shorthand for {@link #assertGreaterThanOrEqual(T, T, String)}.
 133.187 +     *
 133.188 +     * @see #assertGreaterThanOrEqual(T, T, String)
 133.189 +     */
 133.190 +    public static <T extends Comparable<T>> void assertGTE(T lhs, T rhs, String msg) {
 133.191 +        assertGreaterThanOrEqual(lhs, rhs, msg);
 133.192 +    }
 133.193 +
 133.194 +    /**
 133.195 +     * Calls {@link #assertGreaterThanOrEqual(T, T, String)} with a default message.
 133.196 +     *
 133.197 +     * @see #assertGreaterThanOrEqual(T, T, String)
 133.198 +     */
 133.199 +    public static <T extends Comparable<T>> void assertGreaterThanOrEqual(T lhs, T rhs) {
 133.200 +        String msg = "Expected that " + format(lhs) + " >= " + format(rhs);
 133.201 +        assertGreaterThanOrEqual(lhs, rhs, msg);
 133.202 +    }
 133.203 +
 133.204 +    /**
 133.205 +     * Asserts that {@code lhs} is greater than or equal to {@code rhs}.
 133.206 +     *
 133.207 +     * @param lhs The left hand side of the comparison.
 133.208 +     * @param rhs The right hand side of the comparison.
 133.209 +     * @param msg A description of the assumption.
 133.210 +     * @throws RuntimeException if the assertion isn't valid.
 133.211 +     */
 133.212 +    public static <T extends Comparable<T>> void assertGreaterThanOrEqual(T lhs, T rhs, String msg) {
 133.213 +        assertTrue(compare(lhs, rhs, msg) >= 0, msg);
 133.214 +    }
 133.215 +
 133.216 +    /**
 133.217 +     * Shorthand for {@link #assertGreaterThan(T, T)}.
 133.218 +     *
 133.219 +     * @see #assertGreaterThan(T, T)
 133.220 +     */
 133.221 +    public static <T extends Comparable<T>> void assertGT(T lhs, T rhs) {
 133.222 +        assertGreaterThan(lhs, rhs);
 133.223 +    }
 133.224 +
 133.225 +    /**
 133.226 +     * Shorthand for {@link #assertGreaterThan(T, T, String)}.
 133.227 +     *
 133.228 +     * @see #assertGreaterThan(T, T, String)
 133.229 +     */
 133.230 +    public static <T extends Comparable<T>> void assertGT(T lhs, T rhs, String msg) {
 133.231 +        assertGreaterThan(lhs, rhs, msg);
 133.232 +    }
 133.233 +
 133.234 +    /**
 133.235 +     * Calls {@link #assertGreaterThan(T, T, String)} with a default message.
 133.236 +     *
 133.237 +     * @see #assertGreaterThan(T, T, String)
 133.238 +     */
 133.239 +    public static <T extends Comparable<T>> void assertGreaterThan(T lhs, T rhs) {
 133.240 +        String msg = "Expected that " + format(lhs) + " > " + format(rhs);
 133.241 +        assertGreaterThan(lhs, rhs, msg);
 133.242 +    }
 133.243 +
 133.244 +    /**
 133.245 +     * Asserts that {@code lhs} is greater than {@code rhs}.
 133.246 +     *
 133.247 +     * @param lhs The left hand side of the comparison.
 133.248 +     * @param rhs The right hand side of the comparison.
 133.249 +     * @param msg A description of the assumption.
 133.250 +     * @throws RuntimeException if the assertion isn't valid.
 133.251 +     */
 133.252 +    public static <T extends Comparable<T>> void assertGreaterThan(T lhs, T rhs, String msg) {
 133.253 +        assertTrue(compare(lhs, rhs, msg) > 0, msg);
 133.254 +    }
 133.255 +
 133.256 +    /**
 133.257 +     * Shorthand for {@link #assertNotEquals(Object, Object)}.
 133.258 +     *
 133.259 +     * @see #assertNotEquals(Object, Object)
 133.260 +     */
 133.261 +    public static void assertNE(Object lhs, Object rhs) {
 133.262 +        assertNotEquals(lhs, rhs);
 133.263 +    }
 133.264 +
 133.265 +    /**
 133.266 +     * Shorthand for {@link #assertNotEquals(Object, Object, String)}.
 133.267 +     *
 133.268 +     * @see #assertNotEquals(Object, Object, String)
 133.269 +     */
 133.270 +    public static void assertNE(Object lhs, Object rhs, String msg) {
 133.271 +        assertNotEquals(lhs, rhs, msg);
 133.272 +    }
 133.273 +
 133.274 +    /**
 133.275 +     * Calls {@link #assertNotEquals(Object, Object, String)} with a default message.
 133.276 +     *
 133.277 +     * @see #assertNotEquals(Object, Object, String)
 133.278 +     */
 133.279 +    public static void assertNotEquals(Object lhs, Object rhs) {
 133.280 +        String msg = "Expected " + format(lhs) + " to not equal " + format(rhs);
 133.281 +        assertNotEquals(lhs, rhs, msg);
 133.282 +    }
 133.283 +
 133.284 +    /**
 133.285 +     * Asserts that {@code lhs} is not equal to {@code rhs}.
 133.286 +     *
 133.287 +     * @param lhs The left hand side of the comparison.
 133.288 +     * @param rhs The right hand side of the comparison.
 133.289 +     * @param msg A description of the assumption.
 133.290 +     * @throws RuntimeException if the assertion isn't valid.
 133.291 +     */
 133.292 +    public static void assertNotEquals(Object lhs, Object rhs, String msg) {
 133.293 +        if (lhs == null) {
 133.294 +            if (rhs == null) {
 133.295 +                error(msg);
 133.296 +            }
 133.297 +        } else {
 133.298 +            assertFalse(lhs.equals(rhs), msg);
 133.299 +        }
 133.300 +    }
 133.301 +
 133.302 +    /**
 133.303 +     * Calls {@link #assertNull(Object, String)} with a default message.
 133.304 +     *
 133.305 +     * @see #assertNull(Object, String)
 133.306 +     */
 133.307 +    public static void assertNull(Object o) {
 133.308 +        assertNull(o, "Expected " + format(o) + " to be null");
 133.309 +    }
 133.310 +
 133.311 +    /**
 133.312 +     * Asserts that {@code o} is null.
 133.313 +     *
 133.314 +     * @param o The reference assumed to be null.
 133.315 +     * @param msg A description of the assumption.
 133.316 +     * @throws RuntimeException if the assertion isn't valid.
 133.317 +     */
 133.318 +    public static void assertNull(Object o, String msg) {
 133.319 +        assertEquals(o, null, msg);
 133.320 +    }
 133.321 +
 133.322 +    /**
 133.323 +     * Calls {@link #assertNotNull(Object, String)} with a default message.
 133.324 +     *
 133.325 +     * @see #assertNotNull(Object, String)
 133.326 +     */
 133.327 +    public static void assertNotNull(Object o) {
 133.328 +        assertNotNull(o, "Expected non-null reference");
 133.329 +    }
 133.330 +
 133.331 +    /**
 133.332 +     * Asserts that {@code o} is <i>not</i> null.
 133.333 +     *
 133.334 +     * @param o The reference assumed <i>not</i> to be null.
 133.335 +     * @param msg A description of the assumption.
 133.336 +     * @throws RuntimeException if the assertion isn't valid.
 133.337 +     */
 133.338 +    public static void assertNotNull(Object o, String msg) {
 133.339 +        assertNotEquals(o, null, msg);
 133.340 +    }
 133.341 +
 133.342 +    /**
 133.343 +     * Calls {@link #assertFalse(boolean, String)} with a default message.
 133.344 +     *
 133.345 +     * @see #assertFalse(boolean, String)
 133.346 +     */
 133.347 +    public static void assertFalse(boolean value) {
 133.348 +        assertFalse(value, "Expected value to be false");
 133.349 +    }
 133.350 +
 133.351 +    /**
 133.352 +     * Asserts that {@code value} is {@code false}.
 133.353 +     *
 133.354 +     * @param value The value assumed to be false.
 133.355 +     * @param msg A description of the assumption.
 133.356 +     * @throws RuntimeException if the assertion isn't valid.
 133.357 +     */
 133.358 +    public static void assertFalse(boolean value, String msg) {
 133.359 +        assertTrue(!value, msg);
 133.360 +    }
 133.361 +
 133.362 +    /**
 133.363 +     * Calls {@link #assertTrue(boolean, String)} with a default message.
 133.364 +     *
 133.365 +     * @see #assertTrue(boolean, String)
 133.366 +     */
 133.367 +    public static void assertTrue(boolean value) {
 133.368 +        assertTrue(value, "Expected value to be true");
 133.369 +    }
 133.370 +
 133.371 +    /**
 133.372 +     * Asserts that {@code value} is {@code true}.
 133.373 +     *
 133.374 +     * @param value The value assumed to be true.
 133.375 +     * @param msg A description of the assumption.
 133.376 +     * @throws RuntimeException if the assertion isn't valid.
 133.377 +     */
 133.378 +    public static void assertTrue(boolean value, String msg) {
 133.379 +        if (!value) {
 133.380 +            error(msg);
 133.381 +        }
 133.382 +    }
 133.383 +
 133.384 +    private static <T extends Comparable<T>> int compare(T lhs, T rhs, String msg) {
 133.385 +        assertNotNull(lhs, msg);
 133.386 +        assertNotNull(rhs, msg);
 133.387 +        return lhs.compareTo(rhs);
 133.388 +    }
 133.389 +
 133.390 +    private static String format(Object o) {
 133.391 +        return o == null ? "null" : o.toString();
 133.392 +    }
 133.393 +
 133.394 +    private static void error(String msg) {
 133.395 +        throw new RuntimeException(msg);
 133.396 +    }
 133.397 +
 133.398 +}
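
A minimal usage sketch of the assertion helpers added above. It assumes the enclosing class is the test library's Asserts class in the com.oracle.java.testlibrary package (the class header falls outside this hunk); every assertion throws a RuntimeException with the given or default message when it fails.

import com.oracle.java.testlibrary.Asserts;

public class AssertsExample {
    public static void main(String[] args) {
        // Primitive arguments autobox to Integer, which is Comparable<Integer>.
        Asserts.assertEQ(2 + 2, 4, "arithmetic sanity check");
        Asserts.assertGT(10, 3);                       // default-message variant
        Asserts.assertNotNull("value");                // passes: reference is non-null
        Asserts.assertFalse(1 > 2, "1 is not greater than 2");
    }
}
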
   134.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   134.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/ByteCodeLoader.java	Fri Aug 30 09:50:49 2013 +0100
   134.3 @@ -0,0 +1,74 @@
   134.4 +/*
   134.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   134.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   134.7 + *
   134.8 + * This code is free software; you can redistribute it and/or modify it
   134.9 + * under the terms of the GNU General Public License version 2 only, as
  134.10 + * published by the Free Software Foundation.
  134.11 + *
  134.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  134.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  134.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  134.15 + * version 2 for more details (a copy is included in the LICENSE file that
  134.16 + * accompanied this code).
  134.17 + *
  134.18 + * You should have received a copy of the GNU General Public License version
  134.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  134.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  134.21 + *
  134.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  134.23 + * or visit www.oracle.com if you need additional information or have any
  134.24 + * questions.
  134.25 + */
  134.26 +
  134.27 +package com.oracle.java.testlibrary;
  134.28 +
  134.29 +import java.security.SecureClassLoader;
  134.30 +
  134.31 +/**
  134.32 + * {@code ByteCodeLoader} can be used for easy loading of byte code already
  134.33 + * present in memory.
  134.34 + *
  134.35 + * {@code InMemoryJavaCompiler} can be used for compiling source code in a string
  134.36 + * into byte code, which can then be loaded with {@code ByteCodeLoader}.
  134.37 + *
  134.38 + * @see InMemoryJavaCompiler
  134.39 + */
  134.40 +public class ByteCodeLoader extends SecureClassLoader {
  134.41 +    private final String className;
  134.42 +    private final byte[] byteCode;
  134.43 +
  134.44 +    /**
  134.45 +     * Creates a new {@code ByteCodeLoader} ready to load a class with the
  134.46 +     * given name and the given byte code.
  134.47 +     *
  134.48 +     * @param className The name of the class
  134.49 +     * @param byteCode The byte code of the class
  134.50 +     */
  134.51 +    public ByteCodeLoader(String className, byte[] byteCode) {
  134.52 +        this.className = className;
  134.53 +        this.byteCode = byteCode;
  134.54 +    }
  134.55 +
  134.56 +    @Override
  134.57 +    protected Class<?> findClass(String name) throws ClassNotFoundException {
  134.58 +        if (!name.equals(className)) {
  134.59 +            throw new ClassNotFoundException(name);
  134.60 +        }
  134.61 +
  134.62 +        return defineClass(name, byteCode, 0, byteCode.length);
  134.63 +    }
  134.64 +
  134.65 +    /**
  134.66 +     * Utility method for creating a new {@code ByteCodeLoader} and then
  134.67 +     * directly loading the given byte code.
  134.68 +     *
  134.69 +     * @param className The name of the class
  134.70 +     * @param byteCode The byte code for the class
  134.71 +     * @throws ClassNotFoundException if the class can't be loaded
  134.72 +     * @return A {@link Class} object representing the class
  134.73 +     */
  134.74 +    public static Class<?> load(String className, byte[] byteCode) throws ClassNotFoundException {
  134.75 +        return new ByteCodeLoader(className, byteCode).loadClass(className);
  134.76 +    }
  134.77 +}
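
A short, hedged sketch of the loader above: read the bytes of an already compiled class from disk and define it through ByteCodeLoader. The file name Foo.class is purely illustrative; any source of byte code works.

import java.nio.file.Files;
import java.nio.file.Paths;

import com.oracle.java.testlibrary.ByteCodeLoader;

public class LoadFromBytes {
    public static void main(String[] args) throws Exception {
        // The path is an assumption for the example.
        byte[] byteCode = Files.readAllBytes(Paths.get("Foo.class"));
        // Defines the class from the in-memory byte array, bypassing the class path.
        Class<?> fooClass = ByteCodeLoader.load("Foo", byteCode);
        System.out.println("Loaded " + fooClass.getName());
    }
}
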
   135.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   135.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/InMemoryJavaCompiler.java	Fri Aug 30 09:50:49 2013 +0100
   135.3 @@ -0,0 +1,154 @@
   135.4 +/*
   135.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   135.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   135.7 + *
   135.8 + * This code is free software; you can redistribute it and/or modify it
   135.9 + * under the terms of the GNU General Public License version 2 only, as
  135.10 + * published by the Free Software Foundation.
  135.11 + *
  135.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  135.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  135.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  135.15 + * version 2 for more details (a copy is included in the LICENSE file that
  135.16 + * accompanied this code).
  135.17 + *
  135.18 + * You should have received a copy of the GNU General Public License version
  135.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  135.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  135.21 + *
  135.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  135.23 + * or visit www.oracle.com if you need additional information or have any
  135.24 + * questions.
  135.25 + */
  135.26 +
  135.27 +package com.oracle.java.testlibrary;
  135.28 +
  135.29 +import java.io.ByteArrayOutputStream;
  135.30 +import java.io.IOException;
  135.31 +import java.io.OutputStream;
  135.32 +
  135.33 +import java.net.URI;
  135.34 +import java.util.Arrays;
  135.35 +
  135.36 +import javax.tools.ForwardingJavaFileManager;
  135.38 +import javax.tools.FileObject;
  135.39 +import javax.tools.JavaCompiler;
  135.40 +import javax.tools.JavaCompiler.CompilationTask;
  135.41 +import javax.tools.JavaFileManager;
  135.42 +import javax.tools.JavaFileObject;
  135.43 +import javax.tools.JavaFileObject.Kind;
  135.44 +import javax.tools.SimpleJavaFileObject;
  135.45 +import javax.tools.ToolProvider;
  135.46 +
  135.47 +/**
  135.48 + * {@code InMemoryJavaCompiler} can be used for compiling a {@link
  135.49 + * CharSequence} to a {@code byte[]}.
  135.50 + *
  135.51 + * The compiler will not use the file system at all, instead using a {@link
  135.52 + * ByteArrayOutputStream} for storing the byte code. For the source code, any
  135.53 + * kind of {@link CharSequence} can be used, e.g. {@link String}, {@link
  135.54 + * StringBuffer} or {@link StringBuilder}.
  135.55 + *
  135.56 + * The {@code InMemoryJavaCompiler} can be used together with a {@code
  135.57 + * ByteCodeLoader} to compile and load source code held in a {@link String}:
  135.58 + *
  135.59 + * <pre>
  135.60 + * {@code
  135.61 + * import com.oracle.java.testlibrary.InMemoryJavaCompiler;
  135.62 + * import com.oracle.java.testlibrary.ByteCodeLoader;
  135.63 + *
  135.64 + * class Example {
  135.65 + *     public static void main(String[] args) throws Exception {
  135.66 + *         String className = "Foo";
  135.67 + *         String sourceCode = "public class " + className + " {" +
  135.68 + *                             "    public void bar() {" +
  135.69 + *                             "        System.out.println(\"Hello from bar!\");" +
  135.70 + *                             "    }" +
  135.71 + *                             "}";
  135.72 + *         byte[] byteCode = InMemoryJavaCompiler.compile(className, sourceCode);
  135.73 + *         Class<?> fooClass = ByteCodeLoader.load(className, byteCode);
  135.74 + *     }
  135.75 + * }
  135.76 + * }
  135.77 + * </pre>
  135.78 + */
  135.79 +public class InMemoryJavaCompiler {
  135.80 +    private static class MemoryJavaFileObject extends SimpleJavaFileObject {
  135.81 +        private final String className;
  135.82 +        private final CharSequence sourceCode;
  135.83 +        private final ByteArrayOutputStream byteCode;
  135.84 +
  135.85 +        public MemoryJavaFileObject(String className, CharSequence sourceCode) {
  135.86 +            super(URI.create("string:///" + className.replace('.','/') + Kind.SOURCE.extension), Kind.SOURCE);
  135.87 +            this.className = className;
  135.88 +            this.sourceCode = sourceCode;
  135.89 +            this.byteCode = new ByteArrayOutputStream();
  135.90 +        }
  135.91 +
  135.92 +        @Override
  135.93 +        public CharSequence getCharContent(boolean ignoreEncodingErrors) {
  135.94 +            return sourceCode;
  135.95 +        }
  135.96 +
  135.97 +        @Override
  135.98 +        public OutputStream openOutputStream() throws IOException {
  135.99 +            return byteCode;
 135.100 +        }
 135.101 +
 135.102 +        public byte[] getByteCode() {
 135.103 +            return byteCode.toByteArray();
 135.104 +        }
 135.105 +
 135.106 +        public String getClassName() {
 135.107 +            return className;
 135.108 +        }
 135.109 +    }
 135.110 +
 135.111 +    private static class FileManagerWrapper extends ForwardingJavaFileManager {
 135.112 +        private MemoryJavaFileObject file;
 135.113 +
 135.114 +        public FileManagerWrapper(MemoryJavaFileObject file) {
 135.115 +            super(getCompiler().getStandardFileManager(null, null, null));
 135.116 +            this.file = file;
 135.117 +        }
 135.118 +
 135.119 +        @Override
 135.120 +        public JavaFileObject getJavaFileForOutput(Location location, String className,
 135.121 +                                                   Kind kind, FileObject sibling)
 135.122 +            throws IOException {
 135.123 +            if (!file.getClassName().equals(className)) {
 135.124 +                throw new IOException("Expected class with name " + file.getClassName() +
 135.125 +                                      ", but got " + className);
 135.126 +            }
 135.127 +            return file;
 135.128 +        }
 135.129 +    }
 135.130 +
 135.131 +    /**
 135.132 +     * Compiles the class with the given name and source code.
 135.133 +     *
 135.134 +     * @param className The name of the class
 135.135 +     * @param sourceCode The source code for the class with name {@code className}
 135.136 +     * @throws RuntimeException if the compilation did not succeed
 135.137 +     * @return The resulting byte code from the compilation
 135.138 +     */
 135.139 +    public static byte[] compile(String className, CharSequence sourceCode) {
 135.140 +        MemoryJavaFileObject file = new MemoryJavaFileObject(className, sourceCode);
 135.141 +        CompilationTask task = getCompilationTask(file);
 135.142 +
 135.143 +        if (!task.call()) {
 135.144 +            throw new RuntimeException("Could not compile " + className + " with source code " + sourceCode);
 135.145 +        }
 135.146 +
 135.147 +        return file.getByteCode();
 135.148 +    }
 135.149 +
 135.150 +    private static JavaCompiler getCompiler() {
 135.151 +        return ToolProvider.getSystemJavaCompiler();
 135.152 +    }
 135.153 +
 135.154 +    private static CompilationTask getCompilationTask(MemoryJavaFileObject file) {
 135.155 +        return getCompiler().getTask(null, new FileManagerWrapper(file), null, null, null, Arrays.asList(file));
 135.156 +    }
 135.157 +}
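
An end-to-end sketch combining the two new helpers: compile a class from a String, load it with ByteCodeLoader, and invoke a method reflectively. Only the APIs added in this changeset plus core reflection are used; the class and method names are illustrative.

import com.oracle.java.testlibrary.ByteCodeLoader;
import com.oracle.java.testlibrary.InMemoryJavaCompiler;

public class CompileAndRun {
    public static void main(String[] args) throws Exception {
        String className = "Foo";
        String sourceCode = "public class Foo {"
                          + "    public void bar() { System.out.println(\"Hello from bar!\"); }"
                          + "}";
        // Compile entirely in memory, then define the class from the resulting bytes.
        byte[] byteCode = InMemoryJavaCompiler.compile(className, sourceCode);
        Class<?> fooClass = ByteCodeLoader.load(className, byteCode);
        // Instantiate Foo and call bar() without ever writing a .class file.
        fooClass.getMethod("bar").invoke(fooClass.newInstance());
    }
}
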
   136.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   136.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Fri Aug 30 09:50:49 2013 +0100
   136.3 @@ -0,0 +1,51 @@
   136.4 +/*
   136.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   136.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   136.7 + *
   136.8 + * This code is free software; you can redistribute it and/or modify it
   136.9 + * under the terms of the GNU General Public License version 2 only, as
  136.10 + * published by the Free Software Foundation.
  136.11 + *
  136.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  136.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  136.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  136.15 + * version 2 for more details (a copy is included in the LICENSE file that
  136.16 + * accompanied this code).
  136.17 + *
  136.18 + * You should have received a copy of the GNU General Public License version
  136.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  136.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  136.21 + *
  136.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  136.23 + * or visit www.oracle.com if you need additional information or have any
  136.24 + * questions.
  136.25 + */
  136.26 +
  136.27 +package com.oracle.java.testlibrary;
  136.28 +
  136.29 +import java.lang.management.RuntimeMXBean;
  136.30 +import java.lang.management.ManagementFactory;
  136.31 +import java.util.List;
  136.32 +
  136.33 +/**
  136.34 + * This class provides access to the input arguments to the VM.
  136.35 + */
  136.36 +public class InputArguments {
  136.37 +    private static final List<String> args;
  136.38 +
  136.39 +    static {
  136.40 +        RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
  136.41 +        args = runtimeMxBean.getInputArguments();
  136.42 +    }
  136.43 +
  136.44 +    /**
  136.45 +     * Returns true if {@code arg} is an input argument to the VM.
  136.46 +     *
  136.47 +     * @param arg The name of the argument.
  136.48 +     * @return {@code true} if the given argument is an input argument,
  136.49 +     *         otherwise {@code false}.
  136.50 +     */
  136.51 +    public static boolean contains(String arg) {
  136.52 +        return args.contains(arg);
  136.53 +    }
  136.54 +}
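
A hedged usage sketch: guard a test on a VM flag. InputArguments.contains matches complete argument strings as reported by RuntimeMXBean.getInputArguments; the flag below is only an example.

import com.oracle.java.testlibrary.InputArguments;

public class RequiresFlag {
    public static void main(String[] args) {
        if (!InputArguments.contains("-XX:+UseSerialGC")) {
            // Not a failure: the test simply does not apply to this configuration.
            System.out.println("Test requires -XX:+UseSerialGC; skipping.");
            return;
        }
        // ... test body that depends on the serial collector ...
    }
}
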
   137.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   137.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/PerfCounter.java	Fri Aug 30 09:50:49 2013 +0100
   137.3 @@ -0,0 +1,70 @@
   137.4 +/*
   137.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   137.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   137.7 + *
   137.8 + * This code is free software; you can redistribute it and/or modify it
   137.9 + * under the terms of the GNU General Public License version 2 only, as
  137.10 + * published by the Free Software Foundation.
  137.11 + *
  137.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  137.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  137.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  137.15 + * version 2 for more details (a copy is included in the LICENSE file that
  137.16 + * accompanied this code).
  137.17 + *
  137.18 + * You should have received a copy of the GNU General Public License version
  137.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  137.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  137.21 + *
  137.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  137.23 + * or visit www.oracle.com if you need additional information or have any
  137.24 + * questions.
  137.25 + */
  137.26 +
  137.27 +package com.oracle.java.testlibrary;
  137.28 +
  137.29 +import sun.jvmstat.monitor.Monitor;
  137.30 +
  137.31 +/**
  137.32 + * Represents a performance counter in the JVM.
  137.33 + *
  137.34 + * See http://openjdk.java.net/groups/hotspot/docs/Serviceability.html#bjvmstat
  137.35 + * for more details about performance counters.
  137.36 + */
  137.37 +public class PerfCounter {
  137.38 +    private final Monitor monitor;
  137.39 +    private final String name;
  137.40 +
  137.41 +    PerfCounter(Monitor monitor, String name) {
  137.42 +        this.monitor = monitor;
  137.43 +        this.name = name;
  137.44 +    }
  137.45 +
  137.46 +    /**
  137.47 +     * Returns the value of this performance counter as a long.
  137.48 +     *
  137.49 +     * @return The long value of this performance counter
  137.50 +     * @throws RuntimeException If the value of the performance counter isn't a long
  137.51 +     */
  137.52 +    public long longValue() {
  137.53 +        Object value = monitor.getValue();
  137.54 +        if (value instanceof Long) {
  137.55 +            return ((Long) value).longValue();
  137.56 +        }
  137.57 +        throw new RuntimeException("Expected " + monitor.getName() + " to have a long value");
  137.58 +    }
  137.59 +
  137.60 +    /**
  137.61 +     * Returns the name of the performance counter.
  137.62 +     *
  137.63 +     * @return The name of the performance counter.
  137.64 +     */
  137.65 +    public String getName() {
  137.66 +        return name;
  137.67 +    }
  137.68 +
  137.69 +    @Override
  137.70 +    public String toString() {
  137.71 +        return name;
  137.72 +    }
  137.73 +}
   138.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   138.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/PerfCounters.java	Fri Aug 30 09:50:49 2013 +0100
   138.3 @@ -0,0 +1,69 @@
   138.4 +/*
   138.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   138.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   138.7 + *
   138.8 + * This code is free software; you can redistribute it and/or modify it
   138.9 + * under the terms of the GNU General Public License version 2 only, as
  138.10 + * published by the Free Software Foundation.
  138.11 + *
  138.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  138.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  138.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  138.15 + * version 2 for more details (a copy is included in the LICENSE file that
  138.16 + * accompanied this code).
  138.17 + *
  138.18 + * You should have received a copy of the GNU General Public License version
  138.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  138.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  138.21 + *
  138.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  138.23 + * or visit www.oracle.com if you need additional information or have any
  138.24 + * questions.
  138.25 + */
  138.26 +
  138.27 +package com.oracle.java.testlibrary;
  138.28 +
  138.29 +import sun.jvmstat.monitor.Monitor;
  138.30 +import sun.jvmstat.monitor.MonitorException;
  138.31 +import sun.jvmstat.monitor.MonitoredHost;
  138.32 +import sun.jvmstat.monitor.MonitoredVm;
  138.33 +import sun.jvmstat.monitor.VmIdentifier;
  138.34 +
  138.35 +/**
  138.36 + * PerfCounters can be used to get a performance counter from the currently
  138.37 + * executing VM.
  138.38 + *
  138.39 + * Throws a runtime exception if an error occurs while communicating with the
  138.40 + * currently executing VM.
  138.41 + */
  138.42 +public class PerfCounters {
  138.43 +    private final static MonitoredVm vm;
  138.44 +
  138.45 +    static {
  138.46 +        try {
  138.47 +            String pid = Integer.toString(ProcessTools.getProcessId());
  138.48 +            VmIdentifier vmId = new VmIdentifier(pid);
  138.49 +            MonitoredHost host = MonitoredHost.getMonitoredHost(vmId);
  138.50 +            vm = host.getMonitoredVm(vmId);
  138.51 +        } catch (Exception e) {
  138.52 +            throw new RuntimeException("Could not connect to the VM", e);
  138.53 +        }
  138.54 +    }
  138.55 +
  138.56 +    /**
  138.57 +     * Returns the performance counter with the given name.
  138.58 +     *
  138.59 +     * @param name The name of the performance counter.
  138.60 +     * @throws IllegalArgumentException If no counter with the given name exists.
  138.61 +     * @throws MonitorException If an error occurs while communicating with the VM.
  138.62 +     * @return The performance counter with the given name.
  138.63 +     */
  138.64 +    public static PerfCounter findByName(String name)
  138.65 +        throws MonitorException, IllegalArgumentException {
  138.66 +        Monitor m = vm.findByName(name);
  138.67 +        if (m == null) {
  138.68 +            throw new IllegalArgumentException("Did not find a performance counter with name " + name);
  138.69 +        }
  138.70 +        return new PerfCounter(m, name);
  138.71 +    }
  138.72 +}
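
A brief sketch tying PerfCounters and PerfCounter together: look up a counter in the running VM and read it as a long. The counter name java.cls.loadedClasses is assumed for illustration; any jvmstat counter name can be substituted.

import com.oracle.java.testlibrary.PerfCounter;
import com.oracle.java.testlibrary.PerfCounters;

public class ReadCounter {
    public static void main(String[] args) throws Exception {
        // findByName throws if the counter does not exist or the VM cannot be monitored.
        PerfCounter loadedClasses = PerfCounters.findByName("java.cls.loadedClasses");
        System.out.println(loadedClasses.getName() + " = " + loadedClasses.longValue());
    }
}
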
   139.1 --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Aug 23 22:12:18 2013 +0100
   139.2 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Aug 30 09:50:49 2013 +0100
   139.3 @@ -93,23 +93,45 @@
   139.4  
   139.5    // Compiler
   139.6    public native void    deoptimizeAll();
   139.7 -  public native boolean isMethodCompiled(Executable method);
   139.8 -  public boolean isMethodCompilable(Executable method) {
   139.9 -      return isMethodCompilable(method, -1 /*any*/);
  139.10 +  public        boolean isMethodCompiled(Executable method) {
  139.11 +    return isMethodCompiled(method, false /*not osr*/);
  139.12    }
  139.13 -  public native boolean isMethodCompilable(Executable method, int compLevel);
  139.14 +  public native boolean isMethodCompiled(Executable method, boolean isOsr);
  139.15 +  public        boolean isMethodCompilable(Executable method) {
  139.16 +    return isMethodCompilable(method, -1 /*any*/);
  139.17 +  }
  139.18 +  public        boolean isMethodCompilable(Executable method, int compLevel) {
  139.19 +    return isMethodCompilable(method, compLevel, false /*not osr*/);
  139.20 +  }
  139.21 +  public native boolean isMethodCompilable(Executable method, int compLevel, boolean isOsr);
  139.22    public native boolean isMethodQueuedForCompilation(Executable method);
  139.23 -  public native int     deoptimizeMethod(Executable method);
  139.24 -  public void makeMethodNotCompilable(Executable method) {
  139.25 -      makeMethodNotCompilable(method, -1 /*any*/);
  139.26 +  public        int     deoptimizeMethod(Executable method) {
  139.27 +    return deoptimizeMethod(method, false /*not osr*/);
  139.28    }
  139.29 -  public native void    makeMethodNotCompilable(Executable method, int compLevel);
  139.30 -  public native int     getMethodCompilationLevel(Executable method);
  139.31 +  public native int     deoptimizeMethod(Executable method, boolean isOsr);
  139.32 +  public        void    makeMethodNotCompilable(Executable method) {
  139.33 +    makeMethodNotCompilable(method, -1 /*any*/);
  139.34 +  }
  139.35 +  public        void    makeMethodNotCompilable(Executable method, int compLevel) {
  139.36 +    makeMethodNotCompilable(method, compLevel, false /*not osr*/);
  139.37 +  }
  139.38 +  public native void    makeMethodNotCompilable(Executable method, int compLevel, boolean isOsr);
  139.39 +  public        int     getMethodCompilationLevel(Executable method) {
  139.40 +    return getMethodCompilationLevel(method, false /*not osr*/);
  139.41 +  }
  139.42 +  public native int     getMethodCompilationLevel(Executable method, boolean isOsr);
  139.43    public native boolean testSetDontInlineMethod(Executable method, boolean value);
  139.44 -  public native int     getCompileQueuesSize();
  139.45 +  public        int     getCompileQueuesSize() {
  139.46 +    return getCompileQueueSize(-1 /*any*/);
  139.47 +  }
  139.48 +  public native int     getCompileQueueSize(int compLevel);
  139.49    public native boolean testSetForceInlineMethod(Executable method, boolean value);
  139.50 -  public native boolean enqueueMethodForCompilation(Executable method, int compLevel);
  139.51 +  public boolean        enqueueMethodForCompilation(Executable method, int compLevel) {
  139.52 +    return enqueueMethodForCompilation(method, compLevel, -1 /*InvocationEntryBci*/);
  139.53 +  }
  139.54 +  public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci);
  139.55    public native void    clearMethodState(Executable method);
  139.56 +  public native int     getMethodEntryBci(Executable method);
  139.57  
  139.58    // Interned strings
  139.59    public native boolean isInStringTable(String str);
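
A hedged sketch of the OSR-aware overloads introduced in the hunk above. It assumes the usual WhiteBox.getWhiteBox() accessor from this test library (not shown in this hunk) and that the VM is started with -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI and the WhiteBox classes on the boot class path.

import java.lang.reflect.Method;
import sun.hotspot.WhiteBox;

public class OsrQueries {
    public static void main(String[] args) throws Exception {
        WhiteBox wb = WhiteBox.getWhiteBox();
        Method m = OsrQueries.class.getDeclaredMethod("main", String[].class);

        // New two-argument form: ask specifically about an OSR compilation.
        System.out.println("OSR compiled: " + wb.isMethodCompiled(m, true /* isOsr */));

        // New per-level queue size query; -1 preserves the old "any level" behaviour.
        System.out.println("Queue size (any level): " + wb.getCompileQueueSize(-1));
    }
}
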
