Merge

author:    amurillo
date:      Tue, 26 Aug 2014 13:38:33 -0700
changeset: 7061:3374ec4c4448
parent:    7060:28b4223e2ea7
parent:    7055:38539608359a
child:     7062:77878c021b8d

files (truncated list):
.hgtags
make/hotspot_version
src/share/vm/runtime/arguments.cpp
     1.1 --- a/.hgtags	Fri Aug 22 13:24:04 2014 +0200
     1.2 +++ b/.hgtags	Tue Aug 26 13:38:33 2014 -0700
     1.3 @@ -508,3 +508,5 @@
     1.4  cb95655ef06fece507bbc2792474411ab2e899ab hs25.40-b04
     1.5  dc06b830ea95ed953cac02e9e67a75ab682edb97 jdk8u40-b01
     1.6  897333c7e5874625bd26d09fdaf242196024e9c2 hs25.40-b05
     1.7 +f52cb91647590fe4a12af295a8a87e2cb761b044 jdk8u40-b02
     1.8 +fbc31318922c31488c0464ccd864d2cd1d9e21a7 hs25.40-b06
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Fri Aug 22 13:24:04 2014 +0200
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Tue Aug 26 13:38:33 2014 -0700
     2.3 @@ -45,8 +45,8 @@
     2.4  public class G1CollectedHeap extends SharedHeap {
     2.5      // HeapRegionSeq _seq;
     2.6      static private long hrsFieldOffset;
     2.7 -    // MemRegion _g1_committed;
     2.8 -    static private long g1CommittedFieldOffset;
     2.9 +    // MemRegion _g1_reserved;
    2.10 +    static private long g1ReservedFieldOffset;
    2.11      // size_t _summary_bytes_used;
    2.12      static private CIntegerField summaryBytesUsedField;
    2.13      // G1MonitoringSupport* _g1mm;
    2.14 @@ -68,7 +68,6 @@
    2.15          Type type = db.lookupType("G1CollectedHeap");
    2.16  
    2.17          hrsFieldOffset = type.getField("_hrs").getOffset();
    2.18 -        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
    2.19          summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
    2.20          g1mmField = type.getAddressField("_g1mm");
    2.21          oldSetFieldOffset = type.getField("_old_set").getOffset();
    2.22 @@ -76,9 +75,7 @@
    2.23      }
    2.24  
    2.25      public long capacity() {
    2.26 -        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
    2.27 -        MemRegion g1Committed = new MemRegion(g1CommittedAddr);
    2.28 -        return g1Committed.byteSize();
    2.29 +        return hrs().capacity();
    2.30      }
    2.31  
    2.32      public long used() {
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Fri Aug 22 13:24:04 2014 +0200
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Tue Aug 26 13:38:33 2014 -0700
     3.3 @@ -93,19 +93,35 @@
     3.4      private class HeapRegionIterator implements Iterator<HeapRegion> {
     3.5          private long index;
     3.6          private long length;
     3.7 +        private HeapRegion next;
     3.8 +
     3.9 +        public HeapRegion positionToNext() {
    3.10 +          HeapRegion result = next;
    3.11 +          while (index < length && at(index) == null) {
    3.12 +            index++;
    3.13 +          }
    3.14 +          if (index < length) {
    3.15 +            next = at(index);
    3.16 +            index++; // restart search at next element
    3.17 +          } else {
    3.18 +            next = null;
    3.19 +          }
    3.20 +          return result;
    3.21 +        }
    3.22  
    3.23          @Override
    3.24 -        public boolean hasNext() { return index < length; }
    3.25 +        public boolean hasNext() { return next != null;     }
    3.26  
    3.27          @Override
    3.28 -        public HeapRegion next() { return at(index++);    }
    3.29 +        public HeapRegion next() { return positionToNext(); }
    3.30  
    3.31          @Override
    3.32 -        public void remove()     { /* not supported */    }
    3.33 +        public void remove()     { /* not supported */      }
    3.34  
    3.35 -        HeapRegionIterator(long committedLength) {
    3.36 +        HeapRegionIterator(long totalLength) {
    3.37              index = 0;
    3.38 -            length = committedLength;
    3.39 +            length = totalLength;
    3.40 +            positionToNext();
    3.41          }
    3.42      }
    3.43  
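Reviewer note: the new HeapRegionIterator pre-positions itself on the next non-null entry so callers never observe holes left by uncommitted regions. A minimal standalone sketch of the same look-ahead pattern over a plain array (names and types are illustrative, not the HotSpot SA API):

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    // Iterates over a sparse array, skipping null slots by always
    // positioning on the next non-null element ahead of time.
    class SkippingIterator<T> implements Iterator<T> {
        private final T[] table;
        private int index;   // next slot to examine
        private T next;      // pre-fetched non-null element, or null at the end

        SkippingIterator(T[] table) {
            this.table = table;
            positionToNext();            // prime the first element, as the patch does
        }

        private T positionToNext() {
            T result = next;
            while (index < table.length && table[index] == null) {
                index++;                 // skip holes (uncommitted slots)
            }
            if (index < table.length) {
                next = table[index++];   // remember where to resume the search
            } else {
                next = null;
            }
            return result;
        }

        @Override public boolean hasNext() { return next != null; }

        @Override public T next() {
            if (next == null) throw new NoSuchElementException();
            return positionToNext();
        }
    }

Priming in the constructor is what lets hasNext() stay a cheap null check instead of re-scanning the table.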
     4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Fri Aug 22 13:24:04 2014 +0200
     4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Tue Aug 26 13:38:33 2014 -0700
     4.3 @@ -43,7 +43,7 @@
     4.4      // G1HeapRegionTable _regions
     4.5      static private long regionsFieldOffset;
     4.6      // uint _committed_length
     4.7 -    static private CIntegerField committedLengthField;
     4.8 +    static private CIntegerField numCommittedField;
     4.9  
    4.10      static {
    4.11          VM.registerVMInitializedObserver(new Observer() {
    4.12 @@ -57,7 +57,7 @@
    4.13          Type type = db.lookupType("HeapRegionSeq");
    4.14  
    4.15          regionsFieldOffset = type.getField("_regions").getOffset();
    4.16 -        committedLengthField = type.getCIntegerField("_committed_length");
    4.17 +        numCommittedField = type.getCIntegerField("_num_committed");
    4.18      }
    4.19  
    4.20      private G1HeapRegionTable regions() {
    4.21 @@ -66,16 +66,20 @@
    4.22                                                               regionsAddr);
    4.23      }
    4.24  
    4.25 +    public long capacity() {
    4.26 +        return length() * HeapRegion.grainBytes();
    4.27 +    }
    4.28 +
    4.29      public long length() {
    4.30          return regions().length();
    4.31      }
    4.32  
    4.33      public long committedLength() {
    4.34 -        return committedLengthField.getValue(addr);
    4.35 +        return numCommittedField.getValue(addr);
    4.36      }
    4.37  
    4.38      public Iterator<HeapRegion> heapRegionIterator() {
    4.39 -        return regions().heapRegionIterator(committedLength());
    4.40 +        return regions().heapRegionIterator(length());
    4.41      }
    4.42  
    4.43      public HeapRegionSeq(Address addr) {
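Reviewer note: capacity() is now derived from a region count times the fixed region size instead of from a committed MemRegion. A back-of-the-envelope sketch, assuming 1 MB G1 regions purely for illustration:

    // Capacity of a region-based heap: region count x fixed region size.
    class RegionCapacity {
        static long capacity(long regionCount, long grainBytes) {
            return regionCount * grainBytes;
        }

        public static void main(String[] args) {
            long grainBytes = 1L << 20;                    // assume 1 MB regions
            System.out.println(capacity(256, grainBytes)); // 268435456 bytes = 256 MB
        }
    }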
     5.1 --- a/make/hotspot_version	Fri Aug 22 13:24:04 2014 +0200
     5.2 +++ b/make/hotspot_version	Tue Aug 26 13:38:33 2014 -0700
     5.3 @@ -35,7 +35,7 @@
     5.4  
     5.5  HS_MAJOR_VER=25
     5.6  HS_MINOR_VER=40
     5.7 -HS_BUILD_NUMBER=05
     5.8 +HS_BUILD_NUMBER=06
     5.9  
    5.10  JDK_MAJOR_VER=1
    5.11  JDK_MINOR_VER=8
     6.1 --- a/make/jprt.properties	Fri Aug 22 13:24:04 2014 +0200
     6.2 +++ b/make/jprt.properties	Tue Aug 26 13:38:33 2014 -0700
     6.3 @@ -33,7 +33,7 @@
     6.4  
     6.5  # This tells jprt what default release we want to build
     6.6  
     6.7 -jprt.hotspot.default.release=jdk8u20
     6.8 +jprt.hotspot.default.release=jdk8u40
     6.9  
    6.10  jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
    6.11  
    6.12 @@ -47,65 +47,65 @@
    6.13  #       sparc etc.
    6.14  
    6.15  # Define the Solaris platforms we want for the various releases
    6.16 -jprt.my.solaris.sparcv9.jdk8u20=solaris_sparcv9_5.10
    6.17 +jprt.my.solaris.sparcv9.jdk8u40=solaris_sparcv9_5.10
    6.18  jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
    6.19  jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
    6.20  jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
    6.21  
    6.22 -jprt.my.solaris.x64.jdk8u20=solaris_x64_5.10
    6.23 +jprt.my.solaris.x64.jdk8u40=solaris_x64_5.10
    6.24  jprt.my.solaris.x64.jdk7=solaris_x64_5.10
    6.25  jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
    6.26  jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
    6.27  
    6.28 -jprt.my.linux.i586.jdk8u20=linux_i586_2.6
    6.29 +jprt.my.linux.i586.jdk8u40=linux_i586_2.6
    6.30  jprt.my.linux.i586.jdk7=linux_i586_2.6
    6.31  jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
    6.32  jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
    6.33  
    6.34 -jprt.my.linux.x64.jdk8u20=linux_x64_2.6
    6.35 +jprt.my.linux.x64.jdk8u40=linux_x64_2.6
    6.36  jprt.my.linux.x64.jdk7=linux_x64_2.6
    6.37  jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
    6.38  jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
    6.39  
    6.40 -jprt.my.linux.ppc.jdk8u20=linux_ppc_2.6
    6.41 +jprt.my.linux.ppc.jdk8u40=linux_ppc_2.6
    6.42  jprt.my.linux.ppc.jdk7=linux_ppc_2.6
    6.43  jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
    6.44  jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
    6.45  
    6.46 -jprt.my.linux.ppcv2.jdk8u20=linux_ppcv2_2.6
    6.47 +jprt.my.linux.ppcv2.jdk8u40=linux_ppcv2_2.6
    6.48  jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
    6.49  jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
    6.50  jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
    6.51  
    6.52 -jprt.my.linux.armvfpsflt.jdk8u20=linux_armvfpsflt_2.6
    6.53 +jprt.my.linux.armvfpsflt.jdk8u40=linux_armvfpsflt_2.6
    6.54  jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
    6.55  
    6.56 -jprt.my.linux.armvfphflt.jdk8u20=linux_armvfphflt_2.6
    6.57 +jprt.my.linux.armvfphflt.jdk8u40=linux_armvfphflt_2.6
    6.58  jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
    6.59  
    6.60  # The ARM GP vfp-sflt build is not currently supported
    6.61 -#jprt.my.linux.armvs.jdk8u20=linux_armvs_2.6
    6.62 +#jprt.my.linux.armvs.jdk8u40=linux_armvs_2.6
    6.63  #jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
    6.64  
    6.65 -jprt.my.linux.armvh.jdk8u20=linux_armvh_2.6
    6.66 +jprt.my.linux.armvh.jdk8u40=linux_armvh_2.6
    6.67  jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
    6.68  
    6.69 -jprt.my.linux.armsflt.jdk8u20=linux_armsflt_2.6
    6.70 +jprt.my.linux.armsflt.jdk8u40=linux_armsflt_2.6
    6.71  jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
    6.72  jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
    6.73  jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
    6.74  
    6.75 -jprt.my.macosx.x64.jdk8u20=macosx_x64_10.7
    6.76 +jprt.my.macosx.x64.jdk8u40=macosx_x64_10.7
    6.77  jprt.my.macosx.x64.jdk7=macosx_x64_10.7
    6.78  jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
    6.79  jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
    6.80  
    6.81 -jprt.my.windows.i586.jdk8u20=windows_i586_6.1
    6.82 +jprt.my.windows.i586.jdk8u40=windows_i586_6.1
    6.83  jprt.my.windows.i586.jdk7=windows_i586_6.1
    6.84  jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
    6.85  jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
    6.86  
    6.87 -jprt.my.windows.x64.jdk8u20=windows_x64_6.1
    6.88 +jprt.my.windows.x64.jdk8u40=windows_x64_6.1
    6.89  jprt.my.windows.x64.jdk7=windows_x64_6.1
    6.90  jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
    6.91  jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
    6.92 @@ -137,7 +137,7 @@
    6.93  jprt.build.targets.all=${jprt.build.targets.standard}, \
    6.94      ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
    6.95  
    6.96 -jprt.build.targets.jdk8u20=${jprt.build.targets.all}
    6.97 +jprt.build.targets.jdk8u40=${jprt.build.targets.all}
    6.98  jprt.build.targets.jdk7=${jprt.build.targets.all}
    6.99  jprt.build.targets.jdk7u8=${jprt.build.targets.all}
   6.100  jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
   6.101 @@ -343,7 +343,7 @@
   6.102    ${jprt.my.windows.i586.test.targets}, \
   6.103    ${jprt.my.windows.x64.test.targets}
   6.104  
   6.105 -jprt.test.targets.jdk8u20=${jprt.test.targets.standard}
   6.106 +jprt.test.targets.jdk8u40=${jprt.test.targets.standard}
   6.107  jprt.test.targets.jdk7=${jprt.test.targets.standard}
   6.108  jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
   6.109  jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
   6.110 @@ -393,7 +393,7 @@
   6.111  jprt.make.rule.test.targets.embedded = \
   6.112    ${jprt.make.rule.test.targets.standard.client}
   6.113  
   6.114 -jprt.make.rule.test.targets.jdk8u20=${jprt.make.rule.test.targets.standard}
   6.115 +jprt.make.rule.test.targets.jdk8u40=${jprt.make.rule.test.targets.standard}
   6.116  jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
   6.117  jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
   6.118  jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
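Reviewer note: the per-release keys are selected through nested ${...} indirection: the inner ${jprt.tools.default.release} expands first, and its value names the outer key to read, which is why only the jdk8u20 -> jdk8u40 key suffixes need to change. A hedged, standalone sketch of that two-step lookup using java.util.Properties (not jprt's actual resolver):

    import java.util.Properties;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Resolves ${key} placeholders innermost-first, mimicking the nested
    // indirection used in jprt.properties (illustrative only).
    class PropertyResolver {
        private static final Pattern INNER = Pattern.compile("\\$\\{([^${}]+)\\}");

        static String resolve(Properties props, String value) {
            Matcher m;
            while ((m = INNER.matcher(value)).find()) {
                String replacement = props.getProperty(m.group(1), "");
                value = value.substring(0, m.start()) + replacement + value.substring(m.end());
            }
            return value;
        }

        public static void main(String[] args) {
            Properties p = new Properties();
            p.setProperty("jprt.tools.default.release", "jdk8u40");
            p.setProperty("jprt.my.linux.x64.jdk8u40", "linux_x64_2.6");
            // jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
            String raw = "${jprt.my.linux.x64.${jprt.tools.default.release}}";
            System.out.println(resolve(p, raw));   // prints linux_x64_2.6
        }
    }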
     7.1 --- a/src/share/vm/classfile/verifier.cpp	Fri Aug 22 13:24:04 2014 +0200
     7.2 +++ b/src/share/vm/classfile/verifier.cpp	Tue Aug 26 13:38:33 2014 -0700
     7.3 @@ -2231,6 +2231,181 @@
     7.4    }
     7.5  }
     7.6  
     7.7 +// Look at the method's handlers.  If the bci is in the handler's try block
     7.8 +// then check if the handler_pc is already on the stack.  If not, push it.
     7.9 +void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
    7.10 +                                  GrowableArray<u4>* handler_stack,
    7.11 +                                  u4 bci) {
    7.12 +  int exlength = exhandlers->length();
    7.13 +  for(int x = 0; x < exlength; x++) {
    7.14 +    if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
    7.15 +      handler_stack->append_if_missing(exhandlers->handler_pc(x));
    7.16 +    }
    7.17 +  }
    7.18 +}
    7.19 +
    7.20 +// Return TRUE if all code paths starting with start_bc_offset end in
    7.21 +// bytecode athrow or loop.
    7.22 +bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
    7.23 +  ResourceMark rm;
    7.24 +  // Create bytecode stream.
    7.25 +  RawBytecodeStream bcs(method());
    7.26 +  u4 code_length = method()->code_size();
    7.27 +  bcs.set_start(start_bc_offset);
    7.28 +  u4 target;
    7.29 +  // Create stack for storing bytecode start offsets for if* and *switch.
    7.30 +  GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
    7.31 +  // Create stack for handlers for try blocks containing this handler.
    7.32 +  GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
    7.33 +  // Create list of visited branch opcodes (goto* and if*).
    7.34 +  GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
    7.35 +  ExceptionTable exhandlers(_method());
    7.36 +
    7.37 +  while (true) {
    7.38 +    if (bcs.is_last_bytecode()) {
    7.39 +      // if no more starting offsets to parse or if at the end of the
    7.40 +      // method then return false.
    7.41 +      if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
    7.42 +        return false;
    7.43 +      // Pop a bytecode starting offset and scan from there.
    7.44 +      bcs.set_start(bci_stack->pop());
    7.45 +    }
    7.46 +    Bytecodes::Code opcode = bcs.raw_next();
    7.47 +    u4 bci = bcs.bci();
    7.48 +
    7.49 +    // If the bytecode is in a TRY block, push its handlers so they
    7.50 +    // will get parsed.
    7.51 +    push_handlers(&exhandlers, handler_stack, bci);
    7.52 +
    7.53 +    switch (opcode) {
    7.54 +      case Bytecodes::_if_icmpeq:
    7.55 +      case Bytecodes::_if_icmpne:
    7.56 +      case Bytecodes::_if_icmplt:
    7.57 +      case Bytecodes::_if_icmpge:
    7.58 +      case Bytecodes::_if_icmpgt:
    7.59 +      case Bytecodes::_if_icmple:
    7.60 +      case Bytecodes::_ifeq:
    7.61 +      case Bytecodes::_ifne:
    7.62 +      case Bytecodes::_iflt:
    7.63 +      case Bytecodes::_ifge:
    7.64 +      case Bytecodes::_ifgt:
    7.65 +      case Bytecodes::_ifle:
    7.66 +      case Bytecodes::_if_acmpeq:
    7.67 +      case Bytecodes::_if_acmpne:
    7.68 +      case Bytecodes::_ifnull:
    7.69 +      case Bytecodes::_ifnonnull:
    7.70 +        target = bcs.dest();
    7.71 +        if (visited_branches->contains(bci)) {
    7.72 +          if (bci_stack->is_empty()) return true;
    7.73 +          // Pop a bytecode starting offset and scan from there.
    7.74 +          bcs.set_start(bci_stack->pop());
    7.75 +        } else {
    7.76 +          if (target > bci) { // forward branch
    7.77 +            if (target >= code_length) return false;
    7.78 +            // Push the branch target onto the stack.
    7.79 +            bci_stack->push(target);
    7.80 +            // then, scan bytecodes starting with next.
    7.81 +            bcs.set_start(bcs.next_bci());
    7.82 +          } else { // backward branch
    7.83 +            // Push bytecode offset following backward branch onto the stack.
    7.84 +            bci_stack->push(bcs.next_bci());
    7.85 +            // Check bytecodes starting with branch target.
    7.86 +            bcs.set_start(target);
    7.87 +          }
    7.88 +          // Record target so we don't branch here again.
    7.89 +          visited_branches->append(bci);
    7.90 +        }
    7.91 +        break;
    7.92 +
    7.93 +      case Bytecodes::_goto:
    7.94 +      case Bytecodes::_goto_w:
    7.95 +        target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
    7.96 +        if (visited_branches->contains(bci)) {
    7.97 +          if (bci_stack->is_empty()) return true;
    7.98 +          // Been here before, pop new starting offset from stack.
    7.99 +          bcs.set_start(bci_stack->pop());
   7.100 +        } else {
   7.101 +          if (target >= code_length) return false;
   7.102 +          // Continue scanning from the target onward.
   7.103 +          bcs.set_start(target);
   7.104 +          // Record target so we don't branch here again.
   7.105 +          visited_branches->append(bci);
   7.106 +        }
   7.107 +        break;
   7.108 +
   7.109 +      // Check that all switch alternatives end in 'athrow' bytecodes. Since it
   7.110 +      // is  difficult to determine where each switch alternative ends, parse
   7.111 +      // each switch alternative until either hit a 'return', 'athrow', or reach
   7.112 +      // the end of the method's bytecodes.  This is gross but should be okay
   7.113 +      // because:
   7.114 +      // 1. tableswitch and lookupswitch byte codes in handlers for ctor explicit
   7.115 +      //    constructor invocations should be rare.
   7.116 +      // 2. if each switch alternative ends in an athrow then the parsing should be
   7.117 +      //    short.  If there is no athrow then it is bogus code, anyway.
   7.118 +      case Bytecodes::_lookupswitch:
   7.119 +      case Bytecodes::_tableswitch:
   7.120 +        {
   7.121 +          address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
   7.122 +          u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
   7.123 +          int keys, delta;
   7.124 +          if (opcode == Bytecodes::_tableswitch) {
   7.125 +            jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
   7.126 +            jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
   7.127 +            // This is invalid, but let the regular bytecode verifier
   7.128 +            // report this because the user will get a better error message.
   7.129 +            if (low > high) return true;
   7.130 +            keys = high - low + 1;
   7.131 +            delta = 1;
   7.132 +          } else {
   7.133 +            keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
   7.134 +            delta = 2;
   7.135 +          }
   7.136 +          // Invalid, let the regular bytecode verifier deal with it.
   7.137 +          if (keys < 0) return true;
   7.138 +
   7.139 +          // Push the offset of the next bytecode onto the stack.
   7.140 +          bci_stack->push(bcs.next_bci());
   7.141 +
   7.142 +          // Push the switch alternatives onto the stack.
   7.143 +          for (int i = 0; i < keys; i++) {
   7.144 +            u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
   7.145 +            if (target > code_length) return false;
   7.146 +            bci_stack->push(target);
   7.147 +          }
   7.148 +
   7.149 +          // Start bytecode parsing for the switch at the default alternative.
   7.150 +          if (default_offset > code_length) return false;
   7.151 +          bcs.set_start(default_offset);
   7.152 +          break;
   7.153 +        }
   7.154 +
   7.155 +      case Bytecodes::_return:
   7.156 +        return false;
   7.157 +
   7.158 +      case Bytecodes::_athrow:
   7.159 +        {
   7.160 +          if (bci_stack->is_empty()) {
   7.161 +            if (handler_stack->is_empty()) {
   7.162 +              return true;
   7.163 +            } else {
   7.164 +              // Parse the catch handlers for try blocks containing athrow.
   7.165 +              bcs.set_start(handler_stack->pop());
   7.166 +            }
   7.167 +          } else {
   7.168 +            // Pop a bytecode offset and starting scanning from there.
   7.169 +            bcs.set_start(bci_stack->pop());
   7.170 +          }
   7.171 +        }
   7.172 +        break;
   7.173 +
   7.174 +      default:
   7.175 +        ;
   7.176 +    } // end switch
   7.177 +  } // end while loop
   7.178 +
   7.179 +  return false;
   7.180 +}
   7.181 +
   7.182  void ClassVerifier::verify_invoke_init(
   7.183      RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
   7.184      StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
   7.185 @@ -2250,18 +2425,26 @@
   7.186        return;
   7.187      }
   7.188  
   7.189 -    // Make sure that this call is not done from within a TRY block because
   7.190 -    // that can result in returning an incomplete object.  Simply checking
   7.191 -    // (bci >= start_pc) also ensures that this call is not done after a TRY
   7.192 -    // block.  That is also illegal because this call must be the first Java
   7.193 -    // statement in the constructor.
   7.194 +    // Check if this call is done from inside of a TRY block.  If so, make
   7.195 +    // sure that all catch clause paths end in a throw.  Otherwise, this
   7.196 +    // can result in returning an incomplete object.
   7.197      ExceptionTable exhandlers(_method());
   7.198      int exlength = exhandlers.length();
   7.199      for(int i = 0; i < exlength; i++) {
   7.200 -      if (bci >= exhandlers.start_pc(i)) {
   7.201 -        verify_error(ErrorContext::bad_code(bci),
   7.202 -                     "Bad <init> method call from after the start of a try block");
   7.203 -        return;
   7.204 +      u2 start_pc = exhandlers.start_pc(i);
   7.205 +      u2 end_pc = exhandlers.end_pc(i);
   7.206 +
   7.207 +      if (bci >= start_pc && bci < end_pc) {
   7.208 +        if (!ends_in_athrow(exhandlers.handler_pc(i))) {
   7.209 +          verify_error(ErrorContext::bad_code(bci),
   7.210 +            "Bad <init> method call from after the start of a try block");
   7.211 +          return;
   7.212 +        } else if (VerboseVerification) {
   7.213 +          ResourceMark rm;
   7.214 +          tty->print_cr(
   7.215 +            "Survived call to ends_in_athrow(): %s",
   7.216 +                        current_class()->name()->as_C_string());
   7.217 +        }
   7.218        }
   7.219      }
   7.220  
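Reviewer note: ends_in_athrow() is a worklist scan over the bytecodes. Branch targets and fall-through offsets are pushed onto a stack, a branch seen twice counts as a loop (and therefore an acceptable path end), and the scan fails as soon as any path reaches a return or runs off the end of the method. A condensed, standalone sketch of the same idea over a toy instruction set (GOTO/IF/RETURN/ATHROW only; exception handlers and switches omitted, and none of this is the HotSpot verifier API):

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashSet;
    import java.util.Set;

    // Toy version of the "all paths end in athrow or loop" check.
    class EndsInAthrow {
        enum Op { GOTO, IF, RETURN, ATHROW, NOP }

        static class Insn {
            final Op op;
            final int target;            // branch target for GOTO/IF, unused otherwise
            Insn(Op op, int target) { this.op = op; this.target = target; }
        }

        static boolean endsInAthrow(Insn[] code, int start) {
            Deque<Integer> work = new ArrayDeque<>();     // pending start offsets
            Set<Integer> visitedBranches = new HashSet<>();
            work.push(start);

            while (!work.isEmpty()) {
                int pc = work.pop();
                while (true) {
                    if (pc < 0 || pc >= code.length) return false;   // ran off the method
                    Insn insn = code[pc];
                    switch (insn.op) {
                        case RETURN:
                            return false;                 // a path ends in return, not athrow
                        case ATHROW:
                            break;                        // this path is fine; take next work item
                        case GOTO:
                            if (!visitedBranches.add(pc)) break;   // loop: path terminates
                            pc = insn.target;
                            continue;
                        case IF:
                            if (!visitedBranches.add(pc)) break;   // loop: path terminates
                            work.push(insn.target);       // scan the taken branch later
                            pc = pc + 1;                  // keep scanning the fall-through path
                            continue;
                        default:                          // straight-line bytecode: fall through
                            pc = pc + 1;
                            continue;
                    }
                    break;                                // leave inner loop, pop next offset
                }
            }
            return true;                                  // every path hit athrow or looped
        }
    }

Because each branch is expanded at most once, the scan is bounded even for cyclic control flow, which is the property the patch's comment relies on.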
     8.1 --- a/src/share/vm/classfile/verifier.hpp	Fri Aug 22 13:24:04 2014 +0200
     8.2 +++ b/src/share/vm/classfile/verifier.hpp	Tue Aug 26 13:38:33 2014 -0700
     8.3 @@ -30,6 +30,7 @@
     8.4  #include "oops/klass.hpp"
     8.5  #include "oops/method.hpp"
     8.6  #include "runtime/handles.hpp"
     8.7 +#include "utilities/growableArray.hpp"
     8.8  #include "utilities/exceptions.hpp"
     8.9  
    8.10  // The verifier class
    8.11 @@ -303,6 +304,16 @@
    8.12      StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
    8.13      constantPoolHandle cp, TRAPS);
    8.14  
    8.15 +  // Used by ends_in_athrow() to push all handlers that contain bci onto
    8.16 +  // the handler_stack, if the handler is not already on the stack.
    8.17 +  void push_handlers(ExceptionTable* exhandlers,
    8.18 +                     GrowableArray<u4>* handler_stack,
    8.19 +                     u4 bci);
    8.20 +
    8.21 +  // Returns true if all paths starting with start_bc_offset end in athrow
    8.22 +  // bytecode or loop.
    8.23 +  bool ends_in_athrow(u4 start_bc_offset);
    8.24 +
    8.25    void verify_invoke_instructions(
    8.26      RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
    8.27      bool* this_uninit, VerificationType return_type,
     9.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 22 13:24:04 2014 +0200
     9.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Aug 26 13:38:33 2014 -0700
     9.3 @@ -1514,6 +1514,8 @@
     9.4      gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
     9.5      gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
     9.6      gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
     9.7 +    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
     9.8 +    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
     9.9      gclog_or_tty->print_cr("metadata initialized %d",
    9.10        MetaspaceGC::should_concurrent_collect());
    9.11    }
    9.12 @@ -1576,6 +1578,28 @@
    9.13      return true;
    9.14    }
    9.15  
    9.16 +  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
    9.17 +  if (CMSTriggerInterval >= 0) {
    9.18 +    if (CMSTriggerInterval == 0) {
    9.19 +      // Trigger always
    9.20 +      return true;
    9.21 +    }
    9.22 +
    9.23 +    // Check the CMS time since begin (we do not check the stats validity
    9.24 +    // as we want to be able to trigger the first CMS cycle as well)
    9.25 +    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
    9.26 +      if (Verbose && PrintGCDetails) {
    9.27 +        if (stats().valid()) {
    9.28 +          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
    9.29 +                                 stats().cms_time_since_begin());
    9.30 +        } else {
    9.31 +          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
    9.32 +        }
    9.33 +      }
    9.34 +      return true;
    9.35 +    }
    9.36 +  }
    9.37 +
    9.38    return false;
    9.39  }
    9.40  
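Reviewer note: the new CMSTriggerInterval check starts a cycle when the configured interval (in milliseconds; 0 means "always", negative means "disabled") has elapsed since the last cycle began. A minimal sketch of the same decision with the time source passed in (illustrative, not the CMS policy code):

    // Decide whether a periodic background cycle should start, mirroring the
    // CMSTriggerInterval logic: <0 disables, 0 always triggers, otherwise
    // trigger once the interval has elapsed since the last cycle began.
    class TriggerInterval {
        static final double MILLIUNITS = 1000.0;

        static boolean shouldTrigger(long triggerIntervalMillis, double secondsSinceLastBegin) {
            if (triggerIntervalMillis < 0) {
                return false;                       // feature disabled
            }
            if (triggerIntervalMillis == 0) {
                return true;                        // trigger unconditionally
            }
            return secondsSinceLastBegin >= (triggerIntervalMillis / MILLIUNITS);
        }

        public static void main(String[] args) {
            System.out.println(shouldTrigger(-1, 10.0));   // false: disabled
            System.out.println(shouldTrigger(0, 0.0));     // true: always
            System.out.println(shouldTrigger(5000, 3.2));  // false: only 3.2 s elapsed
            System.out.println(shouldTrigger(5000, 7.5));  // true: 7.5 s >= 5 s
        }
    }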
    10.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Aug 22 13:24:04 2014 +0200
    10.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Tue Aug 26 13:38:33 2014 -0700
    10.3 @@ -81,8 +81,8 @@
    10.4    }
    10.5  }
    10.6  
    10.7 -void ConcurrentG1Refine::init() {
    10.8 -  _hot_card_cache.initialize();
    10.9 +void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
   10.10 +  _hot_card_cache.initialize(card_counts_storage);
   10.11  }
   10.12  
   10.13  void ConcurrentG1Refine::stop() {
    11.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Aug 22 13:24:04 2014 +0200
    11.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Tue Aug 26 13:38:33 2014 -0700
    11.3 @@ -34,6 +34,7 @@
    11.4  class ConcurrentG1RefineThread;
    11.5  class G1CollectedHeap;
    11.6  class G1HotCardCache;
    11.7 +class G1RegionToSpaceMapper;
    11.8  class G1RemSet;
    11.9  class DirtyCardQueue;
   11.10  
   11.11 @@ -74,7 +75,7 @@
   11.12    ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
   11.13    ~ConcurrentG1Refine();
   11.14  
   11.15 -  void init(); // Accomplish some initialization that has to wait.
   11.16 +  void init(G1RegionToSpaceMapper* card_counts_storage);
   11.17    void stop();
   11.18  
   11.19    void reinitialize_threads();
    12.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Aug 22 13:24:04 2014 +0200
    12.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Aug 26 13:38:33 2014 -0700
    12.3 @@ -36,6 +36,7 @@
    12.4  #include "gc_implementation/g1/heapRegion.inline.hpp"
    12.5  #include "gc_implementation/g1/heapRegionRemSet.hpp"
    12.6  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    12.7 +#include "gc_implementation/g1/heapRegionSet.inline.hpp"
    12.8  #include "gc_implementation/shared/vmGCOperations.hpp"
    12.9  #include "gc_implementation/shared/gcTimer.hpp"
   12.10  #include "gc_implementation/shared/gcTrace.hpp"
   12.11 @@ -98,12 +99,12 @@
   12.12  }
   12.13  
   12.14  #ifndef PRODUCT
   12.15 -bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
   12.16 +bool CMBitMapRO::covers(MemRegion heap_rs) const {
   12.17    // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   12.18    assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
   12.19           "size inconsistency");
   12.20 -  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
   12.21 -         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
   12.22 +  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
   12.23 +         _bmWordSize  == heap_rs.word_size();
   12.24  }
   12.25  #endif
   12.26  
   12.27 @@ -111,33 +112,73 @@
   12.28    _bm.print_on_error(st, prefix);
   12.29  }
   12.30  
   12.31 -bool CMBitMap::allocate(ReservedSpace heap_rs) {
   12.32 -  _bmStartWord = (HeapWord*)(heap_rs.base());
   12.33 -  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
   12.34 -  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
   12.35 -                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
   12.36 -  if (!brs.is_reserved()) {
   12.37 -    warning("ConcurrentMark marking bit map allocation failure");
   12.38 +size_t CMBitMap::compute_size(size_t heap_size) {
   12.39 +  return heap_size / mark_distance();
   12.40 +}
   12.41 +
   12.42 +size_t CMBitMap::mark_distance() {
   12.43 +  return MinObjAlignmentInBytes * BitsPerByte;
   12.44 +}
   12.45 +
   12.46 +void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
   12.47 +  _bmStartWord = heap.start();
   12.48 +  _bmWordSize = heap.word_size();
   12.49 +
   12.50 +  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
   12.51 +  _bm.set_size(_bmWordSize >> _shifter);
   12.52 +
   12.53 +  storage->set_mapping_changed_listener(&_listener);
   12.54 +}
   12.55 +
   12.56 +void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
   12.57 +  // We need to clear the bitmap on commit, removing any existing information.
   12.58 +  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
   12.59 +  _bm->clearRange(mr);
   12.60 +}
   12.61 +
   12.62 +// Closure used for clearing the given mark bitmap.
   12.63 +class ClearBitmapHRClosure : public HeapRegionClosure {
   12.64 + private:
   12.65 +  ConcurrentMark* _cm;
   12.66 +  CMBitMap* _bitmap;
   12.67 +  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
   12.68 + public:
   12.69 +  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
   12.70 +    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
   12.71 +  }
   12.72 +
   12.73 +  virtual bool doHeapRegion(HeapRegion* r) {
   12.74 +    size_t const chunk_size_in_words = M / HeapWordSize;
   12.75 +
   12.76 +    HeapWord* cur = r->bottom();
   12.77 +    HeapWord* const end = r->end();
   12.78 +
   12.79 +    while (cur < end) {
   12.80 +      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
   12.81 +      _bitmap->clearRange(mr);
   12.82 +
   12.83 +      cur += chunk_size_in_words;
   12.84 +
   12.85 +      // Abort iteration if after yielding the marking has been aborted.
   12.86 +      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
   12.87 +        return true;
   12.88 +      }
   12.89 +      // Repeat the asserts from before the start of the closure. We will do them
   12.90 +      // as asserts here to minimize their overhead on the product. However, we
   12.91 +      // will have them as guarantees at the beginning / end of the bitmap
   12.92 +      // clearing to get some checking in the product.
   12.93 +      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
   12.94 +      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
   12.95 +    }
   12.96 +
   12.97      return false;
   12.98    }
   12.99 -  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
  12.100 -  // For now we'll just commit all of the bit map up front.
  12.101 -  // Later on we'll try to be more parsimonious with swap.
  12.102 -  if (!_virtual_space.initialize(brs, brs.size())) {
  12.103 -    warning("ConcurrentMark marking bit map backing store failure");
  12.104 -    return false;
  12.105 -  }
  12.106 -  assert(_virtual_space.committed_size() == brs.size(),
  12.107 -         "didn't reserve backing store for all of concurrent marking bit map?");
  12.108 -  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  12.109 -  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
  12.110 -         _bmWordSize, "inconsistency in bit map sizing");
  12.111 -  _bm.set_size(_bmWordSize >> _shifter);
  12.112 -  return true;
  12.113 -}
  12.114 +};
  12.115  
  12.116  void CMBitMap::clearAll() {
  12.117 -  _bm.clear();
  12.118 +  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  12.119 +  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  12.120 +  guarantee(cl.complete(), "Must have completed iteration.");
  12.121    return;
  12.122  }
  12.123  
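Reviewer note: compute_size() and mark_distance() tie the bitmap size to the heap size: one mark bit covers MinObjAlignmentInBytes heap bytes, so one bitmap byte covers MinObjAlignmentInBytes * BitsPerByte heap bytes. A small worked example, assuming the common 8-byte object alignment (illustrative values, not VM code):

    // Mark-bitmap sizing: one bit per possible object start.
    class MarkBitmapSize {
        static final int MIN_OBJ_ALIGNMENT_IN_BYTES = 8;   // assumed typical 64-bit default
        static final int BITS_PER_BYTE = 8;

        // Heap bytes covered by one bitmap byte (the patch's mark_distance()).
        static long markDistance() {
            return (long) MIN_OBJ_ALIGNMENT_IN_BYTES * BITS_PER_BYTE;   // 64
        }

        // Bitmap bytes needed for a heap of the given size (the patch's compute_size()).
        static long computeSize(long heapSizeBytes) {
            return heapSizeBytes / markDistance();
        }

        public static void main(String[] args) {
            long heap = 4L * 1024 * 1024 * 1024;            // 4 GB heap
            System.out.println(computeSize(heap));          // 67108864 bytes = 64 MB per bitmap
        }
    }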
  12.124 @@ -482,10 +523,10 @@
  12.125    return MAX2((n_par_threads + 2) / 4, 1U);
  12.126  }
  12.127  
  12.128 -ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  12.129 +ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  12.130    _g1h(g1h),
  12.131 -  _markBitMap1(log2_intptr(MinObjAlignment)),
  12.132 -  _markBitMap2(log2_intptr(MinObjAlignment)),
  12.133 +  _markBitMap1(),
  12.134 +  _markBitMap2(),
  12.135    _parallel_marking_threads(0),
  12.136    _max_parallel_marking_threads(0),
  12.137    _sleep_factor(0.0),
  12.138 @@ -494,7 +535,7 @@
  12.139    _cleanup_task_overhead(1.0),
  12.140    _cleanup_list("Cleanup List"),
  12.141    _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  12.142 -  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
  12.143 +  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
  12.144              CardTableModRefBS::card_shift,
  12.145              false /* in_resource_area*/),
  12.146  
  12.147 @@ -544,14 +585,8 @@
  12.148                             "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  12.149    }
  12.150  
  12.151 -  if (!_markBitMap1.allocate(heap_rs)) {
  12.152 -    warning("Failed to allocate first CM bit map");
  12.153 -    return;
  12.154 -  }
  12.155 -  if (!_markBitMap2.allocate(heap_rs)) {
  12.156 -    warning("Failed to allocate second CM bit map");
  12.157 -    return;
  12.158 -  }
  12.159 +  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  12.160 +  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
  12.161  
  12.162    // Create & start a ConcurrentMark thread.
  12.163    _cmThread = new ConcurrentMarkThread(this);
  12.164 @@ -562,8 +597,8 @@
  12.165    }
  12.166  
  12.167    assert(CGC_lock != NULL, "Where's the CGC_lock?");
  12.168 -  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  12.169 -  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
  12.170 +  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  12.171 +  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
  12.172  
  12.173    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  12.174    satb_qs.set_buffer_size(G1SATBBufferSize);
  12.175 @@ -723,38 +758,17 @@
  12.176    clear_all_count_data();
  12.177  
  12.178    // so that the call below can read a sensible value
  12.179 -  _heap_start = (HeapWord*) heap_rs.base();
  12.180 +  _heap_start = g1h->reserved_region().start();
  12.181    set_non_marking_state();
  12.182    _completed_initialization = true;
  12.183  }
  12.184  
  12.185 -void ConcurrentMark::update_g1_committed(bool force) {
  12.186 -  // If concurrent marking is not in progress, then we do not need to
  12.187 -  // update _heap_end.
  12.188 -  if (!concurrent_marking_in_progress() && !force) return;
  12.189 -
  12.190 -  MemRegion committed = _g1h->g1_committed();
  12.191 -  assert(committed.start() == _heap_start, "start shouldn't change");
  12.192 -  HeapWord* new_end = committed.end();
  12.193 -  if (new_end > _heap_end) {
  12.194 -    // The heap has been expanded.
  12.195 -
  12.196 -    _heap_end = new_end;
  12.197 -  }
  12.198 -  // Notice that the heap can also shrink. However, this only happens
  12.199 -  // during a Full GC (at least currently) and the entire marking
  12.200 -  // phase will bail out and the task will not be restarted. So, let's
  12.201 -  // do nothing.
  12.202 -}
  12.203 -
  12.204  void ConcurrentMark::reset() {
  12.205    // Starting values for these two. This should be called in a STW
  12.206 -  // phase. CM will be notified of any future g1_committed expansions
  12.207 -  // will be at the end of evacuation pauses, when tasks are
  12.208 -  // inactive.
  12.209 -  MemRegion committed = _g1h->g1_committed();
  12.210 -  _heap_start = committed.start();
  12.211 -  _heap_end   = committed.end();
  12.212 +  // phase.
  12.213 +  MemRegion reserved = _g1h->g1_reserved();
  12.214 +  _heap_start = reserved.start();
  12.215 +  _heap_end   = reserved.end();
  12.216  
  12.217    // Separated the asserts so that we know which one fires.
  12.218    assert(_heap_start != NULL, "heap bounds should look ok");
  12.219 @@ -826,7 +840,6 @@
  12.220      assert(out_of_regions(),
  12.221             err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
  12.222                     p2i(_finger), p2i(_heap_end)));
  12.223 -    update_g1_committed(true);
  12.224    }
  12.225  }
  12.226  
  12.227 @@ -845,7 +858,6 @@
  12.228  
  12.229  void ConcurrentMark::clearNextBitmap() {
  12.230    G1CollectedHeap* g1h = G1CollectedHeap::heap();
  12.231 -  G1CollectorPolicy* g1p = g1h->g1_policy();
  12.232  
  12.233    // Make sure that the concurrent mark thread looks to still be in
  12.234    // the current cycle.
  12.235 @@ -857,41 +869,36 @@
  12.236    // is the case.
  12.237    guarantee(!g1h->mark_in_progress(), "invariant");
  12.238  
  12.239 -  // clear the mark bitmap (no grey objects to start with).
  12.240 -  // We need to do this in chunks and offer to yield in between
  12.241 -  // each chunk.
  12.242 -  HeapWord* start  = _nextMarkBitMap->startWord();
  12.243 -  HeapWord* end    = _nextMarkBitMap->endWord();
  12.244 -  HeapWord* cur    = start;
  12.245 -  size_t chunkSize = M;
  12.246 -  while (cur < end) {
  12.247 -    HeapWord* next = cur + chunkSize;
  12.248 -    if (next > end) {
  12.249 -      next = end;
  12.250 -    }
  12.251 -    MemRegion mr(cur,next);
  12.252 -    _nextMarkBitMap->clearRange(mr);
  12.253 -    cur = next;
  12.254 -    do_yield_check();
  12.255 -
  12.256 -    // Repeat the asserts from above. We'll do them as asserts here to
  12.257 -    // minimize their overhead on the product. However, we'll have
  12.258 -    // them as guarantees at the beginning / end of the bitmap
  12.259 -    // clearing to get some checking in the product.
  12.260 -    assert(cmThread()->during_cycle(), "invariant");
  12.261 -    assert(!g1h->mark_in_progress(), "invariant");
  12.262 +  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  12.263 +  g1h->heap_region_iterate(&cl);
  12.264 +
  12.265 +  // Clear the liveness counting data. If the marking has been aborted, the abort()
  12.266 +  // call already did that.
  12.267 +  if (cl.complete()) {
  12.268 +    clear_all_count_data();
  12.269    }
  12.270  
  12.271 -  // Clear the liveness counting data
  12.272 -  clear_all_count_data();
  12.273 -
  12.274    // Repeat the asserts from above.
  12.275    guarantee(cmThread()->during_cycle(), "invariant");
  12.276    guarantee(!g1h->mark_in_progress(), "invariant");
  12.277  }
  12.278  
  12.279 +class CheckBitmapClearHRClosure : public HeapRegionClosure {
  12.280 +  CMBitMap* _bitmap;
  12.281 +  bool _error;
  12.282 + public:
  12.283 +  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  12.284 +  }
  12.285 +
  12.286 +  virtual bool doHeapRegion(HeapRegion* r) {
  12.287 +    return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
  12.288 +  }
  12.289 +};
  12.290 +
  12.291  bool ConcurrentMark::nextMarkBitmapIsClear() {
  12.292 -  return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
  12.293 +  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  12.294 +  _g1h->heap_region_iterate(&cl);
  12.295 +  return cl.complete();
  12.296  }
  12.297  
  12.298  class NoteStartOfMarkHRClosure: public HeapRegionClosure {
  12.299 @@ -2191,10 +2198,10 @@
  12.300                             _cleanup_list.length());
  12.301    }
  12.302  
  12.303 -  // Noone else should be accessing the _cleanup_list at this point,
  12.304 -  // so it's not necessary to take any locks
  12.305 +  // No one else should be accessing the _cleanup_list at this point,
  12.306 +  // so it is not necessary to take any locks
  12.307    while (!_cleanup_list.is_empty()) {
  12.308 -    HeapRegion* hr = _cleanup_list.remove_head();
  12.309 +    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
  12.310      assert(hr != NULL, "Got NULL from a non-empty list");
  12.311      hr->par_clear();
  12.312      tmp_free_list.add_ordered(hr);
  12.313 @@ -2800,7 +2807,6 @@
  12.314        str = " O";
  12.315      } else {
  12.316        HeapRegion* hr  = _g1h->heap_region_containing(obj);
  12.317 -      guarantee(hr != NULL, "invariant");
  12.318        bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
  12.319        bool marked = _g1h->is_marked(obj, _vo);
  12.320  
  12.321 @@ -2979,22 +2985,25 @@
  12.322      // claim_region() and a humongous object allocation might force us
  12.323      // to do a bit of unnecessary work (due to some unnecessary bitmap
  12.324      // iterations) but it should not introduce and correctness issues.
  12.325 -    HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
  12.326 -    HeapWord*   bottom        = curr_region->bottom();
  12.327 -    HeapWord*   end           = curr_region->end();
  12.328 -    HeapWord*   limit         = curr_region->next_top_at_mark_start();
  12.329 -
  12.330 -    if (verbose_low()) {
  12.331 -      gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
  12.332 -                             "["PTR_FORMAT", "PTR_FORMAT"), "
  12.333 -                             "limit = "PTR_FORMAT,
  12.334 -                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
  12.335 -    }
  12.336 +    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
  12.337 +
  12.338 +    // Above heap_region_containing_raw may return NULL as we always scan claim
  12.339 +    // until the end of the heap. In this case, just jump to the next region.
  12.340 +    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
  12.341  
  12.342      // Is the gap between reading the finger and doing the CAS too long?
  12.343      HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
  12.344 -    if (res == finger) {
  12.345 +    if (res == finger && curr_region != NULL) {
  12.346        // we succeeded
  12.347 +      HeapWord*   bottom        = curr_region->bottom();
  12.348 +      HeapWord*   limit         = curr_region->next_top_at_mark_start();
  12.349 +
  12.350 +      if (verbose_low()) {
  12.351 +        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
  12.352 +                               "["PTR_FORMAT", "PTR_FORMAT"), "
  12.353 +                               "limit = "PTR_FORMAT,
  12.354 +                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
  12.355 +      }
  12.356  
  12.357        // notice that _finger == end cannot be guaranteed here since,
  12.358        // someone else might have moved the finger even further
  12.359 @@ -3025,10 +3034,17 @@
  12.360      } else {
  12.361        assert(_finger > finger, "the finger should have moved forward");
  12.362        if (verbose_low()) {
  12.363 -        gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
  12.364 -                               "global finger = "PTR_FORMAT", "
  12.365 -                               "our finger = "PTR_FORMAT,
  12.366 -                               worker_id, p2i(_finger), p2i(finger));
  12.367 +        if (curr_region == NULL) {
  12.368 +          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
  12.369 +                                 "global finger = "PTR_FORMAT", "
  12.370 +                                 "our finger = "PTR_FORMAT,
  12.371 +                                 worker_id, p2i(_finger), p2i(finger));
  12.372 +        } else {
  12.373 +          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
  12.374 +                                 "global finger = "PTR_FORMAT", "
  12.375 +                                 "our finger = "PTR_FORMAT,
  12.376 +                                 worker_id, p2i(_finger), p2i(finger));
  12.377 +        }
  12.378        }
  12.379  
  12.380        // read it again
  12.381 @@ -3143,8 +3159,10 @@
  12.382        // happens, heap_region_containing() will return the bottom of the
  12.383        // corresponding starts humongous region and the check below will
  12.384        // not hold any more.
  12.385 +      // Since we always iterate over all regions, we might get a NULL HeapRegion
  12.386 +      // here.
  12.387        HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
  12.388 -      guarantee(global_finger == global_hr->bottom(),
  12.389 +      guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
  12.390                  err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
  12.391                          p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
  12.392      }
  12.393 @@ -3157,7 +3175,7 @@
  12.394        if (task_finger != NULL && task_finger < _heap_end) {
  12.395          // See above note on the global finger verification.
  12.396          HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
  12.397 -        guarantee(task_finger == task_hr->bottom() ||
  12.398 +        guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
  12.399                    !task_hr->in_collection_set(),
  12.400                    err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
  12.401                            p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
  12.402 @@ -3565,9 +3583,8 @@
  12.403  }
  12.404  
  12.405  void CMTask::setup_for_region(HeapRegion* hr) {
  12.406 -  // Separated the asserts so that we know which one fires.
  12.407    assert(hr != NULL,
  12.408 -        "claim_region() should have filtered out continues humongous regions");
  12.409 +        "claim_region() should have filtered out NULL regions");
  12.410    assert(!hr->continuesHumongous(),
  12.411          "claim_region() should have filtered out continues humongous regions");
  12.412  
  12.413 @@ -4674,7 +4691,6 @@
  12.414      _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
  12.415      _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  12.416    G1CollectedHeap* g1h = G1CollectedHeap::heap();
  12.417 -  MemRegion g1_committed = g1h->g1_committed();
  12.418    MemRegion g1_reserved = g1h->g1_reserved();
  12.419    double now = os::elapsedTime();
  12.420  
  12.421 @@ -4682,10 +4698,8 @@
  12.422    _out->cr();
  12.423    _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  12.424    _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
  12.425 -                 G1PPRL_SUM_ADDR_FORMAT("committed")
  12.426                   G1PPRL_SUM_ADDR_FORMAT("reserved")
  12.427                   G1PPRL_SUM_BYTE_FORMAT("region-size"),
  12.428 -                 p2i(g1_committed.start()), p2i(g1_committed.end()),
  12.429                   p2i(g1_reserved.start()), p2i(g1_reserved.end()),
  12.430                   HeapRegion::GrainBytes);
  12.431    _out->print_cr(G1PPRL_LINE_PREFIX);
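Reviewer note: claim_region() now always advances the global finger in whole-region steps and simply skips slots whose HeapRegion is NULL (uncommitted), instead of assuming a contiguous committed range. A rough standalone sketch of that claim loop using AtomicLong, with region indices standing in for heap addresses (illustrative only):

    import java.util.concurrent.atomic.AtomicLong;

    // Workers claim regions by CASing a shared finger forward one region at a
    // time; slots that are null (think: uncommitted regions) are skipped.
    class RegionClaimer {
        private final Object[] regions;          // null entries = uncommitted slots
        private final AtomicLong finger = new AtomicLong(0);

        RegionClaimer(Object[] regions) { this.regions = regions; }

        // Returns the claimed region, or null when the table is exhausted.
        Object claimNext() {
            while (true) {
                long idx = finger.get();
                if (idx >= regions.length) {
                    return null;                             // past the end: nothing left
                }
                Object region = regions[(int) idx];
                // Always advance by one region, even over an uncommitted slot.
                if (finger.compareAndSet(idx, idx + 1)) {
                    if (region != null) {
                        return region;                       // we claimed a real region
                    }
                    // Uncommitted slot: we moved the finger, keep looking.
                }
                // CAS lost: someone else moved the finger; re-read and retry.
            }
        }
    }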
    13.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Aug 22 13:24:04 2014 +0200
    13.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Aug 26 13:38:33 2014 -0700
    13.3 @@ -27,10 +27,12 @@
    13.4  
    13.5  #include "classfile/javaClasses.hpp"
    13.6  #include "gc_implementation/g1/heapRegionSet.hpp"
    13.7 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
    13.8  #include "gc_implementation/shared/gcId.hpp"
    13.9  #include "utilities/taskqueue.hpp"
   13.10  
   13.11  class G1CollectedHeap;
   13.12 +class CMBitMap;
   13.13  class CMTask;
   13.14  typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
   13.15  typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
   13.16 @@ -57,7 +59,6 @@
   13.17    HeapWord* _bmStartWord;      // base address of range covered by map
   13.18    size_t    _bmWordSize;       // map size (in #HeapWords covered)
   13.19    const int _shifter;          // map to char or bit
   13.20 -  VirtualSpace _virtual_space; // underlying the bit map
   13.21    BitMap    _bm;               // the bit map itself
   13.22  
   13.23   public:
   13.24 @@ -115,42 +116,41 @@
   13.25    void print_on_error(outputStream* st, const char* prefix) const;
   13.26  
   13.27    // debugging
   13.28 -  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
   13.29 +  NOT_PRODUCT(bool covers(MemRegion rs) const;)
   13.30 +};
   13.31 +
   13.32 +class CMBitMapMappingChangedListener : public G1MappingChangedListener {
   13.33 + private:
   13.34 +  CMBitMap* _bm;
   13.35 + public:
   13.36 +  CMBitMapMappingChangedListener() : _bm(NULL) {}
   13.37 +
   13.38 +  void set_bitmap(CMBitMap* bm) { _bm = bm; }
   13.39 +
   13.40 +  virtual void on_commit(uint start_idx, size_t num_regions);
   13.41  };
   13.42  
   13.43  class CMBitMap : public CMBitMapRO {
   13.44 + private:
   13.45 +  CMBitMapMappingChangedListener _listener;
   13.46  
   13.47   public:
   13.48 -  // constructor
   13.49 -  CMBitMap(int shifter) :
   13.50 -    CMBitMapRO(shifter) {}
   13.51 +  static size_t compute_size(size_t heap_size);
   13.52 +  // Returns the amount of bytes on the heap between two marks in the bitmap.
   13.53 +  static size_t mark_distance();
   13.54  
   13.55 -  // Allocates the back store for the marking bitmap
   13.56 -  bool allocate(ReservedSpace heap_rs);
   13.57 +  CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
   13.58  
   13.59 -  // write marks
   13.60 -  void mark(HeapWord* addr) {
   13.61 -    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
   13.62 -           "outside underlying space?");
   13.63 -    _bm.set_bit(heapWordToOffset(addr));
   13.64 -  }
   13.65 -  void clear(HeapWord* addr) {
   13.66 -    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
   13.67 -           "outside underlying space?");
   13.68 -    _bm.clear_bit(heapWordToOffset(addr));
   13.69 -  }
   13.70 -  bool parMark(HeapWord* addr) {
   13.71 -    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
   13.72 -           "outside underlying space?");
   13.73 -    return _bm.par_set_bit(heapWordToOffset(addr));
   13.74 -  }
   13.75 -  bool parClear(HeapWord* addr) {
   13.76 -    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
   13.77 -           "outside underlying space?");
   13.78 -    return _bm.par_clear_bit(heapWordToOffset(addr));
   13.79 -  }
   13.80 +  // Initializes the underlying BitMap to cover the given area.
   13.81 +  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
   13.82 +
   13.83 +  // Write marks.
   13.84 +  inline void mark(HeapWord* addr);
   13.85 +  inline void clear(HeapWord* addr);
   13.86 +  inline bool parMark(HeapWord* addr);
   13.87 +  inline bool parClear(HeapWord* addr);
   13.88 +
   13.89    void markRange(MemRegion mr);
   13.90 -  void clearAll();
   13.91    void clearRange(MemRegion mr);
   13.92  
   13.93    // Starting at the bit corresponding to "addr" (inclusive), find the next
   13.94 @@ -161,6 +161,9 @@
   13.95    // the run.  If there is no "1" bit at or after "addr", return an empty
   13.96    // MemRegion.
   13.97    MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
   13.98 +
   13.99 +  // Clear the whole mark bitmap.
  13.100 +  void clearAll();
  13.101  };
  13.102  
  13.103  // Represents a marking stack used by ConcurrentMarking in the G1 collector.
  13.104 @@ -680,7 +683,7 @@
  13.105      return _task_queues->steal(worker_id, hash_seed, obj);
  13.106    }
  13.107  
  13.108 -  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
  13.109 +  ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
  13.110    ~ConcurrentMark();
  13.111  
  13.112    ConcurrentMarkThread* cmThread() { return _cmThread; }
  13.113 @@ -736,7 +739,8 @@
  13.114    // Clear the next marking bitmap (will be called concurrently).
  13.115    void clearNextBitmap();
  13.116  
  13.117 -  // Return whether the next mark bitmap has no marks set.
  13.118 +  // Return whether the next mark bitmap has no marks set. To be used for assertions
  13.119 +  // only. Will not yield to pause requests.
  13.120    bool nextMarkBitmapIsClear();
  13.121  
  13.122    // These two do the work that needs to be done before and after the
  13.123 @@ -794,12 +798,6 @@
  13.124                             bool verify_thread_buffers,
  13.125                             bool verify_fingers) PRODUCT_RETURN;
  13.126  
  13.127 -  // It is called at the end of an evacuation pause during marking so
  13.128 -  // that CM is notified of where the new end of the heap is. It
  13.129 -  // doesn't do anything if concurrent_marking_in_progress() is false,
  13.130 -  // unless the force parameter is true.
  13.131 -  void update_g1_committed(bool force = false);
  13.132 -
  13.133    bool isMarked(oop p) const {
  13.134      assert(p != NULL && p->is_oop(), "expected an oop");
  13.135      HeapWord* addr = (HeapWord*)p;
    14.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Aug 22 13:24:04 2014 +0200
    14.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Tue Aug 26 13:38:33 2014 -0700
    14.3 @@ -268,6 +268,36 @@
    14.4    return iterate(cl, mr);
    14.5  }
    14.6  
    14.7 +#define check_mark(addr)                                                       \
    14.8 +  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
    14.9 +         "outside underlying space?");                                         \
   14.10 +  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
   14.11 +         err_msg("Trying to access not available bitmap "PTR_FORMAT            \
   14.12 +                 " corresponding to "PTR_FORMAT" (%u)",                        \
   14.13 +                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
   14.14 +
   14.15 +inline void CMBitMap::mark(HeapWord* addr) {
   14.16 +  check_mark(addr);
   14.17 +  _bm.set_bit(heapWordToOffset(addr));
   14.18 +}
   14.19 +
   14.20 +inline void CMBitMap::clear(HeapWord* addr) {
   14.21 +  check_mark(addr);
   14.22 +  _bm.clear_bit(heapWordToOffset(addr));
   14.23 +}
   14.24 +
   14.25 +inline bool CMBitMap::parMark(HeapWord* addr) {
   14.26 +  check_mark(addr);
   14.27 +  return _bm.par_set_bit(heapWordToOffset(addr));
   14.28 +}
   14.29 +
   14.30 +inline bool CMBitMap::parClear(HeapWord* addr) {
   14.31 +  check_mark(addr);
   14.32 +  return _bm.par_clear_bit(heapWordToOffset(addr));
   14.33 +}
   14.34 +
   14.35 +#undef check_mark
   14.36 +
   14.37  inline void CMTask::push(oop obj) {
   14.38    HeapWord* objAddr = (HeapWord*) obj;
   14.39    assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
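
The mark/clear accessors above now funnel through a shared check_mark assertion before touching the bitmap, and parMark/parClear keep the bit operations safe for concurrent marking threads. The following is a minimal, self-contained sketch of that idea (one mark bit per heap word, a bounds check, and a fetch_or-based parallel mark); it is an illustration only, not the HotSpot CMBitMap, and the class name and std::atomic-backed storage are assumptions made for the sketch.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using HeapWord = uintptr_t;   // stand-in for HotSpot's HeapWord

// Minimal sketch of a mark bitmap covering a contiguous range of heap words:
// one bit per word, bounds-checked like check_mark(), with a fetch_or-based
// parMark so concurrent marking threads can race on the same object safely.
class MarkBitMapSketch {
  const HeapWord* _bmStartWord;
  size_t          _bmWordSize;
  std::vector<std::atomic<uint64_t>> _bits;

  size_t heapWordToOffset(const HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < _bmStartWord + _bmWordSize &&
           "outside underlying space?");
    return static_cast<size_t>(addr - _bmStartWord);
  }

 public:
  MarkBitMapSketch(const HeapWord* start, size_t word_size)
    : _bmStartWord(start), _bmWordSize(word_size), _bits((word_size + 63) / 64) {}

  // Serial mark: only one thread owns this part of the heap.
  void mark(const HeapWord* addr) {
    size_t i = heapWordToOffset(addr);
    _bits[i / 64].fetch_or(uint64_t(1) << (i % 64), std::memory_order_relaxed);
  }

  // Parallel mark: returns true only for the thread that actually set the bit,
  // mirroring the "did I claim this object?" contract of par_set_bit().
  bool parMark(const HeapWord* addr) {
    size_t i = heapWordToOffset(addr);
    uint64_t mask = uint64_t(1) << (i % 64);
    return (_bits[i / 64].fetch_or(mask, std::memory_order_relaxed) & mask) == 0;
  }

  bool isMarked(const HeapWord* addr) const {
    size_t i = heapWordToOffset(addr);
    return (_bits[i / 64].load(std::memory_order_relaxed) >> (i % 64)) & 1;
  }
};
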
    15.1 --- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Aug 22 13:24:04 2014 +0200
    15.2 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Tue Aug 26 13:38:33 2014 -0700
    15.3 @@ -173,7 +173,7 @@
    15.4  
    15.5    // Should be called when we want to release the active region which
    15.6    // is returned after it's been retired.
    15.7 -  HeapRegion* release();
    15.8 +  virtual HeapRegion* release();
    15.9  
   15.10  #if G1_ALLOC_REGION_TRACING
   15.11    void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
    16.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Aug 22 13:24:04 2014 +0200
    16.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Aug 26 13:38:33 2014 -0700
    16.3 @@ -32,64 +32,37 @@
    16.4  
    16.5  PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    16.6  
    16.7 +void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
    16.8 +  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
    16.9 +  // retrieve it here since this would cause firing of several asserts. The code
   16.10 +  // executed after commit of a region already needs to do some re-initialization of
   16.11 +  // the HeapRegion, so we combine that.
   16.12 +}
   16.13 +
   16.14  //////////////////////////////////////////////////////////////////////
   16.15  // G1BlockOffsetSharedArray
   16.16  //////////////////////////////////////////////////////////////////////
   16.17  
   16.18 -G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
   16.19 -                                                   size_t init_word_size) :
   16.20 -  _reserved(reserved), _end(NULL)
   16.21 -{
   16.22 -  size_t size = compute_size(reserved.word_size());
   16.23 -  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
   16.24 -  if (!rs.is_reserved()) {
   16.25 -    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
   16.26 -  }
   16.27 -  if (!_vs.initialize(rs, 0)) {
   16.28 -    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
   16.29 -  }
   16.30 +G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
   16.31 +  _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
   16.32  
   16.33 -  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
   16.34 +  _reserved = heap;
   16.35 +  _end = NULL;
   16.36  
   16.37 -  _offset_array = (u_char*)_vs.low_boundary();
   16.38 -  resize(init_word_size);
   16.39 +  MemRegion bot_reserved = storage->reserved();
   16.40 +
   16.41 +  _offset_array = (u_char*)bot_reserved.start();
   16.42 +  _end = _reserved.end();
   16.43 +
   16.44 +  storage->set_mapping_changed_listener(&_listener);
   16.45 +
   16.46    if (TraceBlockOffsetTable) {
   16.47      gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
   16.48      gclog_or_tty->print_cr("  "
   16.49                    "  rs.base(): " INTPTR_FORMAT
   16.50                    "  rs.size(): " INTPTR_FORMAT
   16.51                    "  rs end(): " INTPTR_FORMAT,
   16.52 -                  rs.base(), rs.size(), rs.base() + rs.size());
   16.53 -    gclog_or_tty->print_cr("  "
   16.54 -                  "  _vs.low_boundary(): " INTPTR_FORMAT
   16.55 -                  "  _vs.high_boundary(): " INTPTR_FORMAT,
   16.56 -                  _vs.low_boundary(),
   16.57 -                  _vs.high_boundary());
   16.58 -  }
   16.59 -}
   16.60 -
   16.61 -void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
   16.62 -  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
   16.63 -  size_t new_size = compute_size(new_word_size);
   16.64 -  size_t old_size = _vs.committed_size();
   16.65 -  size_t delta;
   16.66 -  char* high = _vs.high();
   16.67 -  _end = _reserved.start() + new_word_size;
   16.68 -  if (new_size > old_size) {
   16.69 -    delta = ReservedSpace::page_align_size_up(new_size - old_size);
   16.70 -    assert(delta > 0, "just checking");
   16.71 -    if (!_vs.expand_by(delta)) {
   16.72 -      // Do better than this for Merlin
   16.73 -      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
   16.74 -    }
   16.75 -    assert(_vs.high() == high + delta, "invalid expansion");
   16.76 -    // Initialization of the contents is left to the
   16.77 -    // G1BlockOffsetArray that uses it.
   16.78 -  } else {
   16.79 -    delta = ReservedSpace::page_align_size_down(old_size - new_size);
   16.80 -    if (delta == 0) return;
   16.81 -    _vs.shrink_by(delta);
   16.82 -    assert(_vs.high() == high - delta, "invalid expansion");
   16.83 +                  bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
   16.84    }
   16.85  }
   16.86  
   16.87 @@ -100,18 +73,7 @@
   16.88  }
   16.89  
   16.90  void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
   16.91 -  check_index(index_for(right - 1), "right address out of range");
   16.92 -  assert(left  < right, "Heap addresses out of order");
   16.93 -  size_t num_cards = pointer_delta(right, left) >> LogN_words;
   16.94 -  if (UseMemSetInBOT) {
   16.95 -    memset(&_offset_array[index_for(left)], offset, num_cards);
   16.96 -  } else {
   16.97 -    size_t i = index_for(left);
   16.98 -    const size_t end = i + num_cards;
   16.99 -    for (; i < end; i++) {
  16.100 -      _offset_array[i] = offset;
  16.101 -    }
  16.102 -  }
   16.103 +  set_offset_array(index_for(left), index_for(right - 1), offset);
  16.104  }
  16.105  
  16.106  //////////////////////////////////////////////////////////////////////
  16.107 @@ -651,6 +613,25 @@
  16.108    _next_offset_index = 0;
  16.109  }
  16.110  
  16.111 +HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
  16.112 +  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
  16.113 +         "just checking");
  16.114 +  _next_offset_index = _array->index_for_raw(_bottom);
  16.115 +  _next_offset_index++;
  16.116 +  _next_offset_threshold =
  16.117 +    _array->address_for_index_raw(_next_offset_index);
  16.118 +  return _next_offset_threshold;
  16.119 +}
  16.120 +
  16.121 +void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
  16.122 +  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
  16.123 +         "just checking");
  16.124 +  size_t bottom_index = _array->index_for_raw(_bottom);
  16.125 +  assert(_array->address_for_index_raw(bottom_index) == _bottom,
  16.126 +         "Precondition of call");
  16.127 +  _array->set_offset_array_raw(bottom_index, 0);
  16.128 +}
  16.129 +
  16.130  HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
  16.131    assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
  16.132           "just checking");
  16.133 @@ -675,8 +656,7 @@
  16.134    assert(new_top <= _end, "_end should have already been updated");
  16.135  
  16.136    // The first BOT entry should have offset 0.
  16.137 -  zero_bottom_entry();
  16.138 -  initialize_threshold();
  16.139 +  reset_bot();
  16.140    alloc_block(_bottom, new_top);
  16.141   }
  16.142  
    17.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Fri Aug 22 13:24:04 2014 +0200
    17.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Tue Aug 26 13:38:33 2014 -0700
    17.3 @@ -25,6 +25,7 @@
    17.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
    17.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
    17.6  
    17.7 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
    17.8  #include "memory/memRegion.hpp"
    17.9  #include "runtime/virtualspace.hpp"
   17.10  #include "utilities/globalDefinitions.hpp"
   17.11 @@ -106,6 +107,11 @@
   17.12    inline HeapWord* block_start_const(const void* addr) const;
   17.13  };
   17.14  
   17.15 +class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
   17.16 + public:
   17.17 +  virtual void on_commit(uint start_idx, size_t num_regions);
   17.18 +};
   17.19 +
   17.20  // This implementation of "G1BlockOffsetTable" divides the covered region
   17.21  // into "N"-word subregions (where "N" = 2^"LogN".  An array with an entry
   17.22  // for each such subregion indicates how far back one must go to find the
   17.23 @@ -125,6 +131,7 @@
   17.24    friend class VMStructs;
   17.25  
   17.26  private:
   17.27 +  G1BlockOffsetSharedArrayMappingChangedListener _listener;
   17.28    // The reserved region covered by the shared array.
   17.29    MemRegion _reserved;
   17.30  
   17.31 @@ -133,16 +140,8 @@
   17.32  
   17.33    // Array for keeping offsets for retrieving object start fast given an
   17.34    // address.
   17.35 -  VirtualSpace _vs;
   17.36    u_char* _offset_array;          // byte array keeping backwards offsets
   17.37  
   17.38 -  void check_index(size_t index, const char* msg) const {
   17.39 -    assert(index < _vs.committed_size(),
   17.40 -           err_msg("%s - "
   17.41 -                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
   17.42 -                   msg, index, _vs.committed_size()));
   17.43 -  }
   17.44 -
   17.45    void check_offset(size_t offset, const char* msg) const {
   17.46      assert(offset <= N_words,
   17.47             err_msg("%s - "
   17.48 @@ -152,63 +151,33 @@
   17.49  
   17.50    // Bounds checking accessors:
   17.51    // For performance these have to devolve to array accesses in product builds.
   17.52 -  u_char offset_array(size_t index) const {
   17.53 -    check_index(index, "index out of range");
   17.54 -    return _offset_array[index];
   17.55 -  }
   17.56 +  inline u_char offset_array(size_t index) const;
   17.57  
   17.58    void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
   17.59  
   17.60 -  void set_offset_array(size_t index, u_char offset) {
   17.61 -    check_index(index, "index out of range");
   17.62 -    check_offset(offset, "offset too large");
   17.63 +  void set_offset_array_raw(size_t index, u_char offset) {
   17.64      _offset_array[index] = offset;
   17.65    }
   17.66  
   17.67 -  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
   17.68 -    check_index(index, "index out of range");
   17.69 -    assert(high >= low, "addresses out of order");
   17.70 -    check_offset(pointer_delta(high, low), "offset too large");
   17.71 -    _offset_array[index] = (u_char) pointer_delta(high, low);
   17.72 -  }
   17.73 +  inline void set_offset_array(size_t index, u_char offset);
   17.74  
   17.75 -  void set_offset_array(size_t left, size_t right, u_char offset) {
   17.76 -    check_index(right, "right index out of range");
   17.77 -    assert(left <= right, "indexes out of order");
   17.78 -    size_t num_cards = right - left + 1;
   17.79 -    if (UseMemSetInBOT) {
   17.80 -      memset(&_offset_array[left], offset, num_cards);
   17.81 -    } else {
   17.82 -      size_t i = left;
   17.83 -      const size_t end = i + num_cards;
   17.84 -      for (; i < end; i++) {
   17.85 -        _offset_array[i] = offset;
   17.86 -      }
   17.87 -    }
   17.88 -  }
   17.89 +  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
   17.90  
   17.91 -  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
   17.92 -    check_index(index, "index out of range");
   17.93 -    assert(high >= low, "addresses out of order");
   17.94 -    check_offset(pointer_delta(high, low), "offset too large");
   17.95 -    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
   17.96 -  }
   17.97 +  inline void set_offset_array(size_t left, size_t right, u_char offset);
   17.98 +
   17.99 +  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
  17.100  
  17.101    bool is_card_boundary(HeapWord* p) const;
  17.102  
  17.103 +public:
  17.104 +
  17.105    // Return the number of slots needed for an offset array
  17.106    // that covers mem_region_words words.
  17.107 -  // We always add an extra slot because if an object
  17.108 -  // ends on a card boundary we put a 0 in the next
  17.109 -  // offset array slot, so we want that slot always
  17.110 -  // to be reserved.
  17.111 -
  17.112 -  size_t compute_size(size_t mem_region_words) {
  17.113 -    size_t number_of_slots = (mem_region_words / N_words) + 1;
  17.114 -    return ReservedSpace::page_align_size_up(number_of_slots);
  17.115 +  static size_t compute_size(size_t mem_region_words) {
  17.116 +    size_t number_of_slots = (mem_region_words / N_words);
  17.117 +    return ReservedSpace::allocation_align_size_up(number_of_slots);
  17.118    }
  17.119  
  17.120 -public:
  17.121    enum SomePublicConstants {
  17.122      LogN = 9,
  17.123      LogN_words = LogN - LogHeapWordSize,
  17.124 @@ -222,25 +191,21 @@
  17.125    // least "init_word_size".) The contents of the initial table are
  17.126    // undefined; it is the responsibility of the constituent
  17.127    // G1BlockOffsetTable(s) to initialize cards.
  17.128 -  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
  17.129 -
  17.130 -  // Notes a change in the committed size of the region covered by the
  17.131 -  // table.  The "new_word_size" may not be larger than the size of the
  17.132 -  // reserved region this table covers.
  17.133 -  void resize(size_t new_word_size);
  17.134 +  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
  17.135  
  17.136    void set_bottom(HeapWord* new_bottom);
  17.137  
  17.138 -  // Updates all the BlockOffsetArray's sharing this shared array to
  17.139 -  // reflect the current "top"'s of their spaces.
  17.140 -  void update_offset_arrays();
  17.141 -
  17.142    // Return the appropriate index into "_offset_array" for "p".
  17.143    inline size_t index_for(const void* p) const;
  17.144 +  inline size_t index_for_raw(const void* p) const;
  17.145  
  17.146    // Return the address indicating the start of the region corresponding to
  17.147    // "index" in "_offset_array".
  17.148    inline HeapWord* address_for_index(size_t index) const;
  17.149 +  // Variant of address_for_index that does not check the index for validity.
  17.150 +  inline HeapWord* address_for_index_raw(size_t index) const {
  17.151 +    return _reserved.start() + (index << LogN_words);
  17.152 +  }
  17.153  };
  17.154  
  17.155  // And here is the G1BlockOffsetTable subtype that uses the array.
  17.156 @@ -480,6 +445,14 @@
  17.157                        blk_start, blk_end);
  17.158    }
  17.159  
  17.160 +  // Variant of zero_bottom_entry that does not check for availability of the
  17.161 +  // memory first.
  17.162 +  void zero_bottom_entry_raw();
  17.163 +  // Variant of initialize_threshold that does not check for availability of the
  17.164 +  // memory first.
  17.165 +  HeapWord* initialize_threshold_raw();
  17.166 +  // Zero out the entry for _bottom (offset will be zero).
  17.167 +  void zero_bottom_entry();
  17.168   public:
  17.169    G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
  17.170  
  17.171 @@ -487,8 +460,10 @@
  17.172    // bottom of the covered region.
  17.173    HeapWord* initialize_threshold();
  17.174  
  17.175 -  // Zero out the entry for _bottom (offset will be zero).
  17.176 -  void      zero_bottom_entry();
  17.177 +  void reset_bot() {
  17.178 +    zero_bottom_entry_raw();
  17.179 +    initialize_threshold_raw();
  17.180 +  }
  17.181  
  17.182    // Return the next threshold, the point at which the table should be
  17.183    // updated.
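
With compute_size() now static and address_for_index_raw() a pure shift, the BOT footprint and the index/address mapping are simple functions of the reserved heap size. A quick arithmetic check of those formulas follows; LogHeapWordSize = 3 (an 8-byte HeapWord, i.e. a 64-bit build) is an assumption of the sketch, and the final allocation-alignment round-up is omitted.

#include <cstddef>

// Rough sizing check for the block offset table, assuming 8-byte HeapWords.
constexpr size_t LogHeapWordSize = 3;                         // 64-bit build assumption
constexpr size_t LogN            = 9;                         // 512-byte cards
constexpr size_t LogN_words      = LogN - LogHeapWordSize;
constexpr size_t N_words         = size_t(1) << LogN_words;   // 64 words per card

// One u_char slot per N_words-sized card (alignment round-up omitted here).
constexpr size_t compute_slots(size_t mem_region_words) {
  return mem_region_words / N_words;
}

// Example: a 1 GiB reserved heap.
constexpr size_t heap_bytes = size_t(1) << 30;
constexpr size_t heap_words = heap_bytes >> LogHeapWordSize;   // 134,217,728 words
static_assert(compute_slots(heap_words) == (size_t(1) << 21),  // 2 MiB of offsets
              "1 GiB of heap needs one byte of BOT per 512-byte card");

// Index <-> address round trip in the spirit of index_for_raw / address_for_index_raw.
constexpr size_t index_for_raw(size_t byte_offset_in_heap) {
  return byte_offset_in_heap >> LogN;           // which 512-byte card
}
constexpr size_t word_for_index_raw(size_t index) {
  return index << LogN_words;                   // first word covered by that card
}
static_assert(index_for_raw(word_for_index_raw(12345) << LogHeapWordSize) == 12345,
              "the raw index and address computations are inverses on card boundaries");
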
    18.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Fri Aug 22 13:24:04 2014 +0200
    18.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Tue Aug 26 13:38:33 2014 -0700
    18.3 @@ -47,14 +47,69 @@
    18.4    }
    18.5  }
    18.6  
    18.7 +#define check_index(index, msg)                                                \
    18.8 +  assert((index) < (_reserved.word_size() >> LogN_words),                      \
    18.9 +         err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \
   18.10 +                 msg, (index), (_reserved.word_size() >> LogN_words)));        \
   18.11 +  assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),   \
   18.12 +         err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT             \
   18.13 +                 " (%u) is not in committed area.",                            \
   18.14 +                 (index),                                                      \
   18.15 +                 p2i(address_for_index_raw(index)),                            \
   18.16 +                 G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
   18.17 +
   18.18 +u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
   18.19 +  check_index(index, "index out of range");
   18.20 +  return _offset_array[index];
   18.21 +}
   18.22 +
   18.23 +void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
   18.24 +  check_index(index, "index out of range");
   18.25 +  set_offset_array_raw(index, offset);
   18.26 +}
   18.27 +
   18.28 +void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
   18.29 +  check_index(index, "index out of range");
   18.30 +  assert(high >= low, "addresses out of order");
   18.31 +  size_t offset = pointer_delta(high, low);
   18.32 +  check_offset(offset, "offset too large");
   18.33 +  set_offset_array(index, (u_char)offset);
   18.34 +}
   18.35 +
   18.36 +void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
   18.37 +  check_index(right, "right index out of range");
   18.38 +  assert(left <= right, "indexes out of order");
   18.39 +  size_t num_cards = right - left + 1;
   18.40 +  if (UseMemSetInBOT) {
   18.41 +    memset(&_offset_array[left], offset, num_cards);
   18.42 +  } else {
   18.43 +    size_t i = left;
   18.44 +    const size_t end = i + num_cards;
   18.45 +    for (; i < end; i++) {
   18.46 +      _offset_array[i] = offset;
   18.47 +    }
   18.48 +  }
   18.49 +}
   18.50 +
   18.51 +void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
   18.52 +  check_index(index, "index out of range");
   18.53 +  assert(high >= low, "addresses out of order");
   18.54 +  check_offset(pointer_delta(high, low), "offset too large");
   18.55 +  assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
   18.56 +}
   18.57 +
   18.58 +// Variant of index_for that does not check the index for validity.
   18.59 +inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
   18.60 +  return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
   18.61 +}
   18.62 +
   18.63  inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
   18.64    char* pc = (char*)p;
   18.65    assert(pc >= (char*)_reserved.start() &&
   18.66           pc <  (char*)_reserved.end(),
   18.67           err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
   18.68                   p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
   18.69 -  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
   18.70 -  size_t result = delta >> LogN;
   18.71 +  size_t result = index_for_raw(p);
   18.72    check_index(result, "bad index from address");
   18.73    return result;
   18.74  }
   18.75 @@ -62,7 +117,7 @@
   18.76  inline HeapWord*
   18.77  G1BlockOffsetSharedArray::address_for_index(size_t index) const {
   18.78    check_index(index, "index out of range");
   18.79 -  HeapWord* result = _reserved.start() + (index << LogN_words);
   18.80 +  HeapWord* result = address_for_index_raw(index);
   18.81    assert(result >= _reserved.start() && result < _reserved.end(),
   18.82           err_msg("bad address from index result " PTR_FORMAT
   18.83                   " _reserved.start() " PTR_FORMAT " _reserved.end() "
   18.84 @@ -71,6 +126,8 @@
   18.85    return result;
   18.86  }
   18.87  
   18.88 +#undef check_index
   18.89 +
   18.90  inline size_t
   18.91  G1BlockOffsetArray::block_size(const HeapWord* p) const {
   18.92    return gsp()->block_size(p);
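
Both inline headers above use the same macro discipline: a multi-line bounds assertion is defined once (check_mark, check_index), expanded into each inline accessor, and #undef'd at the end of the header so it cannot leak into including files. A stripped-down sketch of that pattern, with placeholder names and a plain assert instead of err_msg:

#include <cassert>
#include <cstddef>

// Sketch of the define/use/#undef pattern from the .inline.hpp files above:
// a shared bounds assertion is expanded into each inline accessor and then
// undefined at the end of the header so the name does not leak to includers.
// Member and class names here are placeholders, not the HotSpot ones.
struct OffsetArraySketch {
  unsigned char* _offset_array;   // backing storage provided by the caller
  size_t         _num_slots;

  OffsetArraySketch(unsigned char* array, size_t num_slots)
    : _offset_array(array), _num_slots(num_slots) {}

#define check_index(index, msg) \
  assert((index) < _num_slots && (msg))

  unsigned char offset_array(size_t index) const {
    check_index(index, "index out of range");
    return _offset_array[index];
  }

  void set_offset_array(size_t index, unsigned char offset) {
    check_index(index, "index out of range");
    _offset_array[index] = offset;    // raw store once the check has passed
  }

#undef check_index
};
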
    19.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Fri Aug 22 13:24:04 2014 +0200
    19.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Tue Aug 26 13:38:33 2014 -0700
    19.3 @@ -33,31 +33,26 @@
    19.4  
    19.5  PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    19.6  
    19.7 +void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
    19.8 +  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
    19.9 +  _counts->clear_range(mr);
   19.10 +}
   19.11 +
   19.12  void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   19.13    if (has_count_table()) {
   19.14 -    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
   19.15 -           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
   19.16      assert(from_card_num < to_card_num,
   19.17             err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
   19.18                     from_card_num, to_card_num));
   19.19 -    assert(to_card_num <= _committed_max_card_num,
   19.20 -           err_msg("to card num out of range: "
   19.21 -                   "to: "SIZE_FORMAT ", "
   19.22 -                   "max: "SIZE_FORMAT,
   19.23 -                   to_card_num, _committed_max_card_num));
   19.24 -
   19.25 -    to_card_num = MIN2(_committed_max_card_num, to_card_num);
   19.26 -
   19.27      Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
   19.28    }
   19.29  }
   19.30  
   19.31  G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
   19.32 -  _g1h(g1h), _card_counts(NULL),
   19.33 -  _reserved_max_card_num(0), _committed_max_card_num(0),
   19.34 -  _committed_size(0) {}
   19.35 +  _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
   19.36 +  _listener.set_cardcounts(this);
   19.37 +}
   19.38  
   19.39 -void G1CardCounts::initialize() {
   19.40 +void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
   19.41    assert(_g1h->max_capacity() > 0, "initialization order");
   19.42    assert(_g1h->capacity() == 0, "initialization order");
   19.43  
   19.44 @@ -70,70 +65,9 @@
   19.45      _ct_bs = _g1h->g1_barrier_set();
   19.46      _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
   19.47  
   19.48 -    // Allocate/Reserve the counts table
   19.49 -    size_t reserved_bytes = _g1h->max_capacity();
   19.50 -    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
   19.51 -
   19.52 -    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
   19.53 -    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
   19.54 -    if (!rs.is_reserved()) {
   19.55 -      warning("Could not reserve enough space for the card counts table");
   19.56 -      guarantee(!has_reserved_count_table(), "should be NULL");
   19.57 -      return;
   19.58 -    }
   19.59 -
   19.60 -    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
   19.61 -
   19.62 -    _card_counts_storage.initialize(rs, 0);
   19.63 -    _card_counts = (jubyte*) _card_counts_storage.low();
   19.64 -  }
   19.65 -}
   19.66 -
   19.67 -void G1CardCounts::resize(size_t heap_capacity) {
   19.68 -  // Expand the card counts table to handle a heap with the given capacity.
   19.69 -
   19.70 -  if (!has_reserved_count_table()) {
   19.71 -    // Don't expand if we failed to reserve the card counts table.
   19.72 -    return;
   19.73 -  }
   19.74 -
   19.75 -  assert(_committed_size ==
   19.76 -         ReservedSpace::allocation_align_size_up(_committed_size),
   19.77 -         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
   19.78 -
   19.79 -  // Verify that the committed space for the card counts matches our
   19.80 -  // committed max card num. Note for some allocation alignments, the
   19.81 -  // amount of space actually committed for the counts table will be able
   19.82 -  // to span more cards than the number spanned by the maximum heap.
   19.83 -  size_t prev_committed_size = _committed_size;
   19.84 -  size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
   19.85 -
   19.86 -  assert(prev_committed_card_num == _committed_max_card_num,
   19.87 -         err_msg("Card mismatch: "
   19.88 -                 "prev: " SIZE_FORMAT ", "
   19.89 -                 "committed: "SIZE_FORMAT", "
   19.90 -                 "reserved: "SIZE_FORMAT,
   19.91 -                 prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
   19.92 -
   19.93 -  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
   19.94 -  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
   19.95 -  size_t new_committed_card_num = committed_to_card_num(new_committed_size);
   19.96 -
   19.97 -  if (_committed_max_card_num < new_committed_card_num) {
   19.98 -    // we need to expand the backing store for the card counts
   19.99 -    size_t expand_size = new_committed_size - prev_committed_size;
  19.100 -
  19.101 -    if (!_card_counts_storage.expand_by(expand_size)) {
  19.102 -      warning("Card counts table backing store commit failure");
  19.103 -      return;
  19.104 -    }
  19.105 -    assert(_card_counts_storage.committed_size() == new_committed_size,
  19.106 -           "expansion commit failure");
  19.107 -
  19.108 -    _committed_size = new_committed_size;
  19.109 -    _committed_max_card_num = new_committed_card_num;
  19.110 -
  19.111 -    clear_range(prev_committed_card_num, _committed_max_card_num);
  19.112 +    _card_counts = (jubyte*) mapper->reserved().start();
  19.113 +    _reserved_max_card_num = mapper->reserved().byte_size();
  19.114 +    mapper->set_mapping_changed_listener(&_listener);
  19.115    }
  19.116  }
  19.117  
  19.118 @@ -149,12 +83,13 @@
  19.119    uint count = 0;
  19.120    if (has_count_table()) {
  19.121      size_t card_num = ptr_2_card_num(card_ptr);
  19.122 -    if (card_num < _committed_max_card_num) {
  19.123 -      count = (uint) _card_counts[card_num];
  19.124 -      if (count < G1ConcRSHotCardLimit) {
  19.125 -        _card_counts[card_num] =
  19.126 -          (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
  19.127 -      }
  19.128 +    assert(card_num < _reserved_max_card_num,
  19.129 +           err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")",
  19.130 +                   card_num, _reserved_max_card_num));
  19.131 +    count = (uint) _card_counts[card_num];
  19.132 +    if (count < G1ConcRSHotCardLimit) {
  19.133 +      _card_counts[card_num] =
  19.134 +        (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
  19.135      }
  19.136    }
  19.137    return count;
  19.138 @@ -165,31 +100,23 @@
  19.139  }
  19.140  
  19.141  void G1CardCounts::clear_region(HeapRegion* hr) {
  19.142 -  assert(!hr->isHumongous(), "Should have been cleared");
  19.143 +  MemRegion mr(hr->bottom(), hr->end());
  19.144 +  clear_range(mr);
  19.145 +}
  19.146 +
  19.147 +void G1CardCounts::clear_range(MemRegion mr) {
  19.148    if (has_count_table()) {
  19.149 -    HeapWord* bottom = hr->bottom();
  19.150 -
  19.151 -    // We use the last address in hr as hr could be the
  19.152 -    // last region in the heap. In which case trying to find
  19.153 -    // the card for hr->end() will be an OOB accesss to the
  19.154 -    // card table.
  19.155 -    HeapWord* last = hr->end() - 1;
  19.156 -    assert(_g1h->g1_committed().contains(last),
  19.157 -           err_msg("last not in committed: "
  19.158 -                   "last: " PTR_FORMAT ", "
  19.159 -                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
  19.160 -                   last,
  19.161 -                   _g1h->g1_committed().start(),
  19.162 -                   _g1h->g1_committed().end()));
  19.163 -
  19.164 -    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
  19.165 -    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
  19.166 +    const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
  19.167 +    // We use the last address in the range as the range could represent the
  19.168 +    // last region in the heap. In which case trying to find the card will be an
  19.169 +    // OOB access to the card table.
  19.170 +    const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
  19.171  
  19.172  #ifdef ASSERT
  19.173      HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
  19.174 -    assert(start_addr == hr->bottom(), "alignment");
  19.175 +    assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
  19.176      HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
  19.177 -    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
  19.178 +    assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
  19.179  #endif // ASSERT
  19.180  
  19.181      // Clear the counts for the (exclusive) card range.
  19.182 @@ -199,14 +126,22 @@
  19.183    }
  19.184  }
  19.185  
  19.186 +class G1CardCountsClearClosure : public HeapRegionClosure {
  19.187 + private:
  19.188 +  G1CardCounts* _card_counts;
  19.189 + public:
  19.190 +  G1CardCountsClearClosure(G1CardCounts* card_counts) :
  19.191 +    HeapRegionClosure(), _card_counts(card_counts) { }
  19.192 +
  19.193 +
  19.194 +  virtual bool doHeapRegion(HeapRegion* r) {
  19.195 +    _card_counts->clear_region(r);
  19.196 +    return false;
  19.197 +  }
  19.198 +};
  19.199 +
  19.200  void G1CardCounts::clear_all() {
  19.201    assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
  19.202 -  clear_range((size_t)0, _committed_max_card_num);
  19.203 +  G1CardCountsClearClosure cl(this);
  19.204 +  _g1h->heap_region_iterate(&cl);
  19.205  }
  19.206 -
  19.207 -G1CardCounts::~G1CardCounts() {
  19.208 -  if (has_reserved_count_table()) {
  19.209 -    _card_counts_storage.release();
  19.210 -  }
  19.211 -}
  19.212 -
    20.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Fri Aug 22 13:24:04 2014 +0200
    20.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Tue Aug 26 13:38:33 2014 -0700
    20.3 @@ -25,14 +25,26 @@
    20.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
    20.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
    20.6  
    20.7 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
    20.8  #include "memory/allocation.hpp"
    20.9  #include "runtime/virtualspace.hpp"
   20.10  #include "utilities/globalDefinitions.hpp"
   20.11  
   20.12  class CardTableModRefBS;
   20.13 +class G1CardCounts;
   20.14  class G1CollectedHeap;
   20.15 +class G1RegionToSpaceMapper;
   20.16  class HeapRegion;
   20.17  
   20.18 +class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
   20.19 + private:
   20.20 +  G1CardCounts* _counts;
   20.21 + public:
   20.22 +  void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
   20.23 +
   20.24 +  virtual void on_commit(uint start_idx, size_t num_regions);
   20.25 +};
   20.26 +
   20.27  // Table to track the number of times a card has been refined. Once
   20.28  // a card has been refined a certain number of times, it is
   20.29  // considered 'hot' and its refinement is delayed by inserting the
   20.30 @@ -41,6 +53,8 @@
   20.31  // is 'drained' during the next evacuation pause.
   20.32  
   20.33  class G1CardCounts: public CHeapObj<mtGC> {
   20.34 +  G1CardCountsMappingChangedListener _listener;
   20.35 +
   20.36    G1CollectedHeap* _g1h;
   20.37  
   20.38    // The table of counts
   20.39 @@ -49,27 +63,18 @@
   20.40    // Max capacity of the reserved space for the counts table
   20.41    size_t _reserved_max_card_num;
   20.42  
   20.43 -  // Max capacity of the committed space for the counts table
   20.44 -  size_t _committed_max_card_num;
   20.45 -
   20.46 -  // Size of committed space for the counts table
   20.47 -  size_t _committed_size;
   20.48 -
   20.49    // CardTable bottom.
   20.50    const jbyte* _ct_bot;
   20.51  
   20.52    // Barrier set
   20.53    CardTableModRefBS* _ct_bs;
   20.54  
   20.55 -  // The virtual memory backing the counts table
   20.56 -  VirtualSpace _card_counts_storage;
   20.57 -
   20.58    // Returns true if the card counts table has been reserved.
   20.59    bool has_reserved_count_table() { return _card_counts != NULL; }
   20.60  
   20.61    // Returns true if the card counts table has been reserved and committed.
   20.62    bool has_count_table() {
   20.63 -    return has_reserved_count_table() && _committed_max_card_num > 0;
   20.64 +    return has_reserved_count_table();
   20.65    }
   20.66  
   20.67    size_t ptr_2_card_num(const jbyte* card_ptr) {
   20.68 @@ -79,37 +84,24 @@
   20.69                     "_ct_bot: " PTR_FORMAT,
   20.70                     p2i(card_ptr), p2i(_ct_bot)));
   20.71      size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
   20.72 -    assert(card_num >= 0 && card_num < _committed_max_card_num,
   20.73 +    assert(card_num >= 0 && card_num < _reserved_max_card_num,
   20.74             err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
   20.75      return card_num;
   20.76    }
   20.77  
   20.78    jbyte* card_num_2_ptr(size_t card_num) {
   20.79 -    assert(card_num >= 0 && card_num < _committed_max_card_num,
   20.80 +    assert(card_num >= 0 && card_num < _reserved_max_card_num,
   20.81             err_msg("card num out of range: "SIZE_FORMAT, card_num));
   20.82      return (jbyte*) (_ct_bot + card_num);
   20.83    }
   20.84  
   20.85 -  // Helper routine.
   20.86 -  // Returns the number of cards that can be counted by the given committed
   20.87 -  // table size, with a maximum of the number of cards spanned by the max
   20.88 -  // capacity of the heap.
   20.89 -  size_t committed_to_card_num(size_t committed_size) {
   20.90 -    return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
   20.91 -  }
   20.92 -
   20.93    // Clear the counts table for the given (exclusive) index range.
   20.94    void clear_range(size_t from_card_num, size_t to_card_num);
   20.95  
   20.96   public:
   20.97    G1CardCounts(G1CollectedHeap* g1h);
   20.98 -  ~G1CardCounts();
   20.99  
  20.100 -  void initialize();
  20.101 -
  20.102 -  // Resize the committed space for the card counts table in
  20.103 -  // response to a resize of the committed space for the heap.
  20.104 -  void resize(size_t heap_capacity);
  20.105 +  void initialize(G1RegionToSpaceMapper* mapper);
  20.106  
  20.107    // Increments the refinement count for the given card.
  20.108    // Returns the pre-increment count value.
  20.109 @@ -122,8 +114,10 @@
  20.110    // Clears the card counts for the cards spanned by the region
  20.111    void clear_region(HeapRegion* hr);
  20.112  
  20.113 +  // Clears the card counts for the cards spanned by the MemRegion
  20.114 +  void clear_range(MemRegion mr);
  20.115 +
  20.116    // Clear the entire card counts table during GC.
  20.117 -  // Updates the policy stats with the duration.
  20.118    void clear_all();
  20.119  };
  20.120  
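
The recurring shape in these files is the G1MappingChangedListener hook: each auxiliary table registers itself with the G1RegionToSpaceMapper that backs its storage, and the mapper calls on_commit(start_idx, num_regions) whenever region-sized chunks are committed, so the table can reinitialize just that slice. A self-contained sketch of that callback wiring follows; the class names only mimic the ones in the diff, and the std::vector<bool> commit map is a stand-in for real memory commits.

#include <cstddef>
#include <iostream>
#include <vector>

// Sketch of the listener pattern used by the region-to-space mappers above:
// whoever owns the backing storage registers a listener and is told which
// region-sized chunks were just committed, so it can reset only that part.
class MappingChangedListenerSketch {
 public:
  virtual ~MappingChangedListenerSketch() = default;
  virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
};

class RegionMapperSketch {
  std::vector<bool> _committed;                      // per-region commit state
  MappingChangedListenerSketch* _listener = nullptr;

 public:
  explicit RegionMapperSketch(size_t max_regions) : _committed(max_regions, false) {}

  void set_mapping_changed_listener(MappingChangedListenerSketch* l) { _listener = l; }

  // Commit [start_idx, start_idx + num_regions) and notify the listener once.
  void commit_regions(unsigned start_idx, size_t num_regions) {
    for (size_t i = 0; i < num_regions; i++) {
      _committed[start_idx + i] = true;              // real code would commit memory here
    }
    if (_listener != nullptr) {
      _listener->on_commit(start_idx, num_regions);
    }
  }
};

// A table that clears its slice of storage for freshly committed regions,
// in the spirit of G1CardCountsMappingChangedListener::on_commit().
class CardCountsListenerSketch : public MappingChangedListenerSketch {
 public:
  void on_commit(unsigned start_idx, size_t num_regions) override {
    std::cout << "clear counts for regions [" << start_idx << ", "
              << start_idx + num_regions << ")\n";
  }
};

int main() {
  RegionMapperSketch mapper(16);
  CardCountsListenerSketch counts;
  mapper.set_mapping_changed_listener(&counts);
  mapper.commit_regions(3, 2);   // prints: clear counts for regions [3, 5)
  return 0;
}
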
    21.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 22 13:24:04 2014 +0200
    21.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 26 13:38:33 2014 -0700
    21.3 @@ -43,12 +43,13 @@
    21.4  #include "gc_implementation/g1/g1MarkSweep.hpp"
    21.5  #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    21.6  #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
    21.7 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
    21.8  #include "gc_implementation/g1/g1RemSet.inline.hpp"
    21.9  #include "gc_implementation/g1/g1StringDedup.hpp"
   21.10  #include "gc_implementation/g1/g1YCTypes.hpp"
   21.11  #include "gc_implementation/g1/heapRegion.inline.hpp"
   21.12  #include "gc_implementation/g1/heapRegionRemSet.hpp"
   21.13 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
   21.14 +#include "gc_implementation/g1/heapRegionSet.inline.hpp"
   21.15  #include "gc_implementation/g1/vm_operations_g1.hpp"
   21.16  #include "gc_implementation/shared/gcHeapSummary.hpp"
   21.17  #include "gc_implementation/shared/gcTimer.hpp"
   21.18 @@ -377,6 +378,14 @@
   21.19    gclog_or_tty->cr();
   21.20  }
   21.21  
   21.22 +void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
   21.23 +  OtherRegionsTable::invalidate(start_idx, num_regions);
   21.24 +}
   21.25 +
   21.26 +void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
   21.27 +  reset_from_card_cache(start_idx, num_regions);
   21.28 +}
   21.29 +
   21.30  void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   21.31  {
   21.32    // Claim the right to put the region on the dirty cards region list
   21.33 @@ -442,24 +451,18 @@
   21.34  // implementation of is_scavengable() for G1 will indicate that
   21.35  // all nmethods must be scanned during a partial collection.
   21.36  bool G1CollectedHeap::is_in_partial_collection(const void* p) {
   21.37 -  HeapRegion* hr = heap_region_containing(p);
   21.38 -  return hr != NULL && hr->in_collection_set();
   21.39 +  if (p == NULL) {
   21.40 +    return false;
   21.41 +  }
   21.42 +  return heap_region_containing(p)->in_collection_set();
   21.43  }
   21.44  #endif
   21.45  
   21.46  // Returns true if the reference points to an object that
   21.47  // can move in an incremental collection.
   21.48  bool G1CollectedHeap::is_scavengable(const void* p) {
   21.49 -  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   21.50 -  G1CollectorPolicy* g1p = g1h->g1_policy();
   21.51    HeapRegion* hr = heap_region_containing(p);
   21.52 -  if (hr == NULL) {
   21.53 -     // null
   21.54 -     assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
   21.55 -     return false;
   21.56 -  } else {
   21.57 -    return !hr->isHumongous();
   21.58 -  }
   21.59 +  return !hr->isHumongous();
   21.60  }
   21.61  
   21.62  void G1CollectedHeap::check_ct_logs_at_safepoint() {
   21.63 @@ -525,9 +528,9 @@
   21.64        // again to allocate from it.
   21.65        append_secondary_free_list();
   21.66  
   21.67 -      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
   21.68 +      assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
   21.69               "empty we should have moved at least one entry to the free_list");
   21.70 -      HeapRegion* res = _free_list.remove_region(is_old);
   21.71 +      HeapRegion* res = _hrs.allocate_free_region(is_old);
   21.72        if (G1ConcRegionFreeingVerbose) {
   21.73          gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   21.74                                 "allocated "HR_FORMAT" from secondary_free_list",
   21.75 @@ -568,7 +571,7 @@
   21.76      }
   21.77    }
   21.78  
   21.79 -  res = _free_list.remove_region(is_old);
   21.80 +  res = _hrs.allocate_free_region(is_old);
   21.81  
   21.82    if (res == NULL) {
   21.83      if (G1ConcRegionFreeingVerbose) {
   21.84 @@ -593,8 +596,8 @@
   21.85        // Given that expand() succeeded in expanding the heap, and we
   21.86        // always expand the heap by an amount aligned to the heap
   21.87        // region size, the free list should in theory not be empty.
   21.88 -      // In either case remove_region() will check for NULL.
   21.89 -      res = _free_list.remove_region(is_old);
   21.90 +      // In either case allocate_free_region() will check for NULL.
   21.91 +      res = _hrs.allocate_free_region(is_old);
   21.92      } else {
   21.93        _expand_heap_after_alloc_failure = false;
   21.94      }
   21.95 @@ -602,55 +605,11 @@
   21.96    return res;
   21.97  }
   21.98  
   21.99 -uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
  21.100 -                                                        size_t word_size) {
  21.101 -  assert(isHumongous(word_size), "word_size should be humongous");
  21.102 -  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
  21.103 -
  21.104 -  uint first = G1_NULL_HRS_INDEX;
  21.105 -  if (num_regions == 1) {
  21.106 -    // Only one region to allocate, no need to go through the slower
  21.107 -    // path. The caller will attempt the expansion if this fails, so
  21.108 -    // let's not try to expand here too.
  21.109 -    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
  21.110 -    if (hr != NULL) {
  21.111 -      first = hr->hrs_index();
  21.112 -    } else {
  21.113 -      first = G1_NULL_HRS_INDEX;
  21.114 -    }
  21.115 -  } else {
  21.116 -    // We can't allocate humongous regions while cleanupComplete() is
  21.117 -    // running, since some of the regions we find to be empty might not
  21.118 -    // yet be added to the free list and it is not straightforward to
  21.119 -    // know which list they are on so that we can remove them. Note
  21.120 -    // that we only need to do this if we need to allocate more than
  21.121 -    // one region to satisfy the current humongous allocation
  21.122 -    // request. If we are only allocating one region we use the common
  21.123 -    // region allocation code (see above).
  21.124 -    wait_while_free_regions_coming();
  21.125 -    append_secondary_free_list_if_not_empty_with_lock();
  21.126 -
  21.127 -    if (free_regions() >= num_regions) {
  21.128 -      first = _hrs.find_contiguous(num_regions);
  21.129 -      if (first != G1_NULL_HRS_INDEX) {
  21.130 -        for (uint i = first; i < first + num_regions; ++i) {
  21.131 -          HeapRegion* hr = region_at(i);
  21.132 -          assert(hr->is_empty(), "sanity");
  21.133 -          assert(is_on_master_free_list(hr), "sanity");
  21.134 -          hr->set_pending_removal(true);
  21.135 -        }
  21.136 -        _free_list.remove_all_pending(num_regions);
  21.137 -      }
  21.138 -    }
  21.139 -  }
  21.140 -  return first;
  21.141 -}
  21.142 -
  21.143  HeapWord*
  21.144  G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
  21.145                                                             uint num_regions,
  21.146                                                             size_t word_size) {
  21.147 -  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
  21.148 +  assert(first != G1_NO_HRS_INDEX, "pre-condition");
  21.149    assert(isHumongous(word_size), "word_size should be humongous");
  21.150    assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
  21.151  
  21.152 @@ -788,42 +747,70 @@
  21.153  
  21.154    verify_region_sets_optional();
  21.155  
  21.156 -  size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
  21.157 -  uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
  21.158 -  uint x_num = expansion_regions();
  21.159 -  uint fs = _hrs.free_suffix();
  21.160 -  uint first = humongous_obj_allocate_find_first(num_regions, word_size);
  21.161 -  if (first == G1_NULL_HRS_INDEX) {
  21.162 -    // The only thing we can do now is attempt expansion.
  21.163 -    if (fs + x_num >= num_regions) {
  21.164 -      // If the number of regions we're trying to allocate for this
  21.165 -      // object is at most the number of regions in the free suffix,
  21.166 -      // then the call to humongous_obj_allocate_find_first() above
  21.167 -      // should have succeeded and we wouldn't be here.
  21.168 -      //
  21.169 -      // We should only be trying to expand when the free suffix is
  21.170 -      // not sufficient for the object _and_ we have some expansion
  21.171 -      // room available.
  21.172 -      assert(num_regions > fs, "earlier allocation should have succeeded");
  21.173 -
  21.174 +  uint first = G1_NO_HRS_INDEX;
  21.175 +  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
  21.176 +
  21.177 +  if (obj_regions == 1) {
  21.178 +    // Only one region to allocate, try to use a fast path by directly allocating
  21.179 +    // from the free lists. Do not try to expand here, we will potentially do that
  21.180 +    // later.
  21.181 +    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
  21.182 +    if (hr != NULL) {
  21.183 +      first = hr->hrs_index();
  21.184 +    }
  21.185 +  } else {
  21.186 +    // We can't allocate humongous regions spanning more than one region while
  21.187 +    // cleanupComplete() is running, since some of the regions we find to be
  21.188 +    // empty might not yet be added to the free list. It is not straightforward
   21.189 +    // to know which list they are on so that we can remove them. We only
  21.190 +    // need to do this if we need to allocate more than one region to satisfy the
  21.191 +    // current humongous allocation request. If we are only allocating one region
  21.192 +    // we use the one-region region allocation code (see above), that already
  21.193 +    // potentially waits for regions from the secondary free list.
  21.194 +    wait_while_free_regions_coming();
  21.195 +    append_secondary_free_list_if_not_empty_with_lock();
  21.196 +
  21.197 +    // Policy: Try only empty regions (i.e. already committed first). Maybe we
  21.198 +    // are lucky enough to find some.
  21.199 +    first = _hrs.find_contiguous_only_empty(obj_regions);
  21.200 +    if (first != G1_NO_HRS_INDEX) {
  21.201 +      _hrs.allocate_free_regions_starting_at(first, obj_regions);
  21.202 +    }
  21.203 +  }
  21.204 +
  21.205 +  if (first == G1_NO_HRS_INDEX) {
  21.206 +    // Policy: We could not find enough regions for the humongous object in the
  21.207 +    // free list. Look through the heap to find a mix of free and uncommitted regions.
  21.208 +    // If so, try expansion.
  21.209 +    first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
  21.210 +    if (first != G1_NO_HRS_INDEX) {
  21.211 +      // We found something. Make sure these regions are committed, i.e. expand
  21.212 +      // the heap. Alternatively we could do a defragmentation GC.
  21.213        ergo_verbose1(ErgoHeapSizing,
  21.214                      "attempt heap expansion",
  21.215                      ergo_format_reason("humongous allocation request failed")
  21.216                      ergo_format_byte("allocation request"),
  21.217                      word_size * HeapWordSize);
  21.218 -      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
  21.219 -        // Even though the heap was expanded, it might not have
  21.220 -        // reached the desired size. So, we cannot assume that the
  21.221 -        // allocation will succeed.
  21.222 -        first = humongous_obj_allocate_find_first(num_regions, word_size);
  21.223 +
  21.224 +      _hrs.expand_at(first, obj_regions);
  21.225 +      g1_policy()->record_new_heap_size(num_regions());
  21.226 +
  21.227 +#ifdef ASSERT
  21.228 +      for (uint i = first; i < first + obj_regions; ++i) {
  21.229 +        HeapRegion* hr = region_at(i);
  21.230 +        assert(hr->is_empty(), "sanity");
  21.231 +        assert(is_on_master_free_list(hr), "sanity");
  21.232        }
  21.233 +#endif
  21.234 +      _hrs.allocate_free_regions_starting_at(first, obj_regions);
  21.235 +    } else {
  21.236 +      // Policy: Potentially trigger a defragmentation GC.
  21.237      }
  21.238    }
  21.239  
  21.240    HeapWord* result = NULL;
  21.241 -  if (first != G1_NULL_HRS_INDEX) {
  21.242 -    result =
  21.243 -      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
  21.244 +  if (first != G1_NO_HRS_INDEX) {
  21.245 +    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
  21.246      assert(result != NULL, "it should always return a valid result");
  21.247  
  21.248      // A successful humongous object allocation changes the used space
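
The rewritten humongous path above works through progressively more expensive options: the ordinary free list for a single region, then a contiguous run of already committed empty regions, then a run that may include uncommitted regions (which is then committed via expand_at), and finally it leaves a defragmentation GC to the caller. Below is a condensed, self-contained model of that ladder; the single-region free-list fast path is folded into the general search, and the region-state vector is a toy stand-in for the HeapRegionSeq.

#include <cstddef>
#include <vector>

// Toy model of the humongous allocation ladder in the hunk above.
// Region states are simplified to: free+committed, uncommitted, or in use.
enum class RegionState { FreeCommitted, Uncommitted, InUse };

constexpr size_t NO_INDEX = static_cast<size_t>(-1);   // stand-in for G1_NO_HRS_INDEX

// Find obj_regions contiguous regions whose states are all acceptable.
static size_t find_contiguous(const std::vector<RegionState>& regions,
                              size_t obj_regions, bool allow_uncommitted) {
  for (size_t first = 0; first + obj_regions <= regions.size(); first++) {
    size_t len = 0;
    while (len < obj_regions) {
      RegionState s = regions[first + len];
      bool ok = (s == RegionState::FreeCommitted) ||
                (allow_uncommitted && s == RegionState::Uncommitted);
      if (!ok) break;
      len++;
    }
    if (len == obj_regions) return first;
  }
  return NO_INDEX;
}

// Cheap options first; expansion (committing regions) only as a fallback.
size_t humongous_find_regions(std::vector<RegionState>& regions, size_t obj_regions) {
  // Policy: try already committed empty regions first.
  size_t first = find_contiguous(regions, obj_regions, /*allow_uncommitted=*/false);
  if (first == NO_INDEX) {
    // Policy: accept a mix of free and uncommitted regions, then "expand" them.
    first = find_contiguous(regions, obj_regions, /*allow_uncommitted=*/true);
    if (first == NO_INDEX) {
      return NO_INDEX;   // caller may trigger a defragmenting GC and retry
    }
  }
  for (size_t i = 0; i < obj_regions; i++) {
    regions[first + i] = RegionState::InUse;   // commit + claim in one step here
  }
  return first;
}
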
  21.249 @@ -1386,7 +1373,7 @@
  21.250          G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
  21.251        }
  21.252  
  21.253 -      assert(free_regions() == 0, "we should not have added any free regions");
  21.254 +      assert(num_free_regions() == 0, "we should not have added any free regions");
  21.255        rebuild_region_sets(false /* free_list_only */);
  21.256  
  21.257        // Enqueue any discovered reference objects that have
  21.258 @@ -1751,21 +1738,6 @@
  21.259    return NULL;
  21.260  }
  21.261  
  21.262 -void G1CollectedHeap::update_committed_space(HeapWord* old_end,
  21.263 -                                             HeapWord* new_end) {
  21.264 -  assert(old_end != new_end, "don't call this otherwise");
  21.265 -  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
  21.266 -
  21.267 -  // Update the committed mem region.
  21.268 -  _g1_committed.set_end(new_end);
  21.269 -  // Tell the card table about the update.
  21.270 -  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  21.271 -  // Tell the BOT about the update.
  21.272 -  _bot_shared->resize(_g1_committed.word_size());
  21.273 -  // Tell the hot card cache about the update
  21.274 -  _cg1r->hot_card_cache()->resize_card_counts(capacity());
  21.275 -}
  21.276 -
  21.277  bool G1CollectedHeap::expand(size_t expand_bytes) {
  21.278    size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  21.279    aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  21.280 @@ -1776,55 +1748,22 @@
  21.281                  ergo_format_byte("attempted expansion amount"),
  21.282                  expand_bytes, aligned_expand_bytes);
  21.283  
  21.284 -  if (_g1_storage.uncommitted_size() == 0) {
  21.285 +  if (is_maximal_no_gc()) {
  21.286      ergo_verbose0(ErgoHeapSizing,
  21.287                        "did not expand the heap",
  21.288                        ergo_format_reason("heap already fully expanded"));
  21.289      return false;
  21.290    }
  21.291  
  21.292 -  // First commit the memory.
  21.293 -  HeapWord* old_end = (HeapWord*) _g1_storage.high();
  21.294 -  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
  21.295 -  if (successful) {
  21.296 -    // Then propagate this update to the necessary data structures.
  21.297 -    HeapWord* new_end = (HeapWord*) _g1_storage.high();
  21.298 -    update_committed_space(old_end, new_end);
  21.299 -
  21.300 -    FreeRegionList expansion_list("Local Expansion List");
  21.301 -    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
  21.302 -    assert(mr.start() == old_end, "post-condition");
  21.303 -    // mr might be a smaller region than what was requested if
  21.304 -    // expand_by() was unable to allocate the HeapRegion instances
  21.305 -    assert(mr.end() <= new_end, "post-condition");
  21.306 -
  21.307 -    size_t actual_expand_bytes = mr.byte_size();
  21.308 +  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
  21.309 +  assert(regions_to_expand > 0, "Must expand by at least one region");
  21.310 +
  21.311 +  uint expanded_by = _hrs.expand_by(regions_to_expand);
  21.312 +
  21.313 +  if (expanded_by > 0) {
  21.314 +    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
  21.315      assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
  21.316 -    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
  21.317 -           "post-condition");
  21.318 -    if (actual_expand_bytes < aligned_expand_bytes) {
  21.319 -      // We could not expand _hrs to the desired size. In this case we
  21.320 -      // need to shrink the committed space accordingly.
  21.321 -      assert(mr.end() < new_end, "invariant");
  21.322 -
  21.323 -      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
  21.324 -      // First uncommit the memory.
  21.325 -      _g1_storage.shrink_by(diff_bytes);
  21.326 -      // Then propagate this update to the necessary data structures.
  21.327 -      update_committed_space(new_end, mr.end());
  21.328 -    }
  21.329 -    _free_list.add_as_tail(&expansion_list);
  21.330 -
  21.331 -    if (_hr_printer.is_active()) {
  21.332 -      HeapWord* curr = mr.start();
  21.333 -      while (curr < mr.end()) {
  21.334 -        HeapWord* curr_end = curr + HeapRegion::GrainWords;
  21.335 -        _hr_printer.commit(curr, curr_end);
  21.336 -        curr = curr_end;
  21.337 -      }
  21.338 -      assert(curr == mr.end(), "post-condition");
  21.339 -    }
  21.340 -    g1_policy()->record_new_heap_size(n_regions());
  21.341 +    g1_policy()->record_new_heap_size(num_regions());
  21.342    } else {
  21.343      ergo_verbose0(ErgoHeapSizing,
  21.344                    "did not expand the heap",
  21.345 @@ -1832,12 +1771,12 @@
  21.346      // The expansion of the virtual storage space was unsuccessful.
  21.347      // Let's see if it was because we ran out of swap.
  21.348      if (G1ExitOnExpansionFailure &&
  21.349 -        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
  21.350 +        _hrs.available() >= regions_to_expand) {
  21.351        // We had head room...
  21.352        vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
  21.353      }
  21.354    }
  21.355 -  return successful;
  21.356 +  return regions_to_expand > 0;
  21.357  }
  21.358  
  21.359  void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  21.360 @@ -1848,7 +1787,6 @@
  21.361    uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
  21.362  
  21.363    uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
  21.364 -  HeapWord* old_end = (HeapWord*) _g1_storage.high();
  21.365    size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
  21.366  
  21.367    ergo_verbose3(ErgoHeapSizing,
  21.368 @@ -1858,22 +1796,7 @@
  21.369                  ergo_format_byte("attempted shrinking amount"),
  21.370                  shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  21.371    if (num_regions_removed > 0) {
  21.372 -    _g1_storage.shrink_by(shrunk_bytes);
  21.373 -    HeapWord* new_end = (HeapWord*) _g1_storage.high();
  21.374 -
  21.375 -    if (_hr_printer.is_active()) {
  21.376 -      HeapWord* curr = old_end;
  21.377 -      while (curr > new_end) {
  21.378 -        HeapWord* curr_end = curr;
  21.379 -        curr -= HeapRegion::GrainWords;
  21.380 -        _hr_printer.uncommit(curr, curr_end);
  21.381 -      }
  21.382 -    }
  21.383 -
  21.384 -    _expansion_regions += num_regions_removed;
  21.385 -    update_committed_space(old_end, new_end);
  21.386 -    HeapRegionRemSet::shrink_heap(n_regions());
  21.387 -    g1_policy()->record_new_heap_size(n_regions());
  21.388 +    g1_policy()->record_new_heap_size(num_regions());
  21.389    } else {
  21.390      ergo_verbose0(ErgoHeapSizing,
  21.391                    "did not shrink the heap",
  21.392 @@ -1924,7 +1847,6 @@
  21.393    _g1mm(NULL),
  21.394    _refine_cte_cl(NULL),
  21.395    _full_collection(false),
  21.396 -  _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
  21.397    _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  21.398    _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  21.399    _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  21.400 @@ -2038,8 +1960,6 @@
  21.401    _reserved.set_start((HeapWord*)heap_rs.base());
  21.402    _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  21.403  
  21.404 -  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
  21.405 -
  21.406    // Create the gen rem set (and barrier set) for the entire reserved region.
  21.407    _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  21.408    set_barrier_set(rem_set()->bs());
  21.409 @@ -2053,20 +1973,65 @@
  21.410  
  21.411    // Carve out the G1 part of the heap.
  21.412  
  21.413 -  ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  21.414 -  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  21.415 -                           g1_rs.size()/HeapWordSize);
  21.416 -
  21.417 -  _g1_storage.initialize(g1_rs, 0);
  21.418 -  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  21.419 -  _hrs.initialize((HeapWord*) _g1_reserved.start(),
  21.420 -                  (HeapWord*) _g1_reserved.end());
  21.421 -  assert(_hrs.max_length() == _expansion_regions,
  21.422 -         err_msg("max length: %u expansion regions: %u",
  21.423 -                 _hrs.max_length(), _expansion_regions));
  21.424 -
  21.425 -  // Do later initialization work for concurrent refinement.
  21.426 -  _cg1r->init();
  21.427 +  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
  21.428 +  G1RegionToSpaceMapper* heap_storage =
  21.429 +    G1RegionToSpaceMapper::create_mapper(g1_rs,
  21.430 +                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
  21.431 +                                         HeapRegion::GrainBytes,
  21.432 +                                         1,
  21.433 +                                         mtJavaHeap);
  21.434 +  heap_storage->set_mapping_changed_listener(&_listener);
  21.435 +
  21.436 +  // Reserve space for the block offset table. We do not support automatic uncommit
  21.437 +  // for the card table at this time. BOT only.
  21.438 +  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
  21.439 +  G1RegionToSpaceMapper* bot_storage =
  21.440 +    G1RegionToSpaceMapper::create_mapper(bot_rs,
  21.441 +                                         os::vm_page_size(),
  21.442 +                                         HeapRegion::GrainBytes,
  21.443 +                                         G1BlockOffsetSharedArray::N_bytes,
  21.444 +                                         mtGC);
  21.445 +
  21.446 +  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
  21.447 +  G1RegionToSpaceMapper* cardtable_storage =
  21.448 +    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
  21.449 +                                         os::vm_page_size(),
  21.450 +                                         HeapRegion::GrainBytes,
  21.451 +                                         G1BlockOffsetSharedArray::N_bytes,
  21.452 +                                         mtGC);
  21.453 +
  21.454 +  // Reserve space for the card counts table.
  21.455 +  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
  21.456 +  G1RegionToSpaceMapper* card_counts_storage =
  21.457 +    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
  21.458 +                                         os::vm_page_size(),
  21.459 +                                         HeapRegion::GrainBytes,
  21.460 +                                         G1BlockOffsetSharedArray::N_bytes,
  21.461 +                                         mtGC);
  21.462 +
  21.463 +  // Reserve space for prev and next bitmap.
  21.464 +  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
  21.465 +
  21.466 +  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
  21.467 +  G1RegionToSpaceMapper* prev_bitmap_storage =
  21.468 +    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
  21.469 +                                         os::vm_page_size(),
  21.470 +                                         HeapRegion::GrainBytes,
  21.471 +                                         CMBitMap::mark_distance(),
  21.472 +                                         mtGC);
  21.473 +
  21.474 +  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
  21.475 +  G1RegionToSpaceMapper* next_bitmap_storage =
  21.476 +    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
  21.477 +                                         os::vm_page_size(),
  21.478 +                                         HeapRegion::GrainBytes,
  21.479 +                                         CMBitMap::mark_distance(),
  21.480 +                                         mtGC);
  21.481 +
  21.482 +  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  21.483 +  g1_barrier_set()->initialize(cardtable_storage);
  21.484 +  // Do later initialization work for concurrent refinement.
  21.485 +  _cg1r->init(card_counts_storage);
  21.486  
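A minimal sketch of the proportional sizing behind the auxiliary mappers created above. It assumes the fourth create_mapper argument is the number of heap bytes covered by one byte of the auxiliary structure, and it assumes 1 MiB regions, 512-byte cards and one mark bit per 8-byte word; the constants and names below are illustrative, not the real G1RegionToSpaceMapper interface:

    #include <cstddef>

    static const size_t kGrainBytes          = 1024 * 1024; // assumed region size
    static const size_t kHeapBytesPerBotByte = 512;         // one BOT entry per 512-byte card (assumed)
    static const size_t kHeapBytesPerBmByte  = 64;          // 8 mark bits, each covering an 8-byte word (assumed)

    // Auxiliary-table bytes that must be backed when num_regions heap regions are
    // committed, given a heap-bytes-per-table-byte translation factor.
    size_t aux_bytes_for_regions(size_t num_regions, size_t heap_bytes_per_table_byte) {
      return num_regions * kGrainBytes / heap_bytes_per_table_byte;
    }

    // Example: committing 16 regions (16 MiB of heap) would need
    //   aux_bytes_for_regions(16, kHeapBytesPerBotByte) == 32 KiB of BOT storage and
    //   aux_bytes_for_regions(16, kHeapBytesPerBmByte)  == 256 KiB per marking bitmap.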
  21.487    // 6843694 - ensure that the maximum region index can fit
  21.488    // in the remembered set structures.
  21.489 @@ -2080,17 +2045,16 @@
  21.490  
  21.491    FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
  21.492  
  21.493 -  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  21.494 -                                             heap_word_size(init_byte_size));
  21.495 +  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
  21.496  
  21.497    _g1h = this;
  21.498  
  21.499 -  _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
  21.500 -  _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
  21.501 +  _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
  21.502 +  _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
  21.503  
  21.504    // Create the ConcurrentMark data structure and thread.
  21.505    // (Must do this late, so that "max_regions" is defined.)
  21.506 -  _cm = new ConcurrentMark(this, heap_rs);
  21.507 +  _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
  21.508    if (_cm == NULL || !_cm->completed_initialization()) {
  21.509      vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
  21.510      return JNI_ENOMEM;
  21.511 @@ -2145,12 +2109,10 @@
  21.512    // counts and that mechanism.
  21.513    SpecializationStats::clear();
  21.514  
  21.515 -  // Here we allocate the dummy full region that is required by the
  21.516 -  // G1AllocRegion class. If we don't pass an address in the reserved
  21.517 -  // space here, lots of asserts fire.
  21.518 -
  21.519 -  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
  21.520 -                                             _g1_reserved.start());
  21.521 +  // Here we allocate the dummy HeapRegion that is required by the
  21.522 +  // G1AllocRegion class.
  21.523 +  HeapRegion* dummy_region = _hrs.get_dummy_region();
  21.524 +
  21.525    // We'll re-use the same region whether the alloc region will
  21.526    // require BOT updates or not and, if it doesn't, then a non-young
  21.527    // region will complain that it cannot support allocations without
  21.528 @@ -2266,7 +2228,7 @@
  21.529  }
  21.530  
  21.531  size_t G1CollectedHeap::capacity() const {
  21.532 -  return _g1_committed.byte_size();
  21.533 +  return _hrs.length() * HeapRegion::GrainBytes;
  21.534  }
  21.535  
  21.536  void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  21.537 @@ -2375,7 +2337,7 @@
  21.538  }
  21.539  
  21.540  size_t G1CollectedHeap::unsafe_max_alloc() {
  21.541 -  if (free_regions() > 0) return HeapRegion::GrainBytes;
  21.542 +  if (num_free_regions() > 0) return HeapRegion::GrainBytes;
  21.543    // otherwise, is there space in the current allocation region?
  21.544  
  21.545    // We need to store the current allocation region in a local variable
  21.546 @@ -2590,8 +2552,8 @@
  21.547  }
  21.548  
  21.549  bool G1CollectedHeap::is_in(const void* p) const {
  21.550 -  if (_g1_committed.contains(p)) {
  21.551 -    // Given that we know that p is in the committed space,
  21.552 +  if (_hrs.reserved().contains(p)) {
  21.553 +    // Given that we know that p is in the reserved space,
  21.554      // heap_region_containing_raw() should successfully
  21.555      // return the containing region.
  21.556      HeapRegion* hr = heap_region_containing_raw(p);
  21.557 @@ -2601,6 +2563,18 @@
  21.558    }
  21.559  }
  21.560  
  21.561 +#ifdef ASSERT
  21.562 +bool G1CollectedHeap::is_in_exact(const void* p) const {
  21.563 +  bool contains = reserved_region().contains(p);
  21.564 +  bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
  21.565 +  if (contains && available) {
  21.566 +    return true;
  21.567 +  } else {
  21.568 +    return false;
  21.569 +  }
  21.570 +}
  21.571 +#endif
  21.572 +
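A worked reading of is_in_exact() above (the region numbers are assumptions for illustration): with a reserved range spanning 10 regions of which only regions 0-3 are currently available, a pointer into region 2 returns true (reserved and available), while a pointer into region 7 returns false because, although it lies inside the reserved range, its region has not been made available.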
  21.573  // Iteration functions.
  21.574  
  21.575  // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
  21.576 @@ -2665,83 +2639,9 @@
  21.577  void
  21.578  G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  21.579                                                   uint worker_id,
  21.580 -                                                 uint no_of_par_workers,
  21.581 -                                                 jint claim_value) {
  21.582 -  const uint regions = n_regions();
  21.583 -  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  21.584 -                             no_of_par_workers :
  21.585 -                             1);
  21.586 -  assert(UseDynamicNumberOfGCThreads ||
  21.587 -         no_of_par_workers == workers()->total_workers(),
  21.588 -         "Non dynamic should use fixed number of workers");
  21.589 -  // try to spread out the starting points of the workers
  21.590 -  const HeapRegion* start_hr =
  21.591 -                        start_region_for_worker(worker_id, no_of_par_workers);
  21.592 -  const uint start_index = start_hr->hrs_index();
  21.593 -
  21.594 -  // each worker will actually look at all regions
  21.595 -  for (uint count = 0; count < regions; ++count) {
  21.596 -    const uint index = (start_index + count) % regions;
  21.597 -    assert(0 <= index && index < regions, "sanity");
  21.598 -    HeapRegion* r = region_at(index);
  21.599 -    // we'll ignore "continues humongous" regions (we'll process them
  21.600 -    // when we come across their corresponding "start humongous"
  21.601 -    // region) and regions already claimed
  21.602 -    if (r->claim_value() == claim_value || r->continuesHumongous()) {
  21.603 -      continue;
  21.604 -    }
  21.605 -    // OK, try to claim it
  21.606 -    if (r->claimHeapRegion(claim_value)) {
  21.607 -      // success!
  21.608 -      assert(!r->continuesHumongous(), "sanity");
  21.609 -      if (r->startsHumongous()) {
  21.610 -        // If the region is "starts humongous" we'll iterate over its
  21.611 -        // "continues humongous" first; in fact we'll do them
  21.612 -        // first. The order is important. In on case, calling the
  21.613 -        // closure on the "starts humongous" region might de-allocate
  21.614 -        // and clear all its "continues humongous" regions and, as a
  21.615 -        // result, we might end up processing them twice. So, we'll do
  21.616 -        // them first (notice: most closures will ignore them anyway) and
  21.617 -        // then we'll do the "starts humongous" region.
  21.618 -        for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
  21.619 -          HeapRegion* chr = region_at(ch_index);
  21.620 -
  21.621 -          // if the region has already been claimed or it's not
  21.622 -          // "continues humongous" we're done
  21.623 -          if (chr->claim_value() == claim_value ||
  21.624 -              !chr->continuesHumongous()) {
  21.625 -            break;
  21.626 -          }
  21.627 -
  21.628 -          // No one should have claimed it directly. We can given
  21.629 -          // that we claimed its "starts humongous" region.
  21.630 -          assert(chr->claim_value() != claim_value, "sanity");
  21.631 -          assert(chr->humongous_start_region() == r, "sanity");
  21.632 -
  21.633 -          if (chr->claimHeapRegion(claim_value)) {
  21.634 -            // we should always be able to claim it; no one else should
  21.635 -            // be trying to claim this region
  21.636 -
  21.637 -            bool res2 = cl->doHeapRegion(chr);
  21.638 -            assert(!res2, "Should not abort");
  21.639 -
  21.640 -            // Right now, this holds (i.e., no closure that actually
  21.641 -            // does something with "continues humongous" regions
  21.642 -            // clears them). We might have to weaken it in the future,
  21.643 -            // but let's leave these two asserts here for extra safety.
  21.644 -            assert(chr->continuesHumongous(), "should still be the case");
  21.645 -            assert(chr->humongous_start_region() == r, "sanity");
  21.646 -          } else {
  21.647 -            guarantee(false, "we should not reach here");
  21.648 -          }
  21.649 -        }
  21.650 -      }
  21.651 -
  21.652 -      assert(!r->continuesHumongous(), "sanity");
  21.653 -      bool res = cl->doHeapRegion(r);
  21.654 -      assert(!res, "Should not abort");
  21.655 -    }
  21.656 -  }
  21.657 +                                                 uint num_workers,
  21.658 +                                                 jint claim_value) const {
  21.659 +  _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
  21.660  }
  21.661  
  21.662  class ResetClaimValuesClosure: public HeapRegionClosure {
  21.663 @@ -2919,17 +2819,6 @@
  21.664    return result;
  21.665  }
  21.666  
  21.667 -HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
  21.668 -                                                     uint no_of_par_workers) {
  21.669 -  uint worker_num =
  21.670 -           G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
  21.671 -  assert(UseDynamicNumberOfGCThreads ||
  21.672 -         no_of_par_workers == workers()->total_workers(),
  21.673 -         "Non dynamic should use fixed number of workers");
  21.674 -  const uint start_index = n_regions() * worker_i / worker_num;
  21.675 -  return region_at(start_index);
  21.676 -}
  21.677 -
  21.678  void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  21.679    HeapRegion* r = g1_policy()->collection_set();
  21.680    while (r != NULL) {
  21.681 @@ -2972,33 +2861,24 @@
  21.682  }
  21.683  
  21.684  HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
  21.685 -  // We're not using an iterator given that it will wrap around when
  21.686 -  // it reaches the last region and this is not what we want here.
  21.687 -  for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
  21.688 -    HeapRegion* hr = region_at(index);
  21.689 -    if (!hr->isHumongous()) {
  21.690 -      return hr;
  21.691 -    }
  21.692 -  }
  21.693 -  return NULL;
  21.694 +  HeapRegion* result = _hrs.next_region_in_heap(from);
  21.695 +  while (result != NULL && result->isHumongous()) {
  21.696 +    result = _hrs.next_region_in_heap(result);
  21.697 +  }
  21.698 +  return result;
  21.699  }
  21.700  
  21.701  Space* G1CollectedHeap::space_containing(const void* addr) const {
  21.702 -  Space* res = heap_region_containing(addr);
  21.703 -  return res;
  21.704 +  return heap_region_containing(addr);
  21.705  }
  21.706  
  21.707  HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  21.708    Space* sp = space_containing(addr);
  21.709 -  if (sp != NULL) {
  21.710 -    return sp->block_start(addr);
  21.711 -  }
  21.712 -  return NULL;
  21.713 +  return sp->block_start(addr);
  21.714  }
  21.715  
  21.716  size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  21.717    Space* sp = space_containing(addr);
  21.718 -  assert(sp != NULL, "block_size of address outside of heap");
  21.719    return sp->block_size(addr);
  21.720  }
  21.721  
  21.722 @@ -3043,7 +2923,7 @@
  21.723  }
  21.724  
  21.725  size_t G1CollectedHeap::max_capacity() const {
  21.726 -  return _g1_reserved.byte_size();
  21.727 +  return _hrs.reserved().byte_size();
  21.728  }
  21.729  
  21.730  jlong G1CollectedHeap::millis_since_last_gc() {
  21.731 @@ -3572,9 +3452,9 @@
  21.732    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  21.733              capacity()/K, used_unlocked()/K);
  21.734    st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  21.735 -            _g1_storage.low_boundary(),
  21.736 -            _g1_storage.high(),
  21.737 -            _g1_storage.high_boundary());
  21.738 +            _hrs.reserved().start(),
  21.739 +            _hrs.reserved().start() + _hrs.length() * HeapRegion::GrainWords,
  21.740 +            _hrs.reserved().end());
  21.741    st->cr();
  21.742    st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  21.743    uint young_regions = _young_list->length();
  21.744 @@ -4264,10 +4144,7 @@
  21.745              // No need for an ergo verbose message here,
  21.746              // expansion_amount() does this when it returns a value > 0.
  21.747              if (!expand(expand_bytes)) {
  21.748 -              // We failed to expand the heap so let's verify that
  21.749 -              // committed/uncommitted amount match the backing store
  21.750 -              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  21.751 -              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  21.752 +              // We failed to expand the heap. Cannot do anything about it.
  21.753              }
  21.754            }
  21.755          }
  21.756 @@ -4327,10 +4204,6 @@
  21.757        // RETIRE events are generated before the end GC event.
  21.758        _hr_printer.end_gc(false /* full */, (size_t) total_collections());
  21.759  
  21.760 -      if (mark_in_progress()) {
  21.761 -        concurrent_mark()->update_g1_committed();
  21.762 -      }
  21.763 -
  21.764  #ifdef TRACESPINNING
  21.765        ParallelTaskTerminator::print_termination_counts();
  21.766  #endif
  21.767 @@ -4652,30 +4525,19 @@
  21.768    ParGCAllocBuffer(gclab_word_size), _retired(true) { }
  21.769  
  21.770  void G1ParCopyHelper::mark_object(oop obj) {
  21.771 -#ifdef ASSERT
  21.772 -  HeapRegion* hr = _g1->heap_region_containing(obj);
  21.773 -  assert(hr != NULL, "sanity");
  21.774 -  assert(!hr->in_collection_set(), "should not mark objects in the CSet");
  21.775 -#endif // ASSERT
  21.776 +  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
  21.777  
  21.778    // We know that the object is not moving so it's safe to read its size.
  21.779    _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  21.780  }
  21.781  
  21.782  void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  21.783 -#ifdef ASSERT
  21.784    assert(from_obj->is_forwarded(), "from obj should be forwarded");
  21.785    assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  21.786    assert(from_obj != to_obj, "should not be self-forwarded");
  21.787  
  21.788 -  HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
  21.789 -  assert(from_hr != NULL, "sanity");
  21.790 -  assert(from_hr->in_collection_set(), "from obj should be in the CSet");
  21.791 -
  21.792 -  HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
  21.793 -  assert(to_hr != NULL, "sanity");
  21.794 -  assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
  21.795 -#endif // ASSERT
  21.796 +  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  21.797 +  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
  21.798  
  21.799    // The object might be in the process of being copied by another
  21.800    // worker so we cannot trust that its to-space image is
  21.801 @@ -6176,6 +6038,7 @@
  21.802                                    bool locked) {
  21.803    assert(!hr->isHumongous(), "this is only for non-humongous regions");
  21.804    assert(!hr->is_empty(), "the region should not be empty");
  21.805 +  assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
  21.806    assert(free_list != NULL, "pre-condition");
  21.807  
  21.808    if (G1VerifyBitmaps) {
  21.809 @@ -6230,7 +6093,7 @@
  21.810    assert(list != NULL, "list can't be null");
  21.811    if (!list->is_empty()) {
  21.812      MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  21.813 -    _free_list.add_ordered(list);
  21.814 +    _hrs.insert_list_into_free_list(list);
  21.815    }
  21.816  }
  21.817  
  21.818 @@ -6838,22 +6701,22 @@
  21.819      // this is that during a full GC string deduplication needs to know if
  21.820      // a collected region was young or old when the full GC was initiated.
  21.821    }
  21.822 -  _free_list.remove_all();
  21.823 +  _hrs.remove_all_free_regions();
  21.824  }
  21.825  
  21.826  class RebuildRegionSetsClosure : public HeapRegionClosure {
  21.827  private:
  21.828    bool            _free_list_only;
  21.829    HeapRegionSet*   _old_set;
  21.830 -  FreeRegionList* _free_list;
  21.831 +  HeapRegionSeq*   _hrs;
  21.832    size_t          _total_used;
  21.833  
  21.834  public:
  21.835    RebuildRegionSetsClosure(bool free_list_only,
  21.836 -                           HeapRegionSet* old_set, FreeRegionList* free_list) :
  21.837 +                           HeapRegionSet* old_set, HeapRegionSeq* hrs) :
  21.838      _free_list_only(free_list_only),
  21.839 -    _old_set(old_set), _free_list(free_list), _total_used(0) {
  21.840 -    assert(_free_list->is_empty(), "pre-condition");
  21.841 +    _old_set(old_set), _hrs(hrs), _total_used(0) {
  21.842 +    assert(_hrs->num_free_regions() == 0, "pre-condition");
  21.843      if (!free_list_only) {
  21.844        assert(_old_set->is_empty(), "pre-condition");
  21.845      }
  21.846 @@ -6866,7 +6729,7 @@
  21.847  
  21.848      if (r->is_empty()) {
  21.849        // Add free regions to the free list
  21.850 -      _free_list->add_as_tail(r);
  21.851 +      _hrs->insert_into_free_list(r);
  21.852      } else if (!_free_list_only) {
  21.853        assert(!r->is_young(), "we should not come across young regions");
  21.854  
  21.855 @@ -6894,7 +6757,7 @@
  21.856      _young_list->empty_list();
  21.857    }
  21.858  
  21.859 -  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
  21.860 +  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
  21.861    heap_region_iterate(&cl);
  21.862  
  21.863    if (!free_list_only) {
  21.864 @@ -6912,11 +6775,7 @@
  21.865  
  21.866  bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  21.867    HeapRegion* hr = heap_region_containing(p);
  21.868 -  if (hr == NULL) {
  21.869 -    return false;
  21.870 -  } else {
  21.871 -    return hr->is_in(p);
  21.872 -  }
  21.873 +  return hr->is_in(p);
  21.874  }
  21.875  
  21.876  // Methods for the mutator alloc region
  21.877 @@ -7053,13 +6912,42 @@
  21.878    _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  21.879                                 GCAllocForTenured);
  21.880  }
  21.881 +
  21.882 +HeapRegion* OldGCAllocRegion::release() {
  21.883 +  HeapRegion* cur = get();
  21.884 +  if (cur != NULL) {
  21.885 +    // Determine how far we are from the next card boundary. If it is smaller than
  21.886 +    // the minimum object size we can allocate into, expand into the next card.
  21.887 +    HeapWord* top = cur->top();
  21.888 +    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
  21.889 +
  21.890 +    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
  21.891 +
  21.892 +    if (to_allocate_words != 0) {
  21.893 +      // We are not at a card boundary. Fill up, possibly into the next card, taking the
  21.894 +      // end of the region and the minimum object size into account.
  21.895 +      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
  21.896 +                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
  21.897 +
  21.898 +      // Skip allocation if there is not enough space to allocate even the smallest
  21.899 +      // possible object. In this case this region will not be retained, so the
  21.900 +      // original problem cannot occur.
  21.901 +      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
  21.902 +        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
  21.903 +        CollectedHeap::fill_with_object(dummy, to_allocate_words);
  21.904 +      }
  21.905 +    }
  21.906 +  }
  21.907 +  return G1AllocRegion::release();
  21.908 +}
  21.909 +
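A minimal sketch of the card-alignment fill performed by OldGCAllocRegion::release() above, assuming 512-byte cards, 8-byte heap words and a 2-word minimum filler (all assumptions for illustration):

    #include <algorithm>
    #include <cstddef>

    static const size_t kCardBytes    = 512;  // assumed card size
    static const size_t kWordBytes    = 8;    // assumed HeapWord size
    static const size_t kMinFillWords = 2;    // assumed smallest filler object

    // Words of filler needed so that top (given as a word offset from the region
    // bottom) lands on a card boundary, without running past the region end.
    size_t filler_words(size_t top_offset_words, size_t region_end_offset_words) {
      size_t words_per_card = kCardBytes / kWordBytes;  // 64 with the assumptions above
      size_t to_boundary = (words_per_card - top_offset_words % words_per_card) % words_per_card;
      if (to_boundary == 0) {
        return 0;                                       // already card aligned
      }
      size_t fill = std::max(to_boundary, kMinFillWords);            // may spill into the next card
      fill = std::min(fill, region_end_offset_words - top_offset_words);
      return fill >= kMinFillWords ? fill : 0;          // too little room: skip, region is not retained
    }
    // Example: top 40 words into a card -> 24 filler words pad to the next card boundary.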
  21.910  // Heap region set verification
  21.911  
  21.912  class VerifyRegionListsClosure : public HeapRegionClosure {
  21.913  private:
  21.914    HeapRegionSet*   _old_set;
  21.915    HeapRegionSet*   _humongous_set;
  21.916 -  FreeRegionList*  _free_list;
  21.917 +  HeapRegionSeq*   _hrs;
  21.918  
  21.919  public:
  21.920    HeapRegionSetCount _old_count;
  21.921 @@ -7068,8 +6956,8 @@
  21.922  
  21.923    VerifyRegionListsClosure(HeapRegionSet* old_set,
  21.924                             HeapRegionSet* humongous_set,
  21.925 -                           FreeRegionList* free_list) :
  21.926 -    _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
  21.927 +                           HeapRegionSeq* hrs) :
  21.928 +    _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
  21.929      _old_count(), _humongous_count(), _free_count(){ }
  21.930  
  21.931    bool doHeapRegion(HeapRegion* hr) {
  21.932 @@ -7080,19 +6968,19 @@
  21.933      if (hr->is_young()) {
  21.934        // TODO
  21.935      } else if (hr->startsHumongous()) {
  21.936 -      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
  21.937 +      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
  21.938        _humongous_count.increment(1u, hr->capacity());
  21.939      } else if (hr->is_empty()) {
  21.940 -      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
  21.941 +      assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
  21.942        _free_count.increment(1u, hr->capacity());
  21.943      } else {
  21.944 -      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
  21.945 +      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
  21.946        _old_count.increment(1u, hr->capacity());
  21.947      }
  21.948      return false;
  21.949    }
  21.950  
  21.951 -  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
  21.952 +  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
  21.953      guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
  21.954      guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  21.955          old_set->total_capacity_bytes(), _old_count.capacity()));
  21.956 @@ -7101,26 +6989,17 @@
  21.957      guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  21.958          humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
  21.959  
  21.960 -    guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
  21.961 +    guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
  21.962      guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  21.963          free_list->total_capacity_bytes(), _free_count.capacity()));
  21.964    }
  21.965  };
  21.966  
  21.967 -HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
  21.968 -                                             HeapWord* bottom) {
  21.969 -  HeapWord* end = bottom + HeapRegion::GrainWords;
  21.970 -  MemRegion mr(bottom, end);
  21.971 -  assert(_g1_reserved.contains(mr), "invariant");
  21.972 -  // This might return NULL if the allocation fails
  21.973 -  return new HeapRegion(hrs_index, _bot_shared, mr);
  21.974 -}
  21.975 -
  21.976  void G1CollectedHeap::verify_region_sets() {
  21.977    assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  21.978  
  21.979    // First, check the explicit lists.
  21.980 -  _free_list.verify_list();
  21.981 +  _hrs.verify();
  21.982    {
  21.983      // Given that a concurrent operation might be adding regions to
  21.984      // the secondary free list we have to take the lock before
  21.985 @@ -7151,9 +7030,9 @@
  21.986    // Finally, make sure that the region accounting in the lists is
  21.987    // consistent with what we see in the heap.
  21.988  
  21.989 -  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
  21.990 +  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
  21.991    heap_region_iterate(&cl);
  21.992 -  cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
  21.993 +  cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
  21.994  }
  21.995  
  21.996  // Optimized nmethod scanning
    22.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 22 13:24:04 2014 +0200
    22.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 26 13:38:33 2014 -0700
    22.3 @@ -183,6 +183,13 @@
    22.4  public:
    22.5    OldGCAllocRegion()
    22.6    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
    22.7 +
    22.8 +  // This specialization of release() makes sure that the last card that has been
    22.9 +  // allocated into has been completely filled by a dummy object.
   22.10 +  // This avoids races when remembered set scanning wants to update the BOT of the
   22.11 +  // last card in the retained old gc alloc region, and allocation threads
   22.12 +  // allocating into that card at the same time.
   22.13 +  virtual HeapRegion* release();
   22.14  };
   22.15  
   22.16  // The G1 STW is alive closure.
   22.17 @@ -199,6 +206,13 @@
   22.18  
   22.19  class RefineCardTableEntryClosure;
   22.20  
   22.21 +class G1RegionMappingChangedListener : public G1MappingChangedListener {
   22.22 + private:
   22.23 +  void reset_from_card_cache(uint start_idx, size_t num_regions);
   22.24 + public:
   22.25 +  virtual void on_commit(uint start_idx, size_t num_regions);
   22.26 +};
   22.27 +
   22.28  class G1CollectedHeap : public SharedHeap {
   22.29    friend class VM_CollectForMetadataAllocation;
   22.30    friend class VM_G1CollectForAllocation;
   22.31 @@ -237,19 +251,9 @@
   22.32  
   22.33    static size_t _humongous_object_threshold_in_words;
   22.34  
   22.35 -  // Storage for the G1 heap.
   22.36 -  VirtualSpace _g1_storage;
   22.37 -  MemRegion    _g1_reserved;
   22.38 -
   22.39 -  // The part of _g1_storage that is currently committed.
   22.40 -  MemRegion _g1_committed;
   22.41 -
   22.42 -  // The master free list. It will satisfy all new region allocations.
   22.43 -  FreeRegionList _free_list;
   22.44 -
   22.45    // The secondary free list which contains regions that have been
   22.46 -  // freed up during the cleanup process. This will be appended to the
   22.47 -  // master free list when appropriate.
   22.48 +  // freed up during the cleanup process. This will be appended to
   22.49 +  // the master free list when appropriate.
   22.50    FreeRegionList _secondary_free_list;
   22.51  
   22.52    // It keeps track of the old regions.
   22.53 @@ -283,6 +287,9 @@
   22.54    // after heap shrinking (free_list_only == true).
   22.55    void rebuild_region_sets(bool free_list_only);
   22.56  
   22.57 +  // Callback for region mapping changed events.
   22.58 +  G1RegionMappingChangedListener _listener;
   22.59 +
   22.60    // The sequence of all heap regions in the heap.
   22.61    HeapRegionSeq _hrs;
   22.62  
   22.63 @@ -513,14 +520,6 @@
   22.64    // humongous object, set is_old to true. If not, to false.
   22.65    HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
   22.66  
   22.67 -  // Attempt to satisfy a humongous allocation request of the given
   22.68 -  // size by finding a contiguous set of free regions of num_regions
   22.69 -  // length and remove them from the master free list. Return the
   22.70 -  // index of the first region or G1_NULL_HRS_INDEX if the search
   22.71 -  // was unsuccessful.
   22.72 -  uint humongous_obj_allocate_find_first(uint num_regions,
   22.73 -                                         size_t word_size);
   22.74 -
   22.75    // Initialize a contiguous set of free regions of length num_regions
   22.76    // and starting at index first so that they appear as a single
   22.77    // humongous region.
   22.78 @@ -862,11 +861,6 @@
   22.79                          CodeBlobClosure* scan_strong_code,
   22.80                          uint worker_i);
   22.81  
   22.82 -  // Notifies all the necessary spaces that the committed space has
   22.83 -  // been updated (either expanded or shrunk). It should be called
   22.84 -  // after _g1_storage is updated.
   22.85 -  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
   22.86 -
   22.87    // The concurrent marker (and the thread it runs in.)
   22.88    ConcurrentMark* _cm;
   22.89    ConcurrentMarkThread* _cmThread;
   22.90 @@ -1186,27 +1180,20 @@
   22.91    virtual size_t unsafe_max_alloc();
   22.92  
   22.93    virtual bool is_maximal_no_gc() const {
   22.94 -    return _g1_storage.uncommitted_size() == 0;
   22.95 +    return _hrs.available() == 0;
   22.96    }
   22.97  
   22.98 -  // The total number of regions in the heap.
   22.99 -  uint n_regions() const { return _hrs.length(); }
  22.100 +  // The current number of regions in the heap.
  22.101 +  uint num_regions() const { return _hrs.length(); }
  22.102  
  22.103    // The max number of regions in the heap.
  22.104    uint max_regions() const { return _hrs.max_length(); }
  22.105  
  22.106    // The number of regions that are completely free.
  22.107 -  uint free_regions() const { return _free_list.length(); }
  22.108 +  uint num_free_regions() const { return _hrs.num_free_regions(); }
  22.109  
  22.110    // The number of regions that are not completely free.
  22.111 -  uint used_regions() const { return n_regions() - free_regions(); }
  22.112 -
  22.113 -  // The number of regions available for "regular" expansion.
  22.114 -  uint expansion_regions() const { return _expansion_regions; }
  22.115 -
  22.116 -  // Factory method for HeapRegion instances. It will return NULL if
  22.117 -  // the allocation fails.
  22.118 -  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
  22.119 +  uint num_used_regions() const { return num_regions() - num_free_regions(); }
  22.120  
  22.121    void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  22.122    void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  22.123 @@ -1255,7 +1242,7 @@
  22.124  
  22.125  #ifdef ASSERT
  22.126    bool is_on_master_free_list(HeapRegion* hr) {
  22.127 -    return hr->containing_set() == &_free_list;
  22.128 +    return _hrs.is_free(hr);
  22.129    }
  22.130  #endif // ASSERT
  22.131  
  22.132 @@ -1267,7 +1254,7 @@
  22.133    }
  22.134  
  22.135    void append_secondary_free_list() {
  22.136 -    _free_list.add_ordered(&_secondary_free_list);
  22.137 +    _hrs.insert_list_into_free_list(&_secondary_free_list);
  22.138    }
  22.139  
  22.140    void append_secondary_free_list_if_not_empty_with_lock() {
  22.141 @@ -1313,6 +1300,11 @@
  22.142  
  22.143    // Returns "TRUE" iff "p" points into the committed areas of the heap.
  22.144    virtual bool is_in(const void* p) const;
  22.145 +#ifdef ASSERT
  22.146 +  // Returns whether p is in one of the available areas of the heap. Slow but
  22.147 +  // extensive version.
  22.148 +  bool is_in_exact(const void* p) const;
  22.149 +#endif
  22.150  
  22.151    // Return "TRUE" iff the given object address is within the collection
  22.152    // set. Slow implementation.
  22.153 @@ -1373,25 +1365,19 @@
  22.154    // Return "TRUE" iff the given object address is in the reserved
  22.155    // region of g1.
  22.156    bool is_in_g1_reserved(const void* p) const {
  22.157 -    return _g1_reserved.contains(p);
  22.158 +    return _hrs.reserved().contains(p);
  22.159    }
  22.160  
  22.161    // Returns a MemRegion that corresponds to the space that has been
  22.162    // reserved for the heap
  22.163 -  MemRegion g1_reserved() {
  22.164 -    return _g1_reserved;
  22.165 -  }
  22.166 -
  22.167 -  // Returns a MemRegion that corresponds to the space that has been
  22.168 -  // committed in the heap
  22.169 -  MemRegion g1_committed() {
  22.170 -    return _g1_committed;
  22.171 +  MemRegion g1_reserved() const {
  22.172 +    return _hrs.reserved();
  22.173    }
  22.174  
  22.175    virtual bool is_in_closed_subset(const void* p) const;
  22.176  
  22.177 -  G1SATBCardTableModRefBS* g1_barrier_set() {
  22.178 -    return (G1SATBCardTableModRefBS*) barrier_set();
  22.179 +  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
  22.180 +    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
  22.181    }
  22.182  
  22.183    // This resets the card table to all zeros.  It is used after
  22.184 @@ -1425,6 +1411,8 @@
  22.185    // within the heap.
  22.186    inline uint addr_to_region(HeapWord* addr) const;
  22.187  
  22.188 +  inline HeapWord* bottom_addr_for_region(uint index) const;
  22.189 +
  22.190    // Divide the heap region sequence into "chunks" of some size (the number
  22.191    // of regions divided by the number of parallel threads times some
  22.192    // overpartition factor, currently 4).  Assumes that this will be called
  22.193 @@ -1438,10 +1426,10 @@
  22.194    // setting the claim value of the second and subsequent regions of the
  22.195    // chunk.)  For now requires that "doHeapRegion" always returns "false",
  22.196    // i.e., that a closure never attempt to abort a traversal.
  22.197 -  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
  22.198 -                                       uint worker,
  22.199 -                                       uint no_of_par_workers,
  22.200 -                                       jint claim_value);
  22.201 +  void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  22.202 +                                       uint worker_id,
  22.203 +                                       uint num_workers,
  22.204 +                                       jint claim_value) const;
  22.205  
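As a worked example of the chunking arithmetic described in the comment above (the region and worker counts are assumptions): with 2048 regions, 8 workers and the overpartition factor of 4, each chunk covers roughly 2048 / (8 * 4) = 64 regions, so workers claim the heap in 64-region chunks.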
  22.206    // It resets all the region claim values to the default.
  22.207    void reset_heap_region_claim_values();
  22.208 @@ -1466,11 +1454,6 @@
  22.209    // starting region for iterating over the current collection set.
  22.210    HeapRegion* start_cset_region_for_worker(uint worker_i);
  22.211  
  22.212 -  // This is a convenience method that is used by the
  22.213 -  // HeapRegionIterator classes to calculate the starting region for
  22.214 -  // each worker so that they do not all start from the same region.
  22.215 -  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
  22.216 -
  22.217    // Iterate over the regions (if any) in the current collection set.
  22.218    void collection_set_iterate(HeapRegionClosure* blk);
  22.219  
  22.220 @@ -1483,17 +1466,15 @@
  22.221    // space containing a given address, or else returns NULL.
  22.222    virtual Space* space_containing(const void* addr) const;
  22.223  
  22.224 -  // A G1CollectedHeap will contain some number of heap regions.  This
  22.225 -  // finds the region containing a given address, or else returns NULL.
  22.226 +  // Returns the HeapRegion that contains addr. addr must not be NULL.
  22.227 +  template <class T>
  22.228 +  inline HeapRegion* heap_region_containing_raw(const T addr) const;
  22.229 +
  22.230 +  // Returns the HeapRegion that contains addr. addr must not be NULL.
  22.231 +  // If addr is within a continues humongous region, it returns its humongous start region.
  22.232    template <class T>
  22.233    inline HeapRegion* heap_region_containing(const T addr) const;
  22.234  
  22.235 -  // Like the above, but requires "addr" to be in the heap (to avoid a
  22.236 -  // null-check), and unlike the above, may return an continuing humongous
  22.237 -  // region.
  22.238 -  template <class T>
  22.239 -  inline HeapRegion* heap_region_containing_raw(const T addr) const;
  22.240 -
  22.241    // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  22.242    // each address in the (reserved) heap is a member of exactly
  22.243    // one block.  The defining characteristic of a block is that it is
  22.244 @@ -1635,7 +1616,6 @@
  22.245    // the region to which the object belongs. An object is dead
  22.246    // iff a) it was not allocated since the last mark and b) it
  22.247    // is not marked.
  22.248 -
  22.249    bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
  22.250      return
  22.251        !hr->obj_allocated_since_prev_marking(obj) &&
  22.252 @@ -1645,7 +1625,6 @@
  22.253    // This function returns true when an object has been
  22.254    // around since the previous marking and hasn't yet
  22.255    // been marked during this marking.
  22.256 -
  22.257    bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
  22.258      return
  22.259        !hr->obj_allocated_since_next_marking(obj) &&
    23.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Aug 22 13:24:04 2014 +0200
    23.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Aug 26 13:38:33 2014 -0700
    23.3 @@ -47,23 +47,26 @@
    23.4    return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
    23.5  }
    23.6  
    23.7 -template <class T>
    23.8 -inline HeapRegion*
    23.9 -G1CollectedHeap::heap_region_containing(const T addr) const {
   23.10 -  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
   23.11 -  // hr can be null if addr in perm_gen
   23.12 -  if (hr != NULL && hr->continuesHumongous()) {
   23.13 -    hr = hr->humongous_start_region();
   23.14 -  }
   23.15 -  return hr;
   23.16 +inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
   23.17 +  return _hrs.reserved().start() + index * HeapRegion::GrainWords;
   23.18  }
   23.19  
   23.20  template <class T>
   23.21 -inline HeapRegion*
   23.22 -G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   23.23 -  assert(_g1_reserved.contains((const void*) addr), "invariant");
   23.24 -  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
   23.25 -  return res;
   23.26 +inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
   23.27 +  assert(addr != NULL, "invariant");
   23.28 +  assert(is_in_g1_reserved((const void*) addr),
   23.29 +      err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
   23.30 +          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
   23.31 +  return _hrs.addr_to_region((HeapWord*) addr);
   23.32 +}
   23.33 +
   23.34 +template <class T>
   23.35 +inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   23.36 +  HeapRegion* hr = heap_region_containing_raw(addr);
   23.37 +  if (hr->continuesHumongous()) {
   23.38 +    return hr->humongous_start_region();
   23.39 +  }
   23.40 +  return hr;
   23.41  }
   23.42  
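A minimal sketch of the address-to-region index arithmetic used by addr_to_region() and bottom_addr_for_region() above, assuming a 1 MiB region size (a 20-bit shift) and an arbitrary heap base; both constants are assumptions for illustration:

    #include <cstddef>
    #include <cstdint>

    static const size_t   kLogGrainBytes = 20;                 // assumed: 1 MiB regions
    static const size_t   kGrainBytes    = (size_t)1 << kLogGrainBytes;
    static const uint64_t kReservedBase  = 0x100000000ULL;     // assumed reserved heap base

    // Index of the region containing addr: byte offset from the reserved base,
    // shifted down by the log of the region size.
    unsigned region_index_for(uint64_t addr) {
      return (unsigned)((addr - kReservedBase) >> kLogGrainBytes);
    }

    // Bottom address of a region: the inverse mapping.
    uint64_t bottom_for_region(unsigned index) {
      return kReservedBase + (uint64_t)index * kGrainBytes;
    }

    // Example: an address 5.5 MiB past the base maps to region 5, whose bottom
    // is exactly 5 MiB past the base.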
   23.43  inline void G1CollectedHeap::reset_gc_time_stamp() {
   23.44 @@ -88,10 +91,9 @@
   23.45    return r != NULL && r->in_collection_set();
   23.46  }
   23.47  
   23.48 -inline HeapWord*
   23.49 -G1CollectedHeap::attempt_allocation(size_t word_size,
   23.50 -                                    unsigned int* gc_count_before_ret,
   23.51 -                                    int* gclocker_retry_count_ret) {
   23.52 +inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
   23.53 +                                                     unsigned int* gc_count_before_ret,
   23.54 +                                                     int* gclocker_retry_count_ret) {
   23.55    assert_heap_not_locked_and_not_at_safepoint();
   23.56    assert(!isHumongous(word_size), "attempt_allocation() should not "
   23.57           "be called for humongous allocation requests");
   23.58 @@ -154,8 +156,7 @@
   23.59    // have to keep calling heap_region_containing_raw() in the
   23.60    // asserts below.
   23.61    DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
   23.62 -  assert(containing_hr != NULL && start != NULL && word_size > 0,
   23.63 -         "pre-condition");
   23.64 +  assert(word_size > 0, "pre-condition");
   23.65    assert(containing_hr->is_in(start), "it should contain start");
   23.66    assert(containing_hr->is_young(), "it should be young");
   23.67    assert(!containing_hr->isHumongous(), "it should not be humongous");
   23.68 @@ -252,8 +253,7 @@
   23.69    }
   23.70  }
   23.71  
   23.72 -inline bool
   23.73 -G1CollectedHeap::evacuation_should_fail() {
   23.74 +inline bool G1CollectedHeap::evacuation_should_fail() {
   23.75    if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
   23.76      return false;
   23.77    }
   23.78 @@ -277,8 +277,10 @@
   23.79  #endif  // #ifndef PRODUCT
   23.80  
   23.81  inline bool G1CollectedHeap::is_in_young(const oop obj) {
   23.82 -  HeapRegion* hr = heap_region_containing(obj);
   23.83 -  return hr != NULL && hr->is_young();
   23.84 +  if (obj == NULL) {
   23.85 +    return false;
   23.86 +  }
   23.87 +  return heap_region_containing(obj)->is_young();
   23.88  }
   23.89  
   23.90  // We don't need barriers for initializing stores to objects
   23.91 @@ -291,21 +293,17 @@
   23.92  }
   23.93  
   23.94  inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
   23.95 -  const HeapRegion* hr = heap_region_containing(obj);
   23.96 -  if (hr == NULL) {
   23.97 -    if (obj == NULL) return false;
   23.98 -    else return true;
   23.99 +  if (obj == NULL) {
  23.100 +    return false;
  23.101    }
  23.102 -  else return is_obj_dead(obj, hr);
  23.103 +  return is_obj_dead(obj, heap_region_containing(obj));
  23.104  }
  23.105  
  23.106  inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  23.107 -  const HeapRegion* hr = heap_region_containing(obj);
  23.108 -  if (hr == NULL) {
  23.109 -    if (obj == NULL) return false;
  23.110 -    else return true;
  23.111 +  if (obj == NULL) {
  23.112 +    return false;
  23.113    }
  23.114 -  else return is_obj_ill(obj, hr);
  23.115 +  return is_obj_ill(obj, heap_region_containing(obj));
  23.116  }
  23.117  
  23.118  inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
    24.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Aug 22 13:24:04 2014 +0200
    24.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Aug 26 13:38:33 2014 -0700
    24.3 @@ -455,7 +455,7 @@
    24.4    } else {
    24.5      _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
    24.6    }
    24.7 -  _free_regions_at_end_of_collection = _g1->free_regions();
    24.8 +  _free_regions_at_end_of_collection = _g1->num_free_regions();
    24.9    update_young_list_target_length();
   24.10  
   24.11    // We may immediately start allocating regions and placing them on the
   24.12 @@ -828,7 +828,7 @@
   24.13  
   24.14    record_survivor_regions(0, NULL, NULL);
   24.15  
   24.16 -  _free_regions_at_end_of_collection = _g1->free_regions();
   24.17 +  _free_regions_at_end_of_collection = _g1->num_free_regions();
   24.18    // Reset survivors SurvRateGroup.
   24.19    _survivor_surv_rate_group->reset();
   24.20    update_young_list_target_length();
   24.21 @@ -1180,7 +1180,7 @@
   24.22  
   24.23    _in_marking_window = new_in_marking_window;
   24.24    _in_marking_window_im = new_in_marking_window_im;
   24.25 -  _free_regions_at_end_of_collection = _g1->free_regions();
   24.26 +  _free_regions_at_end_of_collection = _g1->num_free_regions();
   24.27    update_young_list_target_length();
   24.28  
   24.29    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   24.30 @@ -1202,7 +1202,7 @@
   24.31    _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   24.32    _heap_capacity_bytes_before_gc = _g1->capacity();
   24.33    _heap_used_bytes_before_gc = _g1->used();
   24.34 -  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
   24.35 +  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
   24.36  
   24.37    _eden_capacity_bytes_before_gc =
   24.38           (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
   24.39 @@ -1617,7 +1617,7 @@
   24.40  G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
   24.41    _collectionSetChooser->clear();
   24.42  
   24.43 -  uint region_num = _g1->n_regions();
   24.44 +  uint region_num = _g1->num_regions();
   24.45    if (G1CollectedHeap::use_parallel_gc_threads()) {
   24.46      const uint OverpartitionFactor = 4;
   24.47      uint WorkUnit;
   24.48 @@ -1638,7 +1638,7 @@
   24.49          MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
   24.50               MinWorkUnit);
   24.51      }
   24.52 -    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
   24.53 +    _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
   24.54                                                             WorkUnit);
   24.55      ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
   24.56                                              (int) WorkUnit);
   24.57 @@ -1935,7 +1935,7 @@
   24.58    // of them are available.
   24.59  
   24.60    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   24.61 -  const size_t region_num = g1h->n_regions();
   24.62 +  const size_t region_num = g1h->num_regions();
   24.63    const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
   24.64    size_t result = region_num * perc / 100;
   24.65    // emulate ceiling
    25.1 --- a/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Fri Aug 22 13:24:04 2014 +0200
    25.2 +++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Tue Aug 26 13:38:33 2014 -0700
    25.3 @@ -33,7 +33,7 @@
    25.4  G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
    25.5    _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
    25.6  
    25.7 -void G1HotCardCache::initialize() {
    25.8 +void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
    25.9    if (default_use_cache()) {
   25.10      _use_cache = true;
   25.11  
   25.12 @@ -49,7 +49,7 @@
   25.13      _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
   25.14      _hot_cache_par_claimed_idx = 0;
   25.15  
   25.16 -    _card_counts.initialize();
   25.17 +    _card_counts.initialize(card_counts_storage);
   25.18    }
   25.19  }
   25.20  
   25.21 @@ -135,11 +135,8 @@
   25.22    // above, are discarded prior to re-enabling the cache near the end of the GC.
   25.23  }
   25.24  
   25.25 -void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
   25.26 -  _card_counts.resize(heap_capacity);
   25.27 -}
   25.28 -
   25.29  void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
   25.30 +  assert(!hr->isHumongous(), "Should have been cleared");
   25.31    _card_counts.clear_region(hr);
   25.32  }
   25.33  
    26.1 --- a/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Fri Aug 22 13:24:04 2014 +0200
    26.2 +++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Tue Aug 26 13:38:33 2014 -0700
    26.3 @@ -78,7 +78,7 @@
    26.4    G1HotCardCache(G1CollectedHeap* g1h);
    26.5    ~G1HotCardCache();
    26.6  
    26.7 -  void initialize();
    26.8 +  void initialize(G1RegionToSpaceMapper* card_counts_storage);
    26.9  
   26.10    bool use_cache() { return _use_cache; }
   26.11  
   26.12 @@ -115,9 +115,6 @@
   26.13  
   26.14    bool hot_cache_is_empty() { return _n_hot == 0; }
   26.15  
   26.16 -  // Resizes the card counts table to match the given capacity
   26.17 -  void resize_card_counts(size_t heap_capacity);
   26.18 -
   26.19    // Zeros the values in the card counts table for entire committed heap
   26.20    void reset_card_counts();
   26.21  
    27.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Fri Aug 22 13:24:04 2014 +0200
    27.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Aug 26 13:38:33 2014 -0700
    27.3 @@ -130,9 +130,7 @@
    27.4    if (!oopDesc::is_null(heap_oop)) {
    27.5      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    27.6      HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
    27.7 -    if (hr != NULL) {
    27.8 -      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
    27.9 -    }
   27.10 +    _cm->grayRoot(obj, obj->size(), _worker_id, hr);
   27.11    }
   27.12  }
   27.13  
   27.14 @@ -159,57 +157,61 @@
   27.15  template <class T>
   27.16  inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
   27.17    oop obj = oopDesc::load_decode_heap_oop(p);
   27.18 +  if (obj == NULL) {
   27.19 +    return;
   27.20 +  }
   27.21  #ifdef ASSERT
   27.22    // can't do because of races
   27.23    // assert(obj == NULL || obj->is_oop(), "expected an oop");
   27.24  
   27.25    // Do the safe subset of is_oop
   27.26 -  if (obj != NULL) {
   27.27  #ifdef CHECK_UNHANDLED_OOPS
   27.28 -    oopDesc* o = obj.obj();
   27.29 +  oopDesc* o = obj.obj();
   27.30  #else
   27.31 -    oopDesc* o = obj;
   27.32 +  oopDesc* o = obj;
   27.33  #endif // CHECK_UNHANDLED_OOPS
   27.34 -    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
   27.35 -    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
   27.36 -  }
   27.37 +  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
   27.38 +  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
   27.39  #endif // ASSERT
   27.40  
   27.41    assert(_from != NULL, "from region must be non-NULL");
   27.42    assert(_from->is_in_reserved(p), "p is not in from");
   27.43  
   27.44    HeapRegion* to = _g1->heap_region_containing(obj);
   27.45 -  if (to != NULL && _from != to) {
   27.46 -    // The _record_refs_into_cset flag is true during the RSet
   27.47 -    // updating part of an evacuation pause. It is false at all
   27.48 -    // other times:
   27.49 -    //  * rebuilding the rembered sets after a full GC
   27.50 -    //  * during concurrent refinement.
   27.51 -    //  * updating the remembered sets of regions in the collection
   27.52 -    //    set in the event of an evacuation failure (when deferred
   27.53 -    //    updates are enabled).
   27.54 +  if (_from == to) {
   27.55 +    // Normally this closure should only be called with cross-region references.
   27.56 +    // But since Java threads are manipulating the references concurrently and we
    27.57 +    // reload the values, things may have changed.
   27.58 +    return;
   27.59 +  }
   27.60 +  // The _record_refs_into_cset flag is true during the RSet
   27.61 +  // updating part of an evacuation pause. It is false at all
   27.62 +  // other times:
   27.63 +  //  * rebuilding the remembered sets after a full GC
   27.64 +  //  * during concurrent refinement.
   27.65 +  //  * updating the remembered sets of regions in the collection
   27.66 +  //    set in the event of an evacuation failure (when deferred
   27.67 +  //    updates are enabled).
   27.68  
   27.69 -    if (_record_refs_into_cset && to->in_collection_set()) {
   27.70 -      // We are recording references that point into the collection
   27.71 -      // set and this particular reference does exactly that...
   27.72 -      // If the referenced object has already been forwarded
   27.73 -      // to itself, we are handling an evacuation failure and
   27.74 -      // we have already visited/tried to copy this object
   27.75 -      // there is no need to retry.
   27.76 -      if (!self_forwarded(obj)) {
   27.77 -        assert(_push_ref_cl != NULL, "should not be null");
   27.78 -        // Push the reference in the refs queue of the G1ParScanThreadState
   27.79 -        // instance for this worker thread.
   27.80 -        _push_ref_cl->do_oop(p);
   27.81 -      }
   27.82 +  if (_record_refs_into_cset && to->in_collection_set()) {
   27.83 +    // We are recording references that point into the collection
   27.84 +    // set and this particular reference does exactly that...
   27.85 +    // If the referenced object has already been forwarded
   27.86 +    // to itself, we are handling an evacuation failure and
    27.87 +    // we have already visited/tried to copy this object;
    27.88 +    // there is no need to retry.
   27.89 +    if (!self_forwarded(obj)) {
   27.90 +      assert(_push_ref_cl != NULL, "should not be null");
   27.91 +      // Push the reference in the refs queue of the G1ParScanThreadState
   27.92 +      // instance for this worker thread.
   27.93 +      _push_ref_cl->do_oop(p);
    27.94 +    }
   27.95  
   27.96 -      // Deferred updates to the CSet are either discarded (in the normal case),
   27.97 -      // or processed (if an evacuation failure occurs) at the end
   27.98 -      // of the collection.
   27.99 -      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
  27.100 -      return;
  27.101 -    }
  27.102 -
  27.103 +    // Deferred updates to the CSet are either discarded (in the normal case),
  27.104 +    // or processed (if an evacuation failure occurs) at the end
  27.105 +    // of the collection.
  27.106 +    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
  27.107 +  } else {
  27.108      // We either don't care about pushing references that point into the
  27.109      // collection set (i.e. we're not during an evacuation pause) _or_
  27.110      // the reference doesn't point into the collection set. Either way
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Tue Aug 26 13:38:33 2014 -0700
    28.3 @@ -0,0 +1,167 @@
    28.4 +/*
    28.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    28.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.7 + *
    28.8 + * This code is free software; you can redistribute it and/or modify it
    28.9 + * under the terms of the GNU General Public License version 2 only, as
   28.10 + * published by the Free Software Foundation.
   28.11 + *
   28.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   28.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   28.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   28.15 + * version 2 for more details (a copy is included in the LICENSE file that
   28.16 + * accompanied this code).
   28.17 + *
   28.18 + * You should have received a copy of the GNU General Public License version
   28.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   28.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   28.21 + *
   28.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   28.23 + * or visit www.oracle.com if you need additional information or have any
   28.24 + * questions.
   28.25 + *
   28.26 + */
   28.27 +
   28.28 +#include "precompiled.hpp"
   28.29 +#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
   28.30 +#include "oops/markOop.hpp"
   28.31 +#include "oops/oop.inline.hpp"
   28.32 +#include "services/memTracker.hpp"
   28.33 +#ifdef TARGET_OS_FAMILY_linux
   28.34 +# include "os_linux.inline.hpp"
   28.35 +#endif
   28.36 +#ifdef TARGET_OS_FAMILY_solaris
   28.37 +# include "os_solaris.inline.hpp"
   28.38 +#endif
   28.39 +#ifdef TARGET_OS_FAMILY_windows
   28.40 +# include "os_windows.inline.hpp"
   28.41 +#endif
   28.42 +#ifdef TARGET_OS_FAMILY_aix
   28.43 +# include "os_aix.inline.hpp"
   28.44 +#endif
   28.45 +#ifdef TARGET_OS_FAMILY_bsd
   28.46 +# include "os_bsd.inline.hpp"
   28.47 +#endif
   28.48 +#include "utilities/bitMap.inline.hpp"
   28.49 +
   28.50 +G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
   28.51 +  _high_boundary(NULL), _committed(), _page_size(0), _special(false), _executable(false) {
   28.52 +}
   28.53 +
   28.54 +bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
   28.55 +  if (!rs.is_reserved()) {
   28.56 +    return false;  // Allocation failed.
   28.57 +  }
   28.58 +  assert(_low_boundary == NULL, "VirtualSpace already initialized");
   28.59 +  assert(page_size > 0, "Granularity must be non-zero.");
   28.60 +
   28.61 +  _low_boundary  = rs.base();
   28.62 +  _high_boundary = _low_boundary + rs.size();
   28.63 +
   28.64 +  _special = rs.special();
   28.65 +  _executable = rs.executable();
   28.66 +
   28.67 +  _page_size = page_size;
   28.68 +
   28.69 +  assert(_committed.size() == 0, "virtual space initialized more than once");
   28.70 +  uintx size_in_bits = rs.size() / page_size;
   28.71 +  _committed.resize(size_in_bits, /* in_resource_area */ false);
   28.72 +
   28.73 +  return true;
   28.74 +}
   28.75 +
   28.76 +
   28.77 +G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
   28.78 +  release();
   28.79 +}
   28.80 +
   28.81 +void G1PageBasedVirtualSpace::release() {
   28.82 +  // This does not release memory it never reserved.
   28.83 +  // Caller must release via rs.release();
   28.84 +  _low_boundary           = NULL;
   28.85 +  _high_boundary          = NULL;
   28.86 +  _special                = false;
   28.87 +  _executable             = false;
   28.88 +  _page_size              = 0;
   28.89 +  _committed.resize(0, false);
   28.90 +}
   28.91 +
   28.92 +size_t G1PageBasedVirtualSpace::committed_size() const {
   28.93 +  return _committed.count_one_bits() * _page_size;
   28.94 +}
   28.95 +
   28.96 +size_t G1PageBasedVirtualSpace::reserved_size() const {
   28.97 +  return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
   28.98 +}
   28.99 +
  28.100 +size_t G1PageBasedVirtualSpace::uncommitted_size()  const {
  28.101 +  return reserved_size() - committed_size();
  28.102 +}
  28.103 +
  28.104 +uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
  28.105 +  return (addr - _low_boundary) / _page_size;
  28.106 +}
  28.107 +
  28.108 +bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
  28.109 +  uintptr_t end = start + size_in_pages;
  28.110 +  return _committed.get_next_zero_offset(start, end) >= end;
  28.111 +}
  28.112 +
  28.113 +bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
  28.114 +  uintptr_t end = start + size_in_pages;
  28.115 +  return _committed.get_next_one_offset(start, end) >= end;
  28.116 +}
  28.117 +
  28.118 +char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
  28.119 +  return _low_boundary + index * _page_size;
  28.120 +}
  28.121 +
  28.122 +size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
  28.123 +  return num * _page_size;
  28.124 +}
  28.125 +
  28.126 +MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
  28.127 +  // We need to make sure to commit all pages covered by the given area.
  28.128 +  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
  28.129 +
  28.130 +  if (!_special) {
  28.131 +    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
  28.132 +                              err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
  28.133 +  }
  28.134 +  _committed.set_range(start, start + size_in_pages);
  28.135 +
  28.136 +  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
  28.137 +  return result;
  28.138 +}
  28.139 +
  28.140 +MemRegion G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
  28.141 +  guarantee(is_area_committed(start, size_in_pages), "checking");
  28.142 +
  28.143 +  if (!_special) {
  28.144 +    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
  28.145 +  }
  28.146 +
  28.147 +  _committed.clear_range(start, start + size_in_pages);
  28.148 +
  28.149 +  MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
  28.150 +  return result;
  28.151 +}
  28.152 +
  28.153 +bool G1PageBasedVirtualSpace::contains(const void* p) const {
  28.154 +  return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
  28.155 +}
  28.156 +
  28.157 +#ifndef PRODUCT
  28.158 +void G1PageBasedVirtualSpace::print_on(outputStream* out) {
  28.159 +  out->print   ("Virtual space:");
  28.160 +  if (special()) out->print(" (pinned in memory)");
  28.161 +  out->cr();
  28.162 +  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  28.163 +  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  28.164 +  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_low_boundary), p2i(_high_boundary));
  28.165 +}
  28.166 +
  28.167 +void G1PageBasedVirtualSpace::print() {
  28.168 +  print_on(tty);
  28.169 +}
  28.170 +#endif
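
g1PageBasedVirtualSpace.cpp tracks committed memory with one bit per OS page and converts between addresses and page indices relative to the low boundary. A minimal standalone sketch of that bookkeeping, with illustrative types (not the HotSpot classes):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // One bit per page; committed size is the number of set bits times the page size.
    struct PageBookkeeping {
      char*  low;                    // start of the reserved range
      size_t page_size;              // commit granularity in bytes
      std::vector<bool> committed;   // one bit per page

      PageBookkeeping(char* low_, size_t page_size_, size_t num_pages)
        : low(low_), page_size(page_size_), committed(num_pages, false) {}

      size_t addr_to_page_index(char* addr) const { return (size_t)(addr - low) / page_size; }
      char*  page_start(size_t index) const       { return low + index * page_size; }

      void commit(size_t start, size_t n)   { for (size_t i = 0; i < n; i++) committed[start + i] = true;  }
      void uncommit(size_t start, size_t n) { for (size_t i = 0; i < n; i++) committed[start + i] = false; }

      size_t committed_bytes() const {       // count_one_bits() * _page_size analogue
        size_t pages = 0;
        for (size_t i = 0; i < committed.size(); i++) pages += committed[i] ? 1 : 0;
        return pages * page_size;
      }
    };

    int main() {
      const size_t page = 4096;
      static char backing[16 * 4096];        // stands in for the reserved space
      PageBookkeeping vs(backing, page, 16);
      vs.commit(2, 3);                       // commit pages 2, 3 and 4
      assert(vs.committed_bytes() == 3 * page);
      assert(vs.addr_to_page_index(vs.page_start(5)) == 5);
      vs.uncommit(3, 1);
      assert(vs.committed_bytes() == 2 * page);
      return 0;
    }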
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Tue Aug 26 13:38:33 2014 -0700
    29.3 @@ -0,0 +1,111 @@
    29.4 +/*
    29.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    29.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.7 + *
    29.8 + * This code is free software; you can redistribute it and/or modify it
    29.9 + * under the terms of the GNU General Public License version 2 only, as
   29.10 + * published by the Free Software Foundation.
   29.11 + *
   29.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   29.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   29.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   29.15 + * version 2 for more details (a copy is included in the LICENSE file that
   29.16 + * accompanied this code).
   29.17 + *
   29.18 + * You should have received a copy of the GNU General Public License version
   29.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   29.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   29.21 + *
   29.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   29.23 + * or visit www.oracle.com if you need additional information or have any
   29.24 + * questions.
   29.25 + *
   29.26 + */
   29.27 +
   29.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
   29.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
   29.30 +
   29.31 +#include "memory/allocation.hpp"
   29.32 +#include "memory/memRegion.hpp"
   29.33 +#include "runtime/virtualspace.hpp"
   29.34 +#include "utilities/bitMap.hpp"
   29.35 +
   29.36 +// Virtual space management helper for a virtual space with an OS page allocation
   29.37 +// granularity.
   29.38 +// (De-)Allocation requests are always OS page aligned by passing a page index
   29.39 +// and multiples of pages.
   29.40 +// The implementation gives an error when trying to commit or uncommit pages that
   29.41 +// have already been committed or uncommitted.
   29.42 +class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
   29.43 +  friend class VMStructs;
   29.44 + private:
   29.45 +  // Reserved area addresses.
   29.46 +  char* _low_boundary;
   29.47 +  char* _high_boundary;
   29.48 +
   29.49 +  // The commit/uncommit granularity in bytes.
   29.50 +  size_t _page_size;
   29.51 +
   29.52 +  // Bitmap used for verification of commit/uncommit operations.
   29.53 +  BitMap _committed;
   29.54 +
    29.55 +  // Indicates that the entire space has been committed and pinned in memory;
    29.56 +  // os::commit_memory() and os::uncommit_memory() have no effect on it.
   29.57 +  bool _special;
   29.58 +
   29.59 +  // Indicates whether the committed space should be executable.
   29.60 +  bool _executable;
   29.61 +
   29.62 +  // Returns the index of the page which contains the given address.
   29.63 +  uintptr_t  addr_to_page_index(char* addr) const;
   29.64 +  // Returns the address of the given page index.
   29.65 +  char*  page_start(uintptr_t index);
   29.66 +  // Returns the byte size of the given number of pages.
   29.67 +  size_t byte_size_for_pages(size_t num);
   29.68 +
   29.69 +  // Returns true if the entire area is backed by committed memory.
   29.70 +  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
   29.71 +  // Returns true if the entire area is not backed by committed memory.
   29.72 +  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
   29.73 +
   29.74 + public:
   29.75 +
   29.76 +  // Commit the given area of pages starting at start being size_in_pages large.
   29.77 +  MemRegion commit(uintptr_t start, size_t size_in_pages);
   29.78 +
   29.79 +  // Uncommit the given area of pages starting at start being size_in_pages large.
   29.80 +  MemRegion uncommit(uintptr_t start, size_t size_in_pages);
   29.81 +
   29.82 +  bool special() const { return _special; }
   29.83 +
   29.84 +  // Initialization
   29.85 +  G1PageBasedVirtualSpace();
   29.86 +  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
   29.87 +
   29.88 +  // Destruction
   29.89 +  ~G1PageBasedVirtualSpace();
   29.90 +
   29.91 +  // Amount of reserved memory.
   29.92 +  size_t reserved_size() const;
   29.93 +  // Memory used in this virtual space.
   29.94 +  size_t committed_size() const;
   29.95 +  // Memory left to use/expand in this virtual space.
   29.96 +  size_t uncommitted_size() const;
   29.97 +
   29.98 +  bool contains(const void* p) const;
   29.99 +
  29.100 +  MemRegion reserved() {
  29.101 +    MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize);
  29.102 +    return x;
  29.103 +  }
  29.104 +
  29.105 +  void release();
  29.106 +
  29.107 +  void check_for_contiguity() PRODUCT_RETURN;
  29.108 +
  29.109 +  // Debugging
  29.110 +  void print_on(outputStream* out) PRODUCT_RETURN;
  29.111 +  void print();
  29.112 +};
  29.113 +
  29.114 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
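
For a sense of scale, the commit bitmap sized in initialize_with_granularity() needs one bit per commit granule of the reservation. A small worked example, with the numbers chosen purely for illustration:

    #include <cassert>
    #include <cstddef>

    int main() {
      // One bit per commit granule (OS page) of the reserved range; the sizes
      // below are only an example.
      const size_t reserved     = 1024u * 1024u * 1024u;  // a 1 GB reservation
      const size_t page_size    = 4u * 1024u;             // 4 KB commit granularity
      const size_t size_in_bits = reserved / page_size;
      assert(size_in_bits == 262144);                     // 256K pages to track
      assert(size_in_bits / 8 == 32u * 1024u);            // roughly 32 KB of bitmap
      return 0;
    }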
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	Tue Aug 26 13:38:33 2014 -0700
    30.3 @@ -0,0 +1,159 @@
    30.4 +/*
    30.5 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    30.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.7 + *
    30.8 + * This code is free software; you can redistribute it and/or modify it
    30.9 + * under the terms of the GNU General Public License version 2 only, as
   30.10 + * published by the Free Software Foundation.
   30.11 + *
   30.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   30.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   30.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   30.15 + * version 2 for more details (a copy is included in the LICENSE file that
   30.16 + * accompanied this code).
   30.17 + *
   30.18 + * You should have received a copy of the GNU General Public License version
   30.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   30.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   30.21 + *
   30.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   30.23 + * or visit www.oracle.com if you need additional information or have any
   30.24 + * questions.
   30.25 + *
   30.26 + */
   30.27 +
   30.28 +#include "precompiled.hpp"
   30.29 +#include "gc_implementation/g1/g1BiasedArray.hpp"
   30.30 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
   30.31 +#include "memory/allocation.inline.hpp"
   30.32 +#include "runtime/virtualspace.hpp"
   30.33 +#include "services/memTracker.hpp"
   30.34 +#include "utilities/bitMap.inline.hpp"
   30.35 +
   30.36 +G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
   30.37 +                                             size_t commit_granularity,
   30.38 +                                             size_t region_granularity,
   30.39 +                                             MemoryType type) :
   30.40 +  _storage(),
   30.41 +  _commit_granularity(commit_granularity),
   30.42 +  _region_granularity(region_granularity),
   30.43 +  _listener(NULL),
   30.44 +  _commit_map() {
   30.45 +  guarantee(is_power_of_2(commit_granularity), "must be");
   30.46 +  guarantee(is_power_of_2(region_granularity), "must be");
   30.47 +  _storage.initialize_with_granularity(rs, commit_granularity);
   30.48 +
   30.49 +  MemTracker::record_virtual_memory_type((address)rs.base(), type);
   30.50 +}
   30.51 +
   30.52 +// G1RegionToSpaceMapper implementation where the region granularity is larger than
   30.53 +// or the same as the commit granularity.
    30.54 +// Basically, the space corresponding to one region spans several OS pages.
   30.55 +class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
   30.56 + private:
   30.57 +  size_t _pages_per_region;
   30.58 +
   30.59 + public:
   30.60 +  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
   30.61 +                                      size_t os_commit_granularity,
   30.62 +                                      size_t alloc_granularity,
   30.63 +                                      size_t commit_factor,
   30.64 +                                      MemoryType type) :
   30.65 +     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
   30.66 +    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
   30.67 +
   30.68 +    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
   30.69 +    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
   30.70 +  }
   30.71 +
   30.72 +  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
   30.73 +    _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
   30.74 +    _commit_map.set_range(start_idx, start_idx + num_regions);
   30.75 +    fire_on_commit(start_idx, num_regions);
   30.76 +  }
   30.77 +
   30.78 +  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
   30.79 +    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
   30.80 +    _commit_map.clear_range(start_idx, start_idx + num_regions);
   30.81 +  }
   30.82 +};
   30.83 +
   30.84 +// G1RegionToSpaceMapper implementation where the region granularity is smaller
   30.85 +// than the commit granularity.
   30.86 +// Basically, the contents of one OS page span several regions.
   30.87 +class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
   30.88 + private:
   30.89 +  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
   30.90 +   protected:
   30.91 +     virtual uint default_value() const { return 0; }
   30.92 +  };
   30.93 +
   30.94 +  size_t _regions_per_page;
   30.95 +
   30.96 +  CommitRefcountArray _refcounts;
   30.97 +
   30.98 +  uintptr_t region_idx_to_page_idx(uint region) const {
   30.99 +    return region / _regions_per_page;
  30.100 +  }
  30.101 +
  30.102 + public:
  30.103 +  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
  30.104 +                                       size_t os_commit_granularity,
  30.105 +                                       size_t alloc_granularity,
  30.106 +                                       size_t commit_factor,
  30.107 +                                       MemoryType type) :
  30.108 +     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
  30.109 +    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
  30.110 +
  30.111 +    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
  30.112 +    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
  30.113 +    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  30.114 +  }
  30.115 +
  30.116 +  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
  30.117 +    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
  30.118 +      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
  30.119 +      uintptr_t idx = region_idx_to_page_idx(i);
  30.120 +      uint old_refcount = _refcounts.get_by_index(idx);
  30.121 +      if (old_refcount == 0) {
  30.122 +        _storage.commit(idx, 1);
  30.123 +      }
  30.124 +      _refcounts.set_by_index(idx, old_refcount + 1);
  30.125 +      _commit_map.set_bit(i);
  30.126 +      fire_on_commit(i, 1);
  30.127 +    }
  30.128 +  }
  30.129 +
  30.130 +  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
  30.131 +    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
  30.132 +      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
  30.133 +      uintptr_t idx = region_idx_to_page_idx(i);
  30.134 +      uint old_refcount = _refcounts.get_by_index(idx);
  30.135 +      assert(old_refcount > 0, "must be");
  30.136 +      if (old_refcount == 1) {
  30.137 +        _storage.uncommit(idx, 1);
  30.138 +      }
  30.139 +      _refcounts.set_by_index(idx, old_refcount - 1);
  30.140 +      _commit_map.clear_bit(i);
  30.141 +    }
  30.142 +  }
  30.143 +};
  30.144 +
  30.145 +void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions) {
  30.146 +  if (_listener != NULL) {
  30.147 +    _listener->on_commit(start_idx, num_regions);
  30.148 +  }
  30.149 +}
  30.150 +
  30.151 +G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
  30.152 +                                                            size_t os_commit_granularity,
  30.153 +                                                            size_t region_granularity,
  30.154 +                                                            size_t commit_factor,
  30.155 +                                                            MemoryType type) {
  30.156 +
  30.157 +  if (region_granularity >= (os_commit_granularity * commit_factor)) {
  30.158 +    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
  30.159 +  } else {
  30.160 +    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
  30.161 +  }
  30.162 +}
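
The G1RegionsSmallerThanCommitSizeMapper above keeps a reference count per commit granule so that a page shared by several regions is committed when its first region is committed and uncommitted only when its last region goes away. A minimal standalone sketch of that refcounting, with illustrative types (not the HotSpot classes):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct SharedPageCommitter {
      size_t regions_per_page;
      std::vector<unsigned> refcount;       // one counter per page
      std::vector<bool>     page_committed;

      SharedPageCommitter(size_t regions_per_page_, size_t num_pages)
        : regions_per_page(regions_per_page_),
          refcount(num_pages, 0),
          page_committed(num_pages, false) {}

      void commit_region(size_t region) {
        size_t page = region / regions_per_page;
        if (refcount[page]++ == 0) {        // first region on this page commits it
          page_committed[page] = true;
        }
      }

      void uncommit_region(size_t region) {
        size_t page = region / regions_per_page;
        assert(refcount[page] > 0);
        if (--refcount[page] == 0) {        // last region on this page releases it
          page_committed[page] = false;
        }
      }
    };

    int main() {
      SharedPageCommitter c(4, 2);          // e.g. 4 regions share one commit granule
      c.commit_region(0);
      c.commit_region(1);
      assert(c.page_committed[0] && c.refcount[0] == 2);  // committed once, counted twice
      c.uncommit_region(0);
      assert(c.page_committed[0]);                        // still in use by region 1
      c.uncommit_region(1);
      assert(!c.page_committed[0]);                       // last user gone, page released
      return 0;
    }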
    31.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.2 +++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	Tue Aug 26 13:38:33 2014 -0700
    31.3 @@ -0,0 +1,83 @@
    31.4 +/*
    31.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    31.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    31.7 + *
    31.8 + * This code is free software; you can redistribute it and/or modify it
    31.9 + * under the terms of the GNU General Public License version 2 only, as
   31.10 + * published by the Free Software Foundation.
   31.11 + *
   31.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   31.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   31.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   31.15 + * version 2 for more details (a copy is included in the LICENSE file that
   31.16 + * accompanied this code).
   31.17 + *
   31.18 + * You should have received a copy of the GNU General Public License version
   31.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   31.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   31.21 + *
   31.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   31.23 + * or visit www.oracle.com if you need additional information or have any
   31.24 + * questions.
   31.25 + *
   31.26 + */
   31.27 +
   31.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
   31.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
   31.30 +
   31.31 +#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
   31.32 +#include "memory/allocation.hpp"
   31.33 +#include "utilities/debug.hpp"
   31.34 +
   31.35 +class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
   31.36 + public:
   31.37 +  // Fired after commit of the memory, i.e. the memory this listener is registered
   31.38 +  // for can be accessed.
   31.39 +  virtual void on_commit(uint start_idx, size_t num_regions) = 0;
   31.40 +};
   31.41 +
   31.42 +// Maps region based commit/uncommit requests to the underlying page sized virtual
   31.43 +// space.
   31.44 +class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
   31.45 + private:
   31.46 +  G1MappingChangedListener* _listener;
   31.47 + protected:
   31.48 +  // Backing storage.
   31.49 +  G1PageBasedVirtualSpace _storage;
   31.50 +  size_t _commit_granularity;
   31.51 +  size_t _region_granularity;
   31.52 +  // Mapping management
   31.53 +  BitMap _commit_map;
   31.54 +
   31.55 +  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
   31.56 +
   31.57 +  void fire_on_commit(uint start_idx, size_t num_regions);
   31.58 + public:
   31.59 +  MemRegion reserved() { return _storage.reserved(); }
   31.60 +
   31.61 +  void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
   31.62 +
   31.63 +  virtual ~G1RegionToSpaceMapper() {
   31.64 +    _commit_map.resize(0, /* in_resource_area */ false);
   31.65 +  }
   31.66 +
   31.67 +  bool is_committed(uintptr_t idx) const {
   31.68 +    return _commit_map.at(idx);
   31.69 +  }
   31.70 +
   31.71 +  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
   31.72 +  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
   31.73 +
   31.74 +  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
   31.75 +  // The byte_translation_factor defines how many bytes in a region correspond to
   31.76 +  // a single byte in the data structure this mapper is for.
    31.77 +  // E.g. for the card table, this value is the number of heap bytes covered
    31.78 +  // by a single card table entry.
   31.79 +  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
   31.80 +                                              size_t os_commit_granularity,
   31.81 +                                              size_t region_granularity,
   31.82 +                                              size_t byte_translation_factor,
   31.83 +                                              MemoryType type);
   31.84 +};
   31.85 +
   31.86 +#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
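
create_mapper() picks an implementation by comparing the region granularity against os_commit_granularity * commit_factor. A small worked example of that decision, assuming typical (but not mandatory) values of 1 MB regions, 4 KB pages and a 512-bytes-per-card translation factor:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t region_granularity    = 1024u * 1024u;  // assumed 1 MB heap regions
      const size_t os_commit_granularity = 4u * 1024u;     // assumed 4 KB OS pages

      // Heap storage itself: one byte of storage per byte of heap (factor 1),
      // so one region spans many pages -> "regions larger than commit size".
      size_t commit_factor = 1;
      assert(region_granularity >= os_commit_granularity * commit_factor);

      // An auxiliary table with one byte per 512 heap bytes: one committed page
      // then covers 4 KB * 512 = 2 MB of heap, i.e. two regions share a page
      // -> "regions smaller than commit size", which needs the refcounting mapper.
      commit_factor = 512;
      assert(region_granularity < os_commit_granularity * commit_factor);
      return 0;
    }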
    32.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Aug 22 13:24:04 2014 +0200
    32.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Aug 26 13:38:33 2014 -0700
    32.3 @@ -211,7 +211,6 @@
    32.4  #endif
    32.5  
    32.6        HeapRegion* card_region = _g1h->heap_region_containing(card_start);
    32.7 -      assert(card_region != NULL, "Yielding cards not in the heap?");
    32.8        _cards++;
    32.9  
   32.10        if (!card_region->is_on_dirty_cards_region_list()) {
   32.11 @@ -406,7 +405,6 @@
   32.12      HeapWord* start = _ct_bs->addr_for(card_ptr);
   32.13      // And find the region containing it.
   32.14      HeapRegion* r = _g1->heap_region_containing(start);
   32.15 -    assert(r != NULL, "unexpected null");
   32.16  
   32.17      // Scan oops in the card looking for references into the collection set
   32.18      // Don't use addr_for(card_ptr + 1) which can ask for
   32.19 @@ -556,6 +554,12 @@
   32.20  
   32.21  bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
   32.22                             bool check_for_refs_into_cset) {
   32.23 +  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
   32.24 +         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
   32.25 +                 p2i(card_ptr),
   32.26 +                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
   32.27 +                 _ct_bs->addr_for(card_ptr),
   32.28 +                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));
   32.29  
   32.30    // If the card is no longer dirty, nothing to do.
   32.31    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
   32.32 @@ -568,11 +572,6 @@
   32.33    HeapWord* start = _ct_bs->addr_for(card_ptr);
   32.34    // And find the region containing it.
   32.35    HeapRegion* r = _g1->heap_region_containing(start);
   32.36 -  if (r == NULL) {
   32.37 -    // Again no need to return that this card contains refs that
   32.38 -    // point into the collection set.
   32.39 -    return false;  // Not in the G1 heap (might be in perm, for example.)
   32.40 -  }
   32.41  
   32.42    // Why do we have to check here whether a card is on a young region,
   32.43    // given that we dirty young regions and, as a result, the
   32.44 @@ -625,10 +624,6 @@
   32.45  
   32.46      start = _ct_bs->addr_for(card_ptr);
   32.47      r = _g1->heap_region_containing(start);
   32.48 -    if (r == NULL) {
   32.49 -      // Not in the G1 heap
   32.50 -      return false;
   32.51 -    }
   32.52  
   32.53      // Checking whether the region we got back from the cache
   32.54      // is young here is inappropriate. The region could have been
    33.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Fri Aug 22 13:24:04 2014 +0200
    33.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Tue Aug 26 13:38:33 2014 -0700
    33.3 @@ -46,26 +46,28 @@
    33.4  template <class T>
    33.5  inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
    33.6    oop obj = oopDesc::load_decode_heap_oop(p);
    33.7 +  if (obj == NULL) {
    33.8 +    return;
    33.9 +  }
   33.10 +
   33.11  #ifdef ASSERT
   33.12    // can't do because of races
   33.13    // assert(obj == NULL || obj->is_oop(), "expected an oop");
   33.14  
   33.15    // Do the safe subset of is_oop
   33.16 -  if (obj != NULL) {
   33.17  #ifdef CHECK_UNHANDLED_OOPS
   33.18 -    oopDesc* o = obj.obj();
   33.19 +  oopDesc* o = obj.obj();
   33.20  #else
   33.21 -    oopDesc* o = obj;
   33.22 +  oopDesc* o = obj;
   33.23  #endif // CHECK_UNHANDLED_OOPS
   33.24 -    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
   33.25 -    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
   33.26 -  }
   33.27 +  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
   33.28 +  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
   33.29  #endif // ASSERT
   33.30  
   33.31    assert(from == NULL || from->is_in_reserved(p), "p is not in from");
   33.32  
   33.33    HeapRegion* to = _g1->heap_region_containing(obj);
   33.34 -  if (to != NULL && from != to) {
   33.35 +  if (from != to) {
   33.36      assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
   33.37      to->rem_set()->add_reference(p, tid);
   33.38    }
    34.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Aug 22 13:24:04 2014 +0200
    34.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Tue Aug 26 13:38:33 2014 -0700
    34.3 @@ -23,6 +23,7 @@
    34.4   */
    34.5  
    34.6  #include "precompiled.hpp"
    34.7 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    34.8  #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    34.9  #include "gc_implementation/g1/heapRegion.hpp"
   34.10  #include "gc_implementation/g1/satbQueue.hpp"
   34.11 @@ -37,7 +38,6 @@
   34.12    _kind = G1SATBCT;
   34.13  }
   34.14  
   34.15 -
   34.16  void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   34.17    // Nulls should have been already filtered.
   34.18    assert(pre_val->is_oop(true), "Error");
   34.19 @@ -124,13 +124,52 @@
   34.20  }
   34.21  #endif
   34.22  
   34.23 +void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) {
   34.24 +  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
   34.25 +  _card_table->clear(mr);
   34.26 +}
   34.27 +
   34.28  G1SATBCardTableLoggingModRefBS::
   34.29  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
   34.30                                 int max_covered_regions) :
   34.31    G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
   34.32 -  _dcqs(JavaThread::dirty_card_queue_set())
   34.33 +  _dcqs(JavaThread::dirty_card_queue_set()),
   34.34 +  _listener()
   34.35  {
   34.36    _kind = G1SATBCTLogging;
   34.37 +  _listener.set_card_table(this);
   34.38 +}
   34.39 +
   34.40 +void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
   34.41 +  mapper->set_mapping_changed_listener(&_listener);
   34.42 +
   34.43 +  _byte_map_size = mapper->reserved().byte_size();
   34.44 +
   34.45 +  _guard_index = cards_required(_whole_heap.word_size()) - 1;
   34.46 +  _last_valid_index = _guard_index - 1;
   34.47 +
   34.48 +  HeapWord* low_bound  = _whole_heap.start();
   34.49 +  HeapWord* high_bound = _whole_heap.end();
   34.50 +
   34.51 +  _cur_covered_regions = 1;
   34.52 +  _covered[0] = _whole_heap;
   34.53 +
   34.54 +  _byte_map = (jbyte*) mapper->reserved().start();
   34.55 +  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
   34.56 +  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
   34.57 +  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
   34.58 +
   34.59 +  if (TraceCardTableModRefBS) {
   34.60 +    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
   34.61 +    gclog_or_tty->print_cr("  "
   34.62 +                  "  &_byte_map[0]: " INTPTR_FORMAT
   34.63 +                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
   34.64 +                  p2i(&_byte_map[0]),
   34.65 +                  p2i(&_byte_map[_last_valid_index]));
   34.66 +    gclog_or_tty->print_cr("  "
   34.67 +                  "  byte_map_base: " INTPTR_FORMAT,
   34.68 +                  p2i(byte_map_base));
   34.69 +  }
   34.70  }
   34.71  
   34.72  void
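
The card table above registers a small listener object with the region-to-space mapper so that freshly committed card-table memory is cleared before use. A minimal standalone sketch of that wiring, with illustrative types (not the HotSpot classes):

    #include <cassert>
    #include <cstddef>

    class MappingChangedListener {
     public:
      virtual ~MappingChangedListener() {}
      virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
    };

    class Mapper {
      MappingChangedListener* _listener;
     public:
      Mapper() : _listener(NULL) {}
      void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
      void commit_regions(unsigned start_idx, size_t num_regions) {
        // ... commit the backing pages here ...
        if (_listener != NULL) {
          _listener->on_commit(start_idx, num_regions);  // fire_on_commit() analogue
        }
      }
    };

    class CardTable;

    class CardTableListener : public MappingChangedListener {
      CardTable* _card_table;
     public:
      CardTableListener() : _card_table(NULL) {}
      void set_card_table(CardTable* ct) { _card_table = ct; }
      virtual void on_commit(unsigned start_idx, size_t num_regions);
    };

    class CardTable {
      CardTableListener _listener;
     public:
      unsigned cleared_regions;
      CardTable() : cleared_regions(0) { _listener.set_card_table(this); }
      CardTableListener* listener() { return &_listener; }
      void clear(unsigned start_idx, size_t num_regions) {
        (void)start_idx;                     // a real card table would clear the covered range
        cleared_regions += (unsigned)num_regions;
      }
    };

    void CardTableListener::on_commit(unsigned start_idx, size_t num_regions) {
      _card_table->clear(start_idx, num_regions);
    }

    int main() {
      CardTable ct;
      Mapper mapper;
      mapper.set_mapping_changed_listener(ct.listener());
      mapper.commit_regions(3, 2);           // callback clears the cards for regions 3 and 4
      assert(ct.cleared_regions == 2);
      return 0;
    }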
    35.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Aug 22 13:24:04 2014 +0200
    35.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Tue Aug 26 13:38:33 2014 -0700
    35.3 @@ -25,6 +25,7 @@
    35.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
    35.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
    35.6  
    35.7 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
    35.8  #include "memory/cardTableModRefBS.hpp"
    35.9  #include "memory/memRegion.hpp"
   35.10  #include "oops/oop.inline.hpp"
   35.11 @@ -33,6 +34,7 @@
   35.12  #if INCLUDE_ALL_GCS
   35.13  
   35.14  class DirtyCardQueueSet;
   35.15 +class G1SATBCardTableLoggingModRefBS;
   35.16  
   35.17  // This barrier is specialized to use a logging barrier to support
   35.18  // snapshot-at-the-beginning marking.
   35.19 @@ -126,18 +128,40 @@
   35.20      jbyte val = _byte_map[card_index];
   35.21      return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   35.22    }
   35.23 +};
   35.24  
   35.25 +class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
   35.26 + private:
   35.27 +  G1SATBCardTableLoggingModRefBS* _card_table;
   35.28 + public:
   35.29 +  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
   35.30 +
   35.31 +  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
   35.32 +
   35.33 +  virtual void on_commit(uint start_idx, size_t num_regions);
   35.34  };
   35.35  
   35.36  // Adds card-table logging to the post-barrier.
   35.37  // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
   35.38  class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
   35.39 +  friend class G1SATBCardTableLoggingModRefBSChangedListener;
   35.40   private:
   35.41 +  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   35.42    DirtyCardQueueSet& _dcqs;
   35.43   public:
   35.44 +  static size_t compute_size(size_t mem_region_size_in_words) {
   35.45 +    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
   35.46 +    return ReservedSpace::allocation_align_size_up(number_of_slots);
   35.47 +  }
   35.48 +
   35.49    G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
   35.50                                   int max_covered_regions);
   35.51  
   35.52 +  virtual void initialize() { }
   35.53 +  virtual void initialize(G1RegionToSpaceMapper* mapper);
   35.54 +
   35.55 +  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
   35.56 +
   35.57    bool is_a(BarrierSet::Name bsn) {
   35.58      return bsn == BarrierSet::G1SATBCTLogging ||
   35.59        G1SATBCardTableModRefBS::is_a(bsn);
   35.60 @@ -154,8 +178,6 @@
   35.61  
   35.62    void write_region_work(MemRegion mr)    { invalidate(mr); }
   35.63    void write_ref_array_work(MemRegion mr) { invalidate(mr); }
   35.64 -
   35.65 -
   35.66  };
   35.67  
   35.68  
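
compute_size() above sizes the card table at one byte per card of the covered memory region, before aligning the result up. A small worked example, assuming the usual 8-byte HeapWords and 512-byte cards; the alignment step is omitted:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t heap_bytes         = 1024u * 1024u * 1024u;  // e.g. a 1 GB heap
      const size_t heap_words         = heap_bytes / 8;         // assumed 8-byte HeapWords
      const size_t card_size_in_words = 512 / 8;                // assumed 512-byte cards
      const size_t number_of_slots    = heap_words / card_size_in_words;
      assert(number_of_slots == 2u * 1024u * 1024u);            // ~2 MB of card table, one byte per card
      return 0;
    }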
    36.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Aug 22 13:24:04 2014 +0200
    36.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Aug 26 13:38:33 2014 -0700
    36.3 @@ -344,11 +344,6 @@
    36.4    return low;
    36.5  }
    36.6  
    36.7 -#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
    36.8 -#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
    36.9 -#endif // _MSC_VER
   36.10 -
   36.11 -
   36.12  HeapRegion::HeapRegion(uint hrs_index,
   36.13                         G1BlockOffsetSharedArray* sharedOffsetArray,
   36.14                         MemRegion mr) :
   36.15 @@ -360,7 +355,7 @@
   36.16      _claimed(InitialClaimValue), _evacuation_failed(false),
   36.17      _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
   36.18      _young_type(NotYoung), _next_young_region(NULL),
   36.19 -    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
   36.20 +    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
   36.21  #ifdef ASSERT
   36.22      _containing_set(NULL),
   36.23  #endif // ASSERT
   36.24 @@ -369,14 +364,20 @@
   36.25      _predicted_bytes_to_copy(0)
   36.26  {
   36.27    _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
   36.28 +  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
   36.29 +
   36.30 +  initialize(mr);
   36.31 +}
   36.32 +
   36.33 +void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   36.34 +  assert(_rem_set->is_empty(), "Remembered set must be empty");
   36.35 +
   36.36 +  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
   36.37 +
   36.38    _orig_end = mr.end();
   36.39 -  // Note that initialize() will set the start of the unmarked area of the
   36.40 -  // region.
   36.41    hr_clear(false /*par*/, false /*clear_space*/);
   36.42    set_top(bottom());
   36.43    record_top_and_timestamp();
   36.44 -
   36.45 -  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
   36.46  }
   36.47  
   36.48  CompactibleSpace* HeapRegion::next_compaction_space() const {
   36.49 @@ -907,7 +908,7 @@
   36.50      }
   36.51  
   36.52      // If it returns false, verify_for_object() will output the
   36.53 -    // appropriate messasge.
   36.54 +    // appropriate message.
   36.55      if (do_bot_verify &&
   36.56          !g1->is_obj_dead(obj, this) &&
   36.57          !_offsets.verify_for_object(p, obj_size)) {
   36.58 @@ -1038,8 +1039,7 @@
   36.59    set_top(bottom());
   36.60    set_saved_mark_word(bottom());
   36.61    CompactibleSpace::clear(mangle_space);
   36.62 -  _offsets.zero_bottom_entry();
   36.63 -  _offsets.initialize_threshold();
   36.64 +  reset_bot();
   36.65  }
   36.66  
   36.67  void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
   36.68 @@ -1129,9 +1129,11 @@
   36.69    _gc_time_stamp(0)
   36.70  {
   36.71    _offsets.set_space(this);
   36.72 -  // false ==> we'll do the clearing if there's clearing to be done.
   36.73 -  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
   36.74 +}
   36.75 +
   36.76 +void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   36.77 +  CompactibleSpace::initialize(mr, clear_space, mangle_space);
   36.78    _top = bottom();
   36.79 -  _offsets.zero_bottom_entry();
   36.80 -  _offsets.initialize_threshold();
   36.81 +  reset_bot();
   36.82  }
   36.83 +
    37.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Aug 22 13:24:04 2014 +0200
    37.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Aug 26 13:38:33 2014 -0700
    37.3 @@ -62,7 +62,7 @@
    37.4                  p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
    37.5  
    37.6  // sentinel value for hrs_index
    37.7 -#define G1_NULL_HRS_INDEX ((uint) -1)
    37.8 +#define G1_NO_HRS_INDEX ((uint) -1)
    37.9  
   37.10  // A dirty card to oop closure for heap regions. It
   37.11  // knows how to get the G1 heap and how to use the bitmap
   37.12 @@ -146,6 +146,9 @@
   37.13    HeapWord* top() const { return _top; }
   37.14  
   37.15   protected:
   37.16 +  // Reset the G1OffsetTableContigSpace.
   37.17 +  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   37.18 +
   37.19    HeapWord** top_addr() { return &_top; }
   37.20    // Allocation helpers (return NULL if full).
   37.21    inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
   37.22 @@ -200,8 +203,7 @@
   37.23    virtual void print() const;
   37.24  
   37.25    void reset_bot() {
   37.26 -    _offsets.zero_bottom_entry();
   37.27 -    _offsets.initialize_threshold();
   37.28 +    _offsets.reset_bot();
   37.29    }
   37.30  
   37.31    void update_bot_for_object(HeapWord* start, size_t word_size) {
   37.32 @@ -264,7 +266,6 @@
   37.33  #ifdef ASSERT
   37.34    HeapRegionSetBase* _containing_set;
   37.35  #endif // ASSERT
   37.36 -  bool _pending_removal;
   37.37  
   37.38    // For parallel heapRegion traversal.
   37.39    jint _claimed;
   37.40 @@ -333,6 +334,12 @@
   37.41               G1BlockOffsetSharedArray* sharedOffsetArray,
   37.42               MemRegion mr);
   37.43  
   37.44 +  // Initializing the HeapRegion not only resets the data structure, but also
   37.45 +  // resets the BOT for that heap region.
    37.46 +  // The default value for clear_space means that we will do the clearing
    37.47 +  // ourselves if there's clearing to be done. We also always mangle the space.
   37.48 +  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
   37.49 +
   37.50    static int    LogOfHRGrainBytes;
   37.51    static int    LogOfHRGrainWords;
   37.52  
   37.53 @@ -553,26 +560,6 @@
   37.54    // to provide a dummy version of it.
   37.55  #endif // ASSERT
   37.56  
   37.57 -  // If we want to remove regions from a list in bulk we can simply tag
   37.58 -  // them with the pending_removal tag and call the
   37.59 -  // remove_all_pending() method on the list.
   37.60 -
   37.61 -  bool pending_removal() { return _pending_removal; }
   37.62 -
   37.63 -  void set_pending_removal(bool pending_removal) {
   37.64 -    if (pending_removal) {
   37.65 -      assert(!_pending_removal && containing_set() != NULL,
   37.66 -             "can only set pending removal to true if it's false and "
   37.67 -             "the region belongs to a region set");
   37.68 -    } else {
   37.69 -      assert( _pending_removal && containing_set() == NULL,
   37.70 -              "can only set pending removal to false if it's true and "
   37.71 -              "the region does not belong to a region set");
   37.72 -    }
   37.73 -
   37.74 -    _pending_removal = pending_removal;
   37.75 -  }
   37.76 -
   37.77    HeapRegion* get_next_young_region() { return _next_young_region; }
   37.78    void set_next_young_region(HeapRegion* hr) {
   37.79      _next_young_region = hr;
    38.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Aug 22 13:24:04 2014 +0200
    38.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Aug 26 13:38:33 2014 -0700
    38.3 @@ -372,17 +372,17 @@
    38.4                                                         _max_regions,
    38.5                                                         &_static_mem_size);
    38.6  
    38.7 -  for (uint i = 0; i < n_par_rs; i++) {
    38.8 -    for (uint j = 0; j < _max_regions; j++) {
    38.9 -      set(i, j, InvalidCard);
   38.10 -    }
   38.11 -  }
   38.12 +  invalidate(0, _max_regions);
   38.13  }
   38.14  
   38.15 -void FromCardCache::shrink(uint new_num_regions) {
   38.16 +void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
   38.17 +  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
   38.18 +            err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
   38.19 +                    start_idx, new_num_regions));
   38.20    for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   38.21 -    assert(new_num_regions <= _max_regions, "Must be within max.");
   38.22 -    for (uint j = new_num_regions; j < _max_regions; j++) {
   38.23 +    uint end_idx = (start_idx + (uint)new_num_regions);
   38.24 +    assert(end_idx <= _max_regions, "Must be within max.");
   38.25 +    for (uint j = start_idx; j < end_idx; j++) {
   38.26        set(i, j, InvalidCard);
   38.27      }
   38.28    }
   38.29 @@ -406,12 +406,12 @@
   38.30    }
   38.31  }
   38.32  
   38.33 -void OtherRegionsTable::init_from_card_cache(uint max_regions) {
   38.34 +void OtherRegionsTable::initialize(uint max_regions) {
   38.35    FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
   38.36  }
   38.37  
   38.38 -void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
   38.39 -  FromCardCache::shrink(new_num_regions);
   38.40 +void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
   38.41 +  FromCardCache::invalidate(start_idx, num_regions);
   38.42  }
   38.43  
   38.44  void OtherRegionsTable::print_from_card_cache() {
   38.45 @@ -802,7 +802,6 @@
   38.46  
   38.47  bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
   38.48    HeapRegion* hr = _g1h->heap_region_containing_raw(from);
   38.49 -  if (hr == NULL) return false;
   38.50    RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
   38.51    // Is this region in the coarse map?
   38.52    if (_coarse_map.at(hr_ind)) return true;
   38.53 @@ -840,8 +839,8 @@
   38.54  HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
   38.55                                     HeapRegion* hr)
   38.56    : _bosa(bosa),
   38.57 -    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
   38.58 -    _code_roots(), _other_regions(hr, &_m) {
   38.59 +    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
   38.60 +    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
   38.61    reset_for_par_iteration();
   38.62  }
   38.63  
    39.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Aug 22 13:24:04 2014 +0200
    39.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Tue Aug 26 13:38:33 2014 -0700
    39.3 @@ -84,7 +84,7 @@
    39.4  
    39.5    static void initialize(uint n_par_rs, uint max_num_regions);
    39.6  
    39.7 -  static void shrink(uint new_num_regions);
    39.8 +  static void invalidate(uint start_idx, size_t num_regions);
    39.9  
   39.10    static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
   39.11  
   39.12 @@ -213,11 +213,11 @@
   39.13  
   39.14    // Declare the heap size (in # of regions) to the OtherRegionsTable.
   39.15    // (Uses it to initialize from_card_cache).
   39.16 -  static void init_from_card_cache(uint max_regions);
   39.17 +  static void initialize(uint max_regions);
   39.18  
   39.19 -  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
   39.20 -  // Make sure any entries for higher regions are invalid.
   39.21 -  static void shrink_from_card_cache(uint new_num_regions);
   39.22 +  // Declares that regions between start_idx <= i < start_idx + num_regions are
   39.23 +  // not in use. Make sure that any entries for these regions are invalid.
   39.24 +  static void invalidate(uint start_idx, size_t num_regions);
   39.25  
   39.26    static void print_from_card_cache();
   39.27  };
   39.28 @@ -404,12 +404,11 @@
   39.29    // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   39.30    // (Uses it to initialize from_card_cache).
   39.31    static void init_heap(uint max_regions) {
   39.32 -    OtherRegionsTable::init_from_card_cache(max_regions);
   39.33 +    OtherRegionsTable::initialize(max_regions);
   39.34    }
   39.35  
   39.36 -  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
   39.37 -  static void shrink_heap(uint new_n_regs) {
   39.38 -    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
   39.39 +  static void invalidate(uint start_idx, uint num_regions) {
   39.40 +    OtherRegionsTable::invalidate(start_idx, num_regions);
   39.41    }
   39.42  
   39.43  #ifndef PRODUCT
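
The renamed invalidate(start_idx, num_regions) above replaces the old shrink-style API: any range of regions can now be dropped from the from-card cache, not just a trailing range, which matches uncommitting regions anywhere in the heap. A minimal standalone sketch of that contract, with illustrative types (not the HotSpot FromCardCache):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    static const int kInvalidCard = -1;

    struct FromCardCacheSketch {
      // one row per parallel remembered set, one entry per region
      std::vector<std::vector<int> > cache;

      FromCardCacheSketch(size_t n_par_rs, size_t max_regions)
        : cache(n_par_rs, std::vector<int>(max_regions, kInvalidCard)) {}

      void set(size_t rs, size_t region, int card) { cache[rs][region] = card; }

      void invalidate(size_t start_idx, size_t num_regions) {
        for (size_t i = 0; i < cache.size(); i++) {
          for (size_t j = start_idx; j < start_idx + num_regions; j++) {
            cache[i][j] = kInvalidCard;
          }
        }
      }
    };

    int main() {
      FromCardCacheSketch fcc(2, 8);
      fcc.set(0, 3, 42);
      fcc.set(1, 4, 7);
      fcc.invalidate(3, 2);                  // regions 3 and 4 are being uncommitted
      assert(fcc.cache[0][3] == kInvalidCard);
      assert(fcc.cache[1][4] == kInvalidCard);
      return 0;
    }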
    40.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Aug 22 13:24:04 2014 +0200
    40.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Aug 26 13:38:33 2014 -0700
    40.3 @@ -25,163 +25,204 @@
    40.4  #include "precompiled.hpp"
    40.5  #include "gc_implementation/g1/heapRegion.hpp"
    40.6  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    40.7 -#include "gc_implementation/g1/heapRegionSet.hpp"
    40.8 +#include "gc_implementation/g1/heapRegionSet.inline.hpp"
    40.9  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   40.10 +#include "gc_implementation/g1/concurrentG1Refine.hpp"
   40.11  #include "memory/allocation.hpp"
   40.12  
   40.13 -// Private
   40.14 +void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
   40.15 +                               G1RegionToSpaceMapper* prev_bitmap,
   40.16 +                               G1RegionToSpaceMapper* next_bitmap,
   40.17 +                               G1RegionToSpaceMapper* bot,
   40.18 +                               G1RegionToSpaceMapper* cardtable,
   40.19 +                               G1RegionToSpaceMapper* card_counts) {
   40.20 +  _allocated_heapregions_length = 0;
   40.21  
   40.22 -uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
   40.23 -  uint len = length();
   40.24 -  assert(num > 1, "use this only for sequences of length 2 or greater");
   40.25 -  assert(from <= len,
   40.26 -         err_msg("from: %u should be valid and <= than %u", from, len));
   40.27 +  _heap_mapper = heap_storage;
   40.28  
   40.29 -  uint curr = from;
   40.30 -  uint first = G1_NULL_HRS_INDEX;
   40.31 -  uint num_so_far = 0;
   40.32 -  while (curr < len && num_so_far < num) {
   40.33 -    if (at(curr)->is_empty()) {
   40.34 -      if (first == G1_NULL_HRS_INDEX) {
   40.35 -        first = curr;
   40.36 -        num_so_far = 1;
   40.37 -      } else {
   40.38 -        num_so_far += 1;
   40.39 -      }
   40.40 -    } else {
   40.41 -      first = G1_NULL_HRS_INDEX;
   40.42 -      num_so_far = 0;
   40.43 +  _prev_bitmap_mapper = prev_bitmap;
   40.44 +  _next_bitmap_mapper = next_bitmap;
   40.45 +
   40.46 +  _bot_mapper = bot;
   40.47 +  _cardtable_mapper = cardtable;
   40.48 +
   40.49 +  _card_counts_mapper = card_counts;
   40.50 +
   40.51 +  MemRegion reserved = heap_storage->reserved();
   40.52 +  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
   40.53 +
   40.54 +  _available_map.resize(_regions.length(), false);
   40.55 +  _available_map.clear();
   40.56 +}
   40.57 +
   40.58 +bool HeapRegionSeq::is_available(uint region) const {
   40.59 +  return _available_map.at(region);
   40.60 +}
   40.61 +
   40.62 +#ifdef ASSERT
   40.63 +bool HeapRegionSeq::is_free(HeapRegion* hr) const {
   40.64 +  return _free_list.contains(hr);
   40.65 +}
   40.66 +#endif
   40.67 +
   40.68 +HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
   40.69 +  HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index);
   40.70 +  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
   40.71 +  assert(reserved().contains(mr), "invariant");
   40.72 +  return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
   40.73 +}
   40.74 +
   40.75 +void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
   40.76 +  guarantee(num_regions > 0, "Must commit more than zero regions");
   40.77 +  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
   40.78 +
   40.79 +  _num_committed += (uint)num_regions;
   40.80 +
   40.81 +  _heap_mapper->commit_regions(index, num_regions);
   40.82 +
   40.83 +  // Also commit auxiliary data
   40.84 +  _prev_bitmap_mapper->commit_regions(index, num_regions);
   40.85 +  _next_bitmap_mapper->commit_regions(index, num_regions);
   40.86 +
   40.87 +  _bot_mapper->commit_regions(index, num_regions);
   40.88 +  _cardtable_mapper->commit_regions(index, num_regions);
   40.89 +
   40.90 +  _card_counts_mapper->commit_regions(index, num_regions);
   40.91 +}
   40.92 +
   40.93 +void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
   40.94 +  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
   40.95 +  guarantee(_num_committed >= num_regions, "pre-condition");
   40.96 +
   40.97 +  // Print before uncommitting.
   40.98 +  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
   40.99 +    for (uint i = start; i < start + num_regions; i++) {
  40.100 +      HeapRegion* hr = at(i);
  40.101 +      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
  40.102      }
  40.103 -    curr += 1;
  40.104    }
  40.105 -  assert(num_so_far <= num, "post-condition");
  40.106 -  if (num_so_far == num) {
  40.107 -    // we found enough space for the humongous object
  40.108 -    assert(from <= first && first < len, "post-condition");
  40.109 -    assert(first < curr && (curr - first) == num, "post-condition");
  40.110 -    for (uint i = first; i < first + num; ++i) {
  40.111 -      assert(at(i)->is_empty(), "post-condition");
  40.112 +
  40.113 +  _num_committed -= (uint)num_regions;
  40.114 +
  40.115 +  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  40.116 +  _heap_mapper->uncommit_regions(start, num_regions);
  40.117 +
  40.118 +  // Also uncommit auxiliary data
  40.119 +  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  40.120 +  _next_bitmap_mapper->uncommit_regions(start, num_regions);
  40.121 +
  40.122 +  _bot_mapper->uncommit_regions(start, num_regions);
  40.123 +  _cardtable_mapper->uncommit_regions(start, num_regions);
  40.124 +
  40.125 +  _card_counts_mapper->uncommit_regions(start, num_regions);
  40.126 +}
  40.127 +
  40.128 +void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
  40.129 +  guarantee(num_regions > 0, "No point in calling this for zero regions");
  40.130 +  commit_regions(start, num_regions);
  40.131 +  for (uint i = start; i < start + num_regions; i++) {
  40.132 +    if (_regions.get_by_index(i) == NULL) {
  40.133 +      HeapRegion* new_hr = new_heap_region(i);
  40.134 +      _regions.set_by_index(i, new_hr);
  40.135 +      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
  40.136      }
  40.137 -    return first;
  40.138 -  } else {
  40.139 -    // we failed to find enough space for the humongous object
  40.140 -    return G1_NULL_HRS_INDEX;
  40.141 +  }
  40.142 +
  40.143 +  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
  40.144 +
  40.145 +  for (uint i = start; i < start + num_regions; i++) {
  40.146 +    assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
  40.147 +    HeapRegion* hr = at(i);
  40.148 +    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
  40.149 +      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
  40.150 +    }
  40.151 +    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
  40.152 +    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  40.153 +
  40.154 +    hr->initialize(mr);
  40.155 +    insert_into_free_list(at(i));
  40.156    }
  40.157  }
  40.158  
  40.159 -// Public
  40.160 -
  40.161 -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
  40.162 -  assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
  40.163 -         "bottom should be heap region aligned");
  40.164 -  assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
  40.165 -         "end should be heap region aligned");
  40.166 -
  40.167 -  _next_search_index = 0;
  40.168 -  _allocated_length = 0;
  40.169 -
  40.170 -  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
  40.171 +uint HeapRegionSeq::expand_by(uint num_regions) {
  40.172 +  return expand_at(0, num_regions);
  40.173  }
  40.174  
  40.175 -MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
  40.176 -                                   HeapWord* new_end,
  40.177 -                                   FreeRegionList* list) {
  40.178 -  assert(old_end < new_end, "don't call it otherwise");
  40.179 -  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  40.180 +uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
  40.181 +  if (num_regions == 0) {
  40.182 +    return 0;
  40.183 +  }
  40.184  
  40.185 -  HeapWord* next_bottom = old_end;
  40.186 -  assert(heap_bottom() <= next_bottom, "invariant");
  40.187 -  while (next_bottom < new_end) {
  40.188 -    assert(next_bottom < heap_end(), "invariant");
  40.189 -    uint index = length();
  40.190 +  uint cur = start;
  40.191 +  uint idx_last_found = 0;
  40.192 +  uint num_last_found = 0;
  40.193  
  40.194 -    assert(index < max_length(), "otherwise we cannot expand further");
  40.195 -    if (index == 0) {
  40.196 -      // We have not allocated any regions so far
  40.197 -      assert(next_bottom == heap_bottom(), "invariant");
  40.198 -    } else {
  40.199 -      // next_bottom should match the end of the last/previous region
  40.200 -      assert(next_bottom == at(index - 1)->end(), "invariant");
  40.201 -    }
  40.202 +  uint expanded = 0;
  40.203  
  40.204 -    if (index == _allocated_length) {
  40.205 -      // We have to allocate a new HeapRegion.
  40.206 -      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
  40.207 -      if (new_hr == NULL) {
  40.208 -        // allocation failed, we bail out and return what we have done so far
  40.209 -        return MemRegion(old_end, next_bottom);
  40.210 -      }
  40.211 -      assert(_regions.get_by_index(index) == NULL, "invariant");
  40.212 -      _regions.set_by_index(index, new_hr);
  40.213 -      increment_allocated_length();
  40.214 -    }
  40.215 -    // Have to increment the length first, otherwise we will get an
  40.216 -    // assert failure at(index) below.
  40.217 -    increment_length();
  40.218 -    HeapRegion* hr = at(index);
  40.219 -    list->add_as_tail(hr);
  40.220 +  while (expanded < num_regions &&
  40.221 +         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
  40.222 +    uint to_expand = MIN2(num_regions - expanded, num_last_found);
  40.223 +    make_regions_available(idx_last_found, to_expand);
  40.224 +    expanded += to_expand;
  40.225 +    cur = idx_last_found + num_last_found + 1;
  40.226 +  }
  40.227  
  40.228 -    next_bottom = hr->end();
  40.229 -  }
  40.230 -  assert(next_bottom == new_end, "post-condition");
  40.231 -  return MemRegion(old_end, next_bottom);
  40.232 +  verify_optional();
  40.233 +  return expanded;
  40.234  }
  40.235  
  40.236 -uint HeapRegionSeq::free_suffix() {
  40.237 -  uint res = 0;
  40.238 -  uint index = length();
  40.239 -  while (index > 0) {
  40.240 -    index -= 1;
  40.241 -    if (!at(index)->is_empty()) {
  40.242 -      break;
  40.243 +uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
  40.244 +  uint found = 0;
  40.245 +  size_t length_found = 0;
  40.246 +  uint cur = 0;
  40.247 +
  40.248 +  while (length_found < num && cur < max_length()) {
  40.249 +    HeapRegion* hr = _regions.get_by_index(cur);
  40.250 +    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
  40.251 +      // This region is a potential candidate for allocation into.
  40.252 +      length_found++;
  40.253 +    } else {
  40.254 +      // This region is not a candidate. The next region is the next possible one.
  40.255 +      found = cur + 1;
  40.256 +      length_found = 0;
  40.257      }
  40.258 -    res += 1;
  40.259 +    cur++;
  40.260    }
  40.261 -  return res;
  40.262 +
  40.263 +  if (length_found == num) {
  40.264 +    for (uint i = found; i < (found + num); i++) {
  40.265 +      HeapRegion* hr = _regions.get_by_index(i);
  40.266 +      // sanity check
  40.267 +      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
  40.268 +                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
  40.269 +                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
  40.270 +    }
  40.271 +    return found;
  40.272 +  } else {
  40.273 +    return G1_NO_HRS_INDEX;
  40.274 +  }
  40.275  }
  40.276  
  40.277 -uint HeapRegionSeq::find_contiguous(uint num) {
  40.278 -  assert(num > 1, "use this only for sequences of length 2 or greater");
  40.279 -  assert(_next_search_index <= length(),
  40.280 -         err_msg("_next_search_index: %u should be valid and <= than %u",
  40.281 -                 _next_search_index, length()));
  40.282 -
  40.283 -  uint start = _next_search_index;
  40.284 -  uint res = find_contiguous_from(start, num);
  40.285 -  if (res == G1_NULL_HRS_INDEX && start > 0) {
  40.286 -    // Try starting from the beginning. If _next_search_index was 0,
  40.287 -    // no point in doing this again.
  40.288 -    res = find_contiguous_from(0, num);
  40.289 +HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
  40.290 +  guarantee(r != NULL, "Start region must be a valid region");
  40.291 +  guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index()));
  40.292 +  for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) {
  40.293 +    HeapRegion* hr = _regions.get_by_index(i);
  40.294 +    if (is_available(i)) {
  40.295 +      return hr;
  40.296 +    }
  40.297    }
  40.298 -  if (res != G1_NULL_HRS_INDEX) {
  40.299 -    assert(res < length(), err_msg("res: %u should be valid", res));
  40.300 -    _next_search_index = res + num;
  40.301 -    assert(_next_search_index <= length(),
  40.302 -           err_msg("_next_search_index: %u should be valid and <= than %u",
  40.303 -                   _next_search_index, length()));
  40.304 -  }
  40.305 -  return res;
  40.306 +  return NULL;
  40.307  }
  40.308  
  40.309  void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
  40.310 -  iterate_from((HeapRegion*) NULL, blk);
  40.311 -}
  40.312 +  uint len = max_length();
  40.313  
  40.314 -void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
  40.315 -  uint hr_index = 0;
  40.316 -  if (hr != NULL) {
  40.317 -    hr_index = hr->hrs_index();
  40.318 -  }
  40.319 -
  40.320 -  uint len = length();
  40.321 -  for (uint i = hr_index; i < len; i += 1) {
  40.322 -    bool res = blk->doHeapRegion(at(i));
  40.323 -    if (res) {
  40.324 -      blk->incomplete();
  40.325 -      return;
  40.326 +  for (uint i = 0; i < len; i++) {
  40.327 +    if (!is_available(i)) {
  40.328 +      continue;
  40.329      }
  40.330 -  }
  40.331 -  for (uint i = 0; i < hr_index; i += 1) {
  40.332 +    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
  40.333      bool res = blk->doHeapRegion(at(i));
  40.334      if (res) {
  40.335        blk->incomplete();
  40.336 @@ -190,72 +231,220 @@
  40.337    }
  40.338  }
  40.339  
  40.340 +uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  40.341 +  guarantee(res_idx != NULL, "checking");
  40.342 +  guarantee(start_idx <= (max_length() + 1), "checking");
  40.343 +
  40.344 +  uint num_regions = 0;
  40.345 +
  40.346 +  uint cur = start_idx;
  40.347 +  while (cur < max_length() && is_available(cur)) {
  40.348 +    cur++;
  40.349 +  }
  40.350 +  if (cur == max_length()) {
  40.351 +    return num_regions;
  40.352 +  }
  40.353 +  *res_idx = cur;
  40.354 +  while (cur < max_length() && !is_available(cur)) {
  40.355 +    cur++;
  40.356 +  }
  40.357 +  num_regions = cur - *res_idx;
  40.358 +#ifdef ASSERT
  40.359 +  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
  40.360 +    assert(!is_available(i), "just checking");
  40.361 +  }
  40.362 +  assert(cur == max_length() || num_regions == 0 || is_available(cur),
  40.363 +         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
  40.364 +#endif
  40.365 +  return num_regions;
  40.366 +}
  40.367 +
  40.368 +uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
  40.369 +  return num_regions * worker_i / num_workers;
  40.370 +}
  40.371 +
  40.372 +void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
  40.373 +  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
  40.374 +
  40.375 +  // Every worker will actually look at all regions, skipping over regions that
  40.376 +  // are currently not committed.
  40.377 +  // This also (potentially) iterates over regions newly allocated during GC. This
  40.378 +  // is no problem except for some extra work.
  40.379 +  for (uint count = 0; count < _allocated_heapregions_length; count++) {
  40.380 +    const uint index = (start_index + count) % _allocated_heapregions_length;
  40.381 +    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
  40.382 +    // Skip over unavailable regions
  40.383 +    if (!is_available(index)) {
  40.384 +      continue;
  40.385 +    }
  40.386 +    HeapRegion* r = _regions.get_by_index(index);
  40.387 +    // We'll ignore "continues humongous" regions (we'll process them
  40.388 +    // when we come across their corresponding "start humongous"
  40.389 +    // region) and regions already claimed.
  40.390 +    if (r->claim_value() == claim_value || r->continuesHumongous()) {
  40.391 +      continue;
  40.392 +    }
  40.393 +    // OK, try to claim it
  40.394 +    if (!r->claimHeapRegion(claim_value)) {
  40.395 +      continue;
  40.396 +    }
  40.397 +    // Success!
  40.398 +    if (r->startsHumongous()) {
  40.399 +      // If the region is "starts humongous" we'll iterate over its
  40.400 +      // "continues humongous" first; in fact we'll do them
  40.401 +      // first. The order is important. In one case, calling the
  40.402 +      // closure on the "starts humongous" region might de-allocate
  40.403 +      // and clear all its "continues humongous" regions and, as a
  40.404 +      // result, we might end up processing them twice. So, we'll do
  40.405 +      // them first (note: most closures will ignore them anyway) and
  40.406 +      // then we'll do the "starts humongous" region.
  40.407 +      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
  40.408 +        HeapRegion* chr = _regions.get_by_index(ch_index);
  40.409 +
  40.410 +        assert(chr->continuesHumongous(), "Must be humongous region");
  40.411 +        assert(chr->humongous_start_region() == r,
  40.412 +               err_msg("Must work on humongous continuation of the original start region "
  40.413 +                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
  40.414 +        assert(chr->claim_value() != claim_value,
  40.415 +               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
  40.416 +
  40.417 +        bool claim_result = chr->claimHeapRegion(claim_value);
  40.418 +        // We should always be able to claim it; no one else should
  40.419 +        // be trying to claim this region.
  40.420 +        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
  40.421 +
  40.422 +        bool res2 = blk->doHeapRegion(chr);
  40.423 +        if (res2) {
  40.424 +          return;
  40.425 +        }
  40.426 +
  40.427 +        // Right now, this holds (i.e., no closure that actually
  40.428 +        // does something with "continues humongous" regions
  40.429 +        // clears them). We might have to weaken it in the future,
  40.430 +        // but let's leave these two asserts here for extra safety.
  40.431 +        assert(chr->continuesHumongous(), "should still be the case");
  40.432 +        assert(chr->humongous_start_region() == r, "sanity");
  40.433 +      }
  40.434 +    }
  40.435 +
  40.436 +    bool res = blk->doHeapRegion(r);
  40.437 +    if (res) {
  40.438 +      return;
  40.439 +    }
  40.440 +  }
  40.441 +}
  40.442 +
  40.443  uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
  40.444 -  // Reset this in case it's currently pointing into the regions that
  40.445 -  // we just removed.
  40.446 -  _next_search_index = 0;
  40.447 -
  40.448    assert(length() > 0, "the region sequence should not be empty");
  40.449 -  assert(length() <= _allocated_length, "invariant");
  40.450 -  assert(_allocated_length > 0, "we should have at least one region committed");
  40.451 +  assert(length() <= _allocated_heapregions_length, "invariant");
  40.452 +  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  40.453    assert(num_regions_to_remove < length(), "We should never remove all regions");
  40.454  
  40.455 -  uint i = 0;
  40.456 -  for (; i < num_regions_to_remove; i++) {
  40.457 -    HeapRegion* cur = at(length() - 1);
  40.458 +  if (num_regions_to_remove == 0) {
  40.459 +    return 0;
  40.460 +  }
  40.461  
  40.462 -    if (!cur->is_empty()) {
  40.463 -      // We have to give up if the region can not be moved
  40.464 -      break;
  40.465 +  uint removed = 0;
  40.466 +  uint cur = _allocated_heapregions_length - 1;
  40.467 +  uint idx_last_found = 0;
  40.468 +  uint num_last_found = 0;
  40.469 +
  40.470 +  while ((removed < num_regions_to_remove) &&
  40.471 +      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
  40.472 +    // Only allow uncommit from the end of the heap.
  40.473 +    if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
  40.474 +      return 0;
  40.475 +    }
  40.476 +    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
  40.477 +
  40.478 +    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
  40.479 +
  40.480 +    cur -= num_last_found;
  40.481 +    removed += to_remove;
  40.482    }
  40.483 -    assert(!cur->isHumongous(), "Humongous regions should not be empty");
  40.484  
  40.485 -    decrement_length();
  40.486 -  }
  40.487 -  return i;
  40.488 +  verify_optional();
  40.489 +
  40.490 +  return removed;
  40.491  }
  40.492  
  40.493 -#ifndef PRODUCT
  40.494 -void HeapRegionSeq::verify_optional() {
  40.495 -  guarantee(length() <= _allocated_length,
  40.496 +uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  40.497 +  guarantee(start_idx < _allocated_heapregions_length, "checking");
  40.498 +  guarantee(res_idx != NULL, "checking");
  40.499 +
  40.500 +  uint num_regions_found = 0;
  40.501 +
  40.502 +  jlong cur = start_idx;
  40.503 +  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
  40.504 +    cur--;
  40.505 +  }
  40.506 +  if (cur == -1) {
  40.507 +    return num_regions_found;
  40.508 +  }
  40.509 +  jlong old_cur = cur;
  40.510 +  // cur indexes the first empty region
  40.511 +  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
  40.512 +    cur--;
  40.513 +  }
  40.514 +  *res_idx = cur + 1;
  40.515 +  num_regions_found = old_cur - cur;
  40.516 +
  40.517 +#ifdef ASSERT
  40.518 +  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
  40.519 +    assert(at(i)->is_empty(), "just checking");
  40.520 +  }
  40.521 +#endif
  40.522 +  return num_regions_found;
  40.523 +}
  40.524 +
  40.525 +void HeapRegionSeq::verify() {
  40.526 +  guarantee(length() <= _allocated_heapregions_length,
  40.527              err_msg("invariant: _length: %u _allocated_length: %u",
  40.528 -                    length(), _allocated_length));
  40.529 -  guarantee(_allocated_length <= max_length(),
  40.530 +                    length(), _allocated_heapregions_length));
  40.531 +  guarantee(_allocated_heapregions_length <= max_length(),
  40.532              err_msg("invariant: _allocated_length: %u _max_length: %u",
  40.533 -                    _allocated_length, max_length()));
  40.534 -  guarantee(_next_search_index <= length(),
  40.535 -            err_msg("invariant: _next_search_index: %u _length: %u",
  40.536 -                    _next_search_index, length()));
  40.537 +                    _allocated_heapregions_length, max_length()));
  40.538  
  40.539 +  bool prev_committed = true;
  40.540 +  uint num_committed = 0;
  40.541    HeapWord* prev_end = heap_bottom();
  40.542 -  for (uint i = 0; i < _allocated_length; i += 1) {
  40.543 +  for (uint i = 0; i < _allocated_heapregions_length; i++) {
  40.544 +    if (!is_available(i)) {
  40.545 +      prev_committed = false;
  40.546 +      continue;
  40.547 +    }
  40.548 +    num_committed++;
  40.549      HeapRegion* hr = _regions.get_by_index(i);
  40.550      guarantee(hr != NULL, err_msg("invariant: i: %u", i));
  40.551 -    guarantee(hr->bottom() == prev_end,
  40.552 +    guarantee(!prev_committed || hr->bottom() == prev_end,
  40.553                err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
  40.554                        i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
  40.555      guarantee(hr->hrs_index() == i,
  40.556                err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
  40.557 -    if (i < length()) {
  40.558 -      // Asserts will fire if i is >= _length
  40.559 -      HeapWord* addr = hr->bottom();
  40.560 -      guarantee(addr_to_region(addr) == hr, "sanity");
  40.561 -      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
  40.562 -    } else {
  40.563 -      guarantee(hr->is_empty(), "sanity");
  40.564 -      guarantee(!hr->isHumongous(), "sanity");
  40.565 -      // using assert instead of guarantee here since containing_set()
  40.566 -      // is only available in non-product builds.
  40.567 -      assert(hr->containing_set() == NULL, "sanity");
  40.568 -    }
  40.569 +    // Asserts will fire if i is >= _length
  40.570 +    HeapWord* addr = hr->bottom();
  40.571 +    guarantee(addr_to_region(addr) == hr, "sanity");
  40.572 +    // We cannot check whether the region is part of a particular set: at the time
  40.573 +    // this method may be called, we have only completed allocation of the regions,
   40.574 +    // but not yet put them into a region set.
  40.575 +    prev_committed = true;
  40.576      if (hr->startsHumongous()) {
  40.577        prev_end = hr->orig_end();
  40.578      } else {
  40.579        prev_end = hr->end();
  40.580      }
  40.581    }
  40.582 -  for (uint i = _allocated_length; i < max_length(); i += 1) {
  40.583 +  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
  40.584      guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  40.585    }
  40.586 +
  40.587 +  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
  40.588 +  _free_list.verify();
  40.589 +}
  40.590 +
  40.591 +#ifndef PRODUCT
  40.592 +void HeapRegionSeq::verify_optional() {
  40.593 +  verify();
  40.594  }
  40.595  #endif // PRODUCT
  40.596 +
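
For orientation before the next file: the rewritten heapRegionSeq.cpp above tracks committed regions in an availability bitmap and expands the heap by committing runs of unavailable slots found with a linear first-fit scan (see find_unavailable_from_idx() and expand_at()). The sketch below is a minimal standalone rendering of that scan, assuming a plain std::vector<bool> in place of the real _available_map and none of the mapper plumbing; RegionMap is an invented name, not HotSpot code.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct RegionMap {
      std::vector<bool> available;   // one flag per region, true = committed

      // Length of the next run of unavailable regions at or after start; its
      // first index is written to *res_idx. Returns 0 if there is no such run.
      unsigned find_unavailable_from_idx(unsigned start, unsigned* res_idx) const {
        unsigned cur = start;
        while (cur < available.size() && available[cur]) cur++;
        if (cur == available.size()) return 0;
        *res_idx = cur;
        while (cur < available.size() && !available[cur]) cur++;
        return cur - *res_idx;
      }

      // Commit up to num_regions regions, searching from start; returns how
      // many regions were actually made available.
      unsigned expand_at(unsigned start, unsigned num_regions) {
        unsigned expanded = 0;
        unsigned cur = start;
        unsigned idx_last_found = 0;
        unsigned num_last_found = 0;
        while (expanded < num_regions &&
               (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
          unsigned to_expand = std::min(num_regions - expanded, num_last_found);
          for (unsigned i = idx_last_found; i < idx_last_found + to_expand; i++) {
            available[i] = true;     // "commit" the region
          }
          expanded += to_expand;
          cur = idx_last_found + num_last_found + 1;
        }
        return expanded;
      }
    };

    int main() {
      RegionMap m;
      m.available.assign(8, false);
      m.available[2] = true;                            // already committed
      m.available[3] = true;
      std::printf("committed %u regions\n", m.expand_at(0, 5));  // prints 5
      for (size_t i = 0; i < m.available.size(); i++) {
        std::printf("%d", m.available[i] ? 1 : 0);
      }
      std::printf("\n");                                // prints 11111110
      return 0;
    }
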
    41.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Aug 22 13:24:04 2014 +0200
    41.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Aug 26 13:38:33 2014 -0700
    41.3 @@ -26,6 +26,8 @@
    41.4  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
    41.5  
    41.6  #include "gc_implementation/g1/g1BiasedArray.hpp"
    41.7 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
    41.8 +#include "gc_implementation/g1/heapRegionSet.hpp"
    41.9  
   41.10  class HeapRegion;
   41.11  class HeapRegionClosure;
   41.12 @@ -33,16 +35,20 @@
   41.13  
   41.14  class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
   41.15   protected:
   41.16 -   virtual HeapRegion* default_value() const { return NULL; }
   41.17 +  virtual HeapRegion* default_value() const { return NULL; }
   41.18  };
   41.19  
   41.20 -// This class keeps track of the region metadata (i.e., HeapRegion
   41.21 -// instances). They are kept in the _regions array in address
   41.22 -// order. A region's index in the array corresponds to its index in
   41.23 -// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
   41.24 -// the one after it, etc.). Two regions that are consecutive in the
   41.25 -// array should also be adjacent in the address space (i.e.,
   41.26 -// region(i).end() == region(i+1).bottom().
   41.27 +// This class keeps track of the actual heap memory, auxiliary data
   41.28 +// and its metadata (i.e., HeapRegion instances) and the list of free regions.
   41.29 +//
   41.30 +// This allows maximum flexibility for deciding what to commit or uncommit given
   41.31 +// a request from outside.
   41.32 +//
   41.33 +// HeapRegions are kept in the _regions array in address order. A region's
   41.34 +// index in the array corresponds to its index in the heap (i.e., 0 is the
   41.35 +// region at the bottom of the heap, 1 is the one after it, etc.). Two
   41.36 +// regions that are consecutive in the array should also be adjacent in the
    41.37 +// address space (i.e., region(i).end() == region(i+1).bottom()).
   41.38  //
   41.39  // We create a HeapRegion when we commit the region's address space
   41.40  // for the first time. When we uncommit the address space of a
   41.41 @@ -51,56 +57,94 @@
   41.42  //
   41.43  // We keep track of three lengths:
   41.44  //
   41.45 -// * _committed_length (returned by length()) is the number of currently
   41.46 -//   committed regions.
   41.47 -// * _allocated_length (not exposed outside this class) is the
   41.48 -//   number of regions for which we have HeapRegions.
   41.49 +// * _num_committed (returned by length()) is the number of currently
   41.50 +//   committed regions. These may not be contiguous.
   41.51 +// * _allocated_heapregions_length (not exposed outside this class) is the
    41.52 +//   highest region index for which we have allocated a HeapRegion, plus one.
   41.53  // * max_length() returns the maximum number of regions the heap can have.
   41.54  //
   41.55 -// and maintain that: _committed_length <= _allocated_length <= max_length()
   41.56  
   41.57  class HeapRegionSeq: public CHeapObj<mtGC> {
   41.58    friend class VMStructs;
   41.59  
   41.60    G1HeapRegionTable _regions;
   41.61  
   41.62 -  // The number of regions committed in the heap.
   41.63 -  uint _committed_length;
   41.64 +  G1RegionToSpaceMapper* _heap_mapper;
   41.65 +  G1RegionToSpaceMapper* _prev_bitmap_mapper;
   41.66 +  G1RegionToSpaceMapper* _next_bitmap_mapper;
   41.67 +  G1RegionToSpaceMapper* _bot_mapper;
   41.68 +  G1RegionToSpaceMapper* _cardtable_mapper;
   41.69 +  G1RegionToSpaceMapper* _card_counts_mapper;
   41.70  
   41.71 -  // A hint for which index to start searching from for humongous
   41.72 -  // allocations.
   41.73 -  uint _next_search_index;
   41.74 +  FreeRegionList _free_list;
   41.75  
   41.76 -  // The number of regions for which we have allocated HeapRegions for.
   41.77 -  uint _allocated_length;
   41.78 +  // Each bit in this bitmap indicates that the corresponding region is available
   41.79 +  // for allocation.
   41.80 +  BitMap _available_map;
   41.81  
   41.82 -  // Find a contiguous set of empty regions of length num, starting
   41.83 -  // from the given index.
   41.84 -  uint find_contiguous_from(uint from, uint num);
    41.85 +  // The number of regions committed in the heap.
   41.86 +  uint _num_committed;
   41.87  
   41.88 -  void increment_allocated_length() {
   41.89 -    assert(_allocated_length < max_length(), "pre-condition");
   41.90 -    _allocated_length++;
   41.91 -  }
   41.92 +  // Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
    41.93 +  // Internal only. One more than the highest region index for which a HeapRegion instance has been allocated.
   41.94  
   41.95 -  void increment_length() {
   41.96 -    assert(length() < max_length(), "pre-condition");
   41.97 -    _committed_length++;
   41.98 -  }
    41.99 +  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   41.100 +  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
  41.101  
  41.102 -  void decrement_length() {
  41.103 -    assert(length() > 0, "pre-condition");
  41.104 -    _committed_length--;
  41.105 -  }
  41.106 +  void make_regions_available(uint index, uint num_regions = 1);
  41.107  
  41.108 -  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
  41.109 -  HeapWord* heap_end() const {return _regions.end_address_mapped(); }
  41.110 +  // Pass down commit calls to the VirtualSpace.
  41.111 +  void commit_regions(uint index, size_t num_regions = 1);
  41.112 +  void uncommit_regions(uint index, size_t num_regions = 1);
  41.113  
  41.114 - public:
  41.115 -  // Empty contructor, we'll initialize it with the initialize() method.
  41.116 -  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
  41.117 +  // Notify other data structures about change in the heap layout.
  41.118 +  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
  41.119 +  // Calculate the starting region for each worker during parallel iteration so
  41.120 +  // that they do not all start from the same region.
  41.121 +  uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
  41.122  
  41.123 -  void initialize(HeapWord* bottom, HeapWord* end);
  41.124 +  // Find a contiguous set of empty or uncommitted regions of length num and return
  41.125 +  // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
  41.126 +  // If only_empty is true, only empty regions are considered.
  41.127 +  // Searches from bottom to top of the heap, doing a first-fit.
  41.128 +  uint find_contiguous(size_t num, bool only_empty);
  41.129 +  // Finds the next sequence of unavailable regions starting from start_idx. Returns the
  41.130 +  // length of the sequence found. If this result is zero, no such sequence could be found,
  41.131 +  // otherwise res_idx indicates the start index of these regions.
  41.132 +  uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
  41.133 +  // Finds the next sequence of empty regions starting from start_idx, going backwards in
  41.134 +  // the heap. Returns the length of the sequence found. If this value is zero, no
  41.135 +  // sequence could be found, otherwise res_idx contains the start index of this range.
  41.136 +  uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
  41.137 +  // Allocate a new HeapRegion for the given index.
  41.138 +  HeapRegion* new_heap_region(uint hrs_index);
  41.139 +#ifdef ASSERT
  41.140 +public:
  41.141 +  bool is_free(HeapRegion* hr) const;
  41.142 +#endif
  41.143 +  // Returns whether the given region is available for allocation.
  41.144 +  bool is_available(uint region) const;
  41.145 +
   41.146 + public:
   41.147 +  // Empty constructor, we'll initialize it with the initialize() method.
  41.148 +  HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0),
  41.149 +                    _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
  41.150 +                    _allocated_heapregions_length(0), _available_map(),
  41.151 +                    _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
  41.152 +  { }
  41.153 +
  41.154 +  void initialize(G1RegionToSpaceMapper* heap_storage,
  41.155 +                  G1RegionToSpaceMapper* prev_bitmap,
  41.156 +                  G1RegionToSpaceMapper* next_bitmap,
  41.157 +                  G1RegionToSpaceMapper* bot,
  41.158 +                  G1RegionToSpaceMapper* cardtable,
  41.159 +                  G1RegionToSpaceMapper* card_counts);
  41.160 +
  41.161 +  // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
  41.162 +  // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
  41.163 +  // the heap from the lowest address, this region (and its associated data
  41.164 +  // structures) are available and we do not need to check further.
  41.165 +  HeapRegion* get_dummy_region() { return new_heap_region(0); }
  41.166  
  41.167    // Return the HeapRegion at the given index. Assume that the index
  41.168    // is valid.
  41.169 @@ -110,47 +154,83 @@
  41.170    // HeapRegion, otherwise return NULL.
  41.171    inline HeapRegion* addr_to_region(HeapWord* addr) const;
  41.172  
  41.173 -  // Return the HeapRegion that corresponds to the given
  41.174 -  // address. Assume the address is valid.
  41.175 -  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
  41.176 +  // Insert the given region into the free region list.
  41.177 +  inline void insert_into_free_list(HeapRegion* hr);
  41.178 +
  41.179 +  // Insert the given region list into the global free region list.
  41.180 +  void insert_list_into_free_list(FreeRegionList* list) {
  41.181 +    _free_list.add_ordered(list);
  41.182 +  }
  41.183 +
  41.184 +  HeapRegion* allocate_free_region(bool is_old) {
  41.185 +    HeapRegion* hr = _free_list.remove_region(is_old);
  41.186 +
  41.187 +    if (hr != NULL) {
  41.188 +      assert(hr->next() == NULL, "Single region should not have next");
  41.189 +      assert(is_available(hr->hrs_index()), "Must be committed");
  41.190 +    }
  41.191 +    return hr;
  41.192 +  }
  41.193 +
  41.194 +  inline void allocate_free_regions_starting_at(uint first, uint num_regions);
  41.195 +
  41.196 +  // Remove all regions from the free list.
  41.197 +  void remove_all_free_regions() {
  41.198 +    _free_list.remove_all();
  41.199 +  }
  41.200 +
  41.201 +  // Return the number of committed free regions in the heap.
  41.202 +  uint num_free_regions() const {
  41.203 +    return _free_list.length();
  41.204 +  }
  41.205 +
  41.206 +  size_t total_capacity_bytes() const {
  41.207 +    return num_free_regions() * HeapRegion::GrainBytes;
  41.208 +  }
  41.209 +
  41.210 +  // Return the number of available (uncommitted) regions.
  41.211 +  uint available() const { return max_length() - length(); }
  41.212  
  41.213    // Return the number of regions that have been committed in the heap.
  41.214 -  uint length() const { return _committed_length; }
  41.215 +  uint length() const { return _num_committed; }
  41.216  
  41.217    // Return the maximum number of regions in the heap.
  41.218    uint max_length() const { return (uint)_regions.length(); }
  41.219  
  41.220 -  // Expand the sequence to reflect that the heap has grown from
  41.221 -  // old_end to new_end. Either create new HeapRegions, or re-use
  41.222 -  // existing ones, and return them in the given list. Returns the
  41.223 -  // memory region that covers the newly-created regions. If a
  41.224 -  // HeapRegion allocation fails, the result memory region might be
  41.225 -  // smaller than the desired one.
  41.226 -  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
  41.227 -                      FreeRegionList* list);
  41.228 +  MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
  41.229  
  41.230 -  // Return the number of contiguous regions at the end of the sequence
  41.231 -  // that are available for allocation.
  41.232 -  uint free_suffix();
  41.233 +  // Expand the sequence to reflect that the heap has grown. Either create new
  41.234 +  // HeapRegions, or re-use existing ones. Returns the number of regions the
  41.235 +  // sequence was expanded by. If a HeapRegion allocation fails, the resulting
  41.236 +  // number of regions might be smaller than what's desired.
  41.237 +  uint expand_by(uint num_regions);
  41.238  
  41.239 -  // Find a contiguous set of empty regions of length num and return
  41.240 -  // the index of the first region or G1_NULL_HRS_INDEX if the
  41.241 -  // search was unsuccessful.
  41.242 -  uint find_contiguous(uint num);
  41.243 +  // Makes sure that the regions from start to start+num_regions-1 are available
  41.244 +  // for allocation. Returns the number of regions that were committed to achieve
  41.245 +  // this.
  41.246 +  uint expand_at(uint start, uint num_regions);
  41.247 +
  41.248 +  // Find a contiguous set of empty regions of length num. Returns the start index of
  41.249 +  // that set, or G1_NO_HRS_INDEX.
  41.250 +  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
  41.251 +  // Find a contiguous set of empty or unavailable regions of length num. Returns the
  41.252 +  // start index of that set, or G1_NO_HRS_INDEX.
  41.253 +  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
  41.254 +
  41.255 +  HeapRegion* next_region_in_heap(const HeapRegion* r) const;
  41.256  
  41.257    // Apply blk->doHeapRegion() on all committed regions in address order,
  41.258    // terminating the iteration early if doHeapRegion() returns true.
  41.259    void iterate(HeapRegionClosure* blk) const;
  41.260  
  41.261 -  // As above, but start the iteration from hr and loop around. If hr
  41.262 -  // is NULL, we start from the first region in the heap.
  41.263 -  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
  41.264 +  void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
  41.265  
  41.266 -  // Tag as uncommitted as many regions that are completely free as
  41.267 -  // possible, up to num_regions_to_remove, from the suffix of the committed
  41.268 -  // sequence. Return the actual number of removed regions.
  41.269 +  // Uncommit up to num_regions_to_remove regions that are completely free.
  41.270 +  // Return the actual number of uncommitted regions.
  41.271    uint shrink_by(uint num_regions_to_remove);
  41.272  
  41.273 +  void verify();
  41.274 +
  41.275    // Do some sanity checking.
  41.276    void verify_optional() PRODUCT_RETURN;
  41.277  };
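
On the par_iterate()/start_region_for_worker() pair declared above: each worker starts its walk over the region table at a different offset and wraps around, so every worker can still visit every index while the starting points are spread out and claiming contention is reduced. The toy program below reproduces only that offset arithmetic with assumed names; it is not the HotSpot iteration code.

    #include <cstdio>

    // Same arithmetic as the staggered start used for parallel iteration.
    static unsigned start_region_for_worker(unsigned worker_i,
                                            unsigned num_workers,
                                            unsigned num_regions) {
      return num_regions * worker_i / num_workers;
    }

    int main() {
      const unsigned num_regions = 10;
      const unsigned num_workers = 4;
      for (unsigned w = 0; w < num_workers; w++) {
        unsigned start = start_region_for_worker(w, num_workers, num_regions);
        std::printf("worker %u visits:", w);
        for (unsigned count = 0; count < num_regions; count++) {
          std::printf(" %u", (start + count) % num_regions);  // wrap around
        }
        std::printf("\n");
      }
      return 0;
    }
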
    42.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Fri Aug 22 13:24:04 2014 +0200
    42.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Aug 26 13:38:33 2014 -0700
    42.3 @@ -27,28 +27,32 @@
    42.4  
    42.5  #include "gc_implementation/g1/heapRegion.hpp"
    42.6  #include "gc_implementation/g1/heapRegionSeq.hpp"
    42.7 +#include "gc_implementation/g1/heapRegionSet.inline.hpp"
    42.8  
    42.9 -inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
   42.10 +inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   42.11 +  assert(addr < heap_end(),
   42.12 +        err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end())));
   42.13 +  assert(addr >= heap_bottom(),
   42.14 +        err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
   42.15 +
   42.16    HeapRegion* hr = _regions.get_by_address(addr);
   42.17 -  assert(hr != NULL, "invariant");
   42.18    return hr;
   42.19  }
   42.20  
   42.21 -inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   42.22 -  if (addr != NULL && addr < heap_end()) {
   42.23 -    assert(addr >= heap_bottom(),
   42.24 -          err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
   42.25 -    return addr_to_region_unsafe(addr);
   42.26 -  }
   42.27 -  return NULL;
   42.28 -}
   42.29 -
   42.30  inline HeapRegion* HeapRegionSeq::at(uint index) const {
   42.31 -  assert(index < length(), "pre-condition");
   42.32 +  assert(is_available(index), "pre-condition");
   42.33    HeapRegion* hr = _regions.get_by_index(index);
   42.34    assert(hr != NULL, "sanity");
   42.35    assert(hr->hrs_index() == index, "sanity");
   42.36    return hr;
   42.37  }
   42.38  
   42.39 +inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) {
   42.40 +  _free_list.add_ordered(hr);
   42.41 +}
   42.42 +
   42.43 +inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) {
   42.44 +  _free_list.remove_starting_at(at(first), num_regions);
   42.45 +}
   42.46 +
   42.47  #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
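
The addr_to_region() change above asserts that the address lies inside the reserved range and then indexes the region table directly. A simplified flat-array version of that lookup is sketched below with invented RegionTable/Region types; the real code goes through the biased array's get_by_address(). An address maps to its region by dividing its offset from the heap bottom by the region size.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Region { unsigned index; };

    struct RegionTable {
      std::uintptr_t bottom;          // start of the reserved heap range
      std::size_t    grain_bytes;     // bytes covered by each region
      std::vector<Region> regions;    // one slot per region

      Region* addr_to_region(std::uintptr_t addr) {
        assert(addr >= bottom);
        assert(addr <  bottom + grain_bytes * regions.size());
        return &regions[(addr - bottom) / grain_bytes];
      }
    };

    int main() {
      RegionTable t;
      t.bottom = 0x100000;
      t.grain_bytes = 0x1000;          // 4 KB regions, just for the example
      t.regions.resize(8);
      for (unsigned i = 0; i < t.regions.size(); i++) {
        t.regions[i].index = i;
      }
      std::printf("region of 0x103abc is %u\n",
                  t.addr_to_region(0x103abc)->index);   // prints 3
      return 0;
    }
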
    43.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Aug 22 13:24:04 2014 +0200
    43.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Tue Aug 26 13:38:33 2014 -0700
    43.3 @@ -23,6 +23,7 @@
    43.4   */
    43.5  
    43.6  #include "precompiled.hpp"
    43.7 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    43.8  #include "gc_implementation/g1/heapRegionRemSet.hpp"
    43.9  #include "gc_implementation/g1/heapRegionSet.inline.hpp"
   43.10  
   43.11 @@ -67,7 +68,7 @@
   43.12    // Do the basic verification first before we do the checks over the regions.
   43.13    HeapRegionSetBase::verify();
   43.14  
   43.15 -  _verify_in_progress        = true;
   43.16 +  _verify_in_progress = true;
   43.17  }
   43.18  
   43.19  void HeapRegionSetBase::verify_end() {
   43.20 @@ -103,62 +104,7 @@
   43.21  }
   43.22  
   43.23  void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
   43.24 -  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
   43.25 -}
   43.26 -
   43.27 -void FreeRegionList::add_as_head_or_tail(FreeRegionList* from_list, bool as_head) {
   43.28 -  check_mt_safety();
   43.29 -  from_list->check_mt_safety();
   43.30 -
   43.31 -  verify_optional();
   43.32 -  from_list->verify_optional();
   43.33 -
   43.34 -  if (from_list->is_empty()) {
   43.35 -    return;
   43.36 -  }
   43.37 -
   43.38 -#ifdef ASSERT
   43.39 -  FreeRegionListIterator iter(from_list);
   43.40 -  while (iter.more_available()) {
   43.41 -    HeapRegion* hr = iter.get_next();
   43.42 -    // In set_containing_set() we check that we either set the value
   43.43 -    // from NULL to non-NULL or vice versa to catch bugs. So, we have
   43.44 -    // to NULL it first before setting it to the value.
   43.45 -    hr->set_containing_set(NULL);
   43.46 -    hr->set_containing_set(this);
   43.47 -  }
   43.48 -#endif // ASSERT
   43.49 -
   43.50 -  if (_head == NULL) {
   43.51 -    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
   43.52 -    _head = from_list->_head;
   43.53 -    _tail = from_list->_tail;
   43.54 -  } else {
   43.55 -    assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
   43.56 -    if (as_head) {
   43.57 -      from_list->_tail->set_next(_head);
   43.58 -      _head->set_prev(from_list->_tail);
   43.59 -      _head = from_list->_head;
   43.60 -    } else {
   43.61 -      _tail->set_next(from_list->_head);
   43.62 -      from_list->_head->set_prev(_tail);
   43.63 -      _tail = from_list->_tail;
   43.64 -    }
   43.65 -  }
   43.66 -
   43.67 -  _count.increment(from_list->length(), from_list->total_capacity_bytes());
   43.68 -  from_list->clear();
   43.69 -
   43.70 -  verify_optional();
   43.71 -  from_list->verify_optional();
   43.72 -}
   43.73 -
   43.74 -void FreeRegionList::add_as_head(FreeRegionList* from_list) {
   43.75 -  add_as_head_or_tail(from_list, true /* as_head */);
   43.76 -}
   43.77 -
   43.78 -void FreeRegionList::add_as_tail(FreeRegionList* from_list) {
   43.79 -  add_as_head_or_tail(from_list, false /* as_head */);
   43.80 +  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, _head, _tail);
   43.81  }
   43.82  
   43.83  void FreeRegionList::remove_all() {
   43.84 @@ -191,11 +137,6 @@
   43.85      return;
   43.86    }
   43.87  
   43.88 -  if (is_empty()) {
   43.89 -    add_as_head(from_list);
   43.90 -    return;
   43.91 -  }
   43.92 -
   43.93    #ifdef ASSERT
   43.94    FreeRegionListIterator iter(from_list);
   43.95    while (iter.more_available()) {
   43.96 @@ -208,39 +149,45 @@
   43.97    }
   43.98    #endif // ASSERT
   43.99  
  43.100 -  HeapRegion* curr_to = _head;
  43.101 -  HeapRegion* curr_from = from_list->_head;
  43.102 +  if (is_empty()) {
  43.103 +    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
  43.104 +    _head = from_list->_head;
  43.105 +    _tail = from_list->_tail;
  43.106 +  } else {
  43.107 +    HeapRegion* curr_to = _head;
  43.108 +    HeapRegion* curr_from = from_list->_head;
  43.109  
  43.110 -  while (curr_from != NULL) {
  43.111 -    while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
  43.112 -      curr_to = curr_to->next();
  43.113 +    while (curr_from != NULL) {
  43.114 +      while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
  43.115 +        curr_to = curr_to->next();
  43.116 +      }
  43.117 +
  43.118 +      if (curr_to == NULL) {
  43.119 +        // The rest of the from list should be added as tail
  43.120 +        _tail->set_next(curr_from);
  43.121 +        curr_from->set_prev(_tail);
  43.122 +        curr_from = NULL;
  43.123 +      } else {
  43.124 +        HeapRegion* next_from = curr_from->next();
  43.125 +
  43.126 +        curr_from->set_next(curr_to);
  43.127 +        curr_from->set_prev(curr_to->prev());
  43.128 +        if (curr_to->prev() == NULL) {
  43.129 +          _head = curr_from;
  43.130 +        } else {
  43.131 +          curr_to->prev()->set_next(curr_from);
  43.132 +        }
  43.133 +        curr_to->set_prev(curr_from);
  43.134 +
  43.135 +        curr_from = next_from;
  43.136 +      }
  43.137      }
  43.138  
  43.139 -    if (curr_to == NULL) {
  43.140 -      // The rest of the from list should be added as tail
  43.141 -      _tail->set_next(curr_from);
  43.142 -      curr_from->set_prev(_tail);
  43.143 -      curr_from = NULL;
  43.144 -    } else {
  43.145 -      HeapRegion* next_from = curr_from->next();
  43.146 -
  43.147 -      curr_from->set_next(curr_to);
  43.148 -      curr_from->set_prev(curr_to->prev());
  43.149 -      if (curr_to->prev() == NULL) {
  43.150 -        _head = curr_from;
  43.151 -      } else {
  43.152 -        curr_to->prev()->set_next(curr_from);
  43.153 -      }
  43.154 -      curr_to->set_prev(curr_from);
  43.155 -
  43.156 -      curr_from = next_from;
  43.157 +    if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
  43.158 +      _tail = from_list->_tail;
  43.159      }
  43.160    }
  43.161  
  43.162 -  if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
  43.163 -    _tail = from_list->_tail;
  43.164 -  }
  43.165 -
  43.166    _count.increment(from_list->length(), from_list->total_capacity_bytes());
  43.167    from_list->clear();
  43.168  
  43.169 @@ -248,68 +195,59 @@
  43.170    from_list->verify_optional();
  43.171  }
  43.172  
  43.173 -void FreeRegionList::remove_all_pending(uint target_count) {
  43.174 +void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
  43.175    check_mt_safety();
  43.176 -  assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
  43.177 +  assert(num_regions >= 1, hrs_ext_msg(this, "pre-condition"));
  43.178    assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
  43.179  
  43.180    verify_optional();
  43.181    DEBUG_ONLY(uint old_length = length();)
  43.182  
  43.183 -  HeapRegion* curr = _head;
  43.184 +  HeapRegion* curr = first;
  43.185    uint count = 0;
  43.186 -  while (curr != NULL) {
  43.187 +  while (count < num_regions) {
  43.188      verify_region(curr);
  43.189      HeapRegion* next = curr->next();
  43.190      HeapRegion* prev = curr->prev();
  43.191  
  43.192 -    if (curr->pending_removal()) {
  43.193 -      assert(count < target_count,
  43.194 -             hrs_err_msg("[%s] should not come across more regions "
  43.195 -                         "pending for removal than target_count: %u",
  43.196 -                         name(), target_count));
  43.197 +    assert(count < num_regions,
  43.198 +           hrs_err_msg("[%s] should not come across more regions "
  43.199 +                       "pending for removal than num_regions: %u",
  43.200 +                       name(), num_regions));
  43.201  
  43.202 -      if (prev == NULL) {
  43.203 -        assert(_head == curr, hrs_ext_msg(this, "invariant"));
  43.204 -        _head = next;
  43.205 -      } else {
  43.206 -        assert(_head != curr, hrs_ext_msg(this, "invariant"));
  43.207 -        prev->set_next(next);
  43.208 -      }
  43.209 -      if (next == NULL) {
  43.210 -        assert(_tail == curr, hrs_ext_msg(this, "invariant"));
  43.211 -        _tail = prev;
  43.212 -      } else {
  43.213 -        assert(_tail != curr, hrs_ext_msg(this, "invariant"));
  43.214 -        next->set_prev(prev);
  43.215 -      }
  43.216 -      if (_last = curr) {
  43.217 -        _last = NULL;
  43.218 -      }
  43.219 +    if (prev == NULL) {
  43.220 +      assert(_head == curr, hrs_ext_msg(this, "invariant"));
  43.221 +      _head = next;
  43.222 +    } else {
  43.223 +      assert(_head != curr, hrs_ext_msg(this, "invariant"));
  43.224 +      prev->set_next(next);
  43.225 +    }
  43.226 +    if (next == NULL) {
  43.227 +      assert(_tail == curr, hrs_ext_msg(this, "invariant"));
  43.228 +      _tail = prev;
  43.229 +    } else {
  43.230 +      assert(_tail != curr, hrs_ext_msg(this, "invariant"));
  43.231 +      next->set_prev(prev);
  43.232 +    }
   43.233 +    if (_last == curr) {
  43.234 +      _last = NULL;
  43.235 +    }
  43.236  
  43.237 -      curr->set_next(NULL);
  43.238 -      curr->set_prev(NULL);
  43.239 -      remove(curr);
  43.240 -      curr->set_pending_removal(false);
  43.241 +    curr->set_next(NULL);
  43.242 +    curr->set_prev(NULL);
  43.243 +    remove(curr);
  43.244  
  43.245 -      count += 1;
  43.246 -
  43.247 -      // If we have come across the target number of regions we can
  43.248 -      // just bail out. However, for debugging purposes, we can just
  43.249 -      // carry on iterating to make sure there are not more regions
  43.250 -      // tagged with pending removal.
  43.251 -      DEBUG_ONLY(if (count == target_count) break;)
  43.252 -    }
  43.253 +    count++;
  43.254      curr = next;
  43.255    }
  43.256  
  43.257 -  assert(count == target_count,
  43.258 -         hrs_err_msg("[%s] count: %u should be == target_count: %u",
  43.259 -                     name(), count, target_count));
  43.260 -  assert(length() + target_count == old_length,
  43.261 +  assert(count == num_regions,
  43.262 +         hrs_err_msg("[%s] count: %u should be == num_regions: %u",
  43.263 +                     name(), count, num_regions));
  43.264 +  assert(length() + num_regions == old_length,
  43.265           hrs_err_msg("[%s] new length should be consistent "
  43.266 -                     "new length: %u old length: %u target_count: %u",
  43.267 -                     name(), length(), old_length, target_count));
  43.268 +                     "new length: %u old length: %u num_regions: %u",
  43.269 +                     name(), length(), old_length, num_regions));
  43.270  
  43.271    verify_optional();
  43.272  }
  43.273 @@ -348,10 +286,12 @@
  43.274        hr->print_on(out);
  43.275      }
  43.276    }
  43.277 +
  43.278 +  out->cr();
  43.279  }
  43.280  
  43.281  void FreeRegionList::verify_list() {
  43.282 -  HeapRegion* curr = head();
  43.283 +  HeapRegion* curr = _head;
  43.284    HeapRegion* prev1 = NULL;
  43.285    HeapRegion* prev0 = NULL;
  43.286    uint count = 0;
  43.287 @@ -379,7 +319,7 @@
  43.288      curr = curr->next();
  43.289    }
  43.290  
  43.291 -  guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index()));
  43.292 +  guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrs_index(), prev0->hrs_index()));
  43.293    guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
  43.294    guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
  43.295    guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  43.296 @@ -463,3 +403,41 @@
  43.297                "master humongous set MT safety protocol outside a safepoint");
  43.298    }
  43.299  }
  43.300 +
  43.301 +void FreeRegionList_test() {
  43.302 +  FreeRegionList l("test");
  43.303 +
  43.304 +  const uint num_regions_in_test = 5;
  43.305 +  // Create a fake heap. It does not need to be valid, as the HeapRegion constructor
  43.306 +  // does not access it.
  43.307 +  MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
  43.308 +  // Allocate a fake BOT because the HeapRegion constructor initializes
  43.309 +  // the BOT.
  43.310 +  size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size());
  43.311 +  HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
  43.312 +  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
  43.313 +  G1RegionToSpaceMapper* bot_storage =
  43.314 +    G1RegionToSpaceMapper::create_mapper(bot_rs,
  43.315 +                                         os::vm_page_size(),
  43.316 +                                         HeapRegion::GrainBytes,
  43.317 +                                         G1BlockOffsetSharedArray::N_bytes,
  43.318 +                                         mtGC);
  43.319 +  G1BlockOffsetSharedArray oa(heap, bot_storage);
  43.320 +  bot_storage->commit_regions(0, num_regions_in_test);
  43.321 +  HeapRegion hr0(0, &oa, heap);
  43.322 +  HeapRegion hr1(1, &oa, heap);
  43.323 +  HeapRegion hr2(2, &oa, heap);
  43.324 +  HeapRegion hr3(3, &oa, heap);
  43.325 +  HeapRegion hr4(4, &oa, heap);
  43.326 +  l.add_ordered(&hr1);
  43.327 +  l.add_ordered(&hr0);
  43.328 +  l.add_ordered(&hr3);
  43.329 +  l.add_ordered(&hr4);
  43.330 +  l.add_ordered(&hr2);
  43.331 +  assert(l.length() == num_regions_in_test, "wrong length");
  43.332 +  l.verify_list();
  43.333 +
  43.334 +  bot_storage->uncommit_regions(0, num_regions_in_test);
  43.335 +  delete bot_storage;
  43.336 +  FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC);
  43.337 +}
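
The reworked FreeRegionList::add_ordered(FreeRegionList*) above folds the old add_as_head path into a single ordered merge. The self-contained sketch below mirrors that splice logic with plain Node/List structs (not the HotSpot classes): walk the destination list, insert each source node before the first destination node with a larger index, and append whatever remains of the source at the tail.

    #include <cstdio>

    struct Node {
      unsigned idx;
      Node* prev;
      Node* next;
    };

    struct List {
      Node* head;
      Node* tail;

      // Ordered merge: consumes 'from', leaving it empty.
      void add_ordered(List* from) {
        if (from->head == nullptr) return;
        if (head == nullptr) {
          // Destination is empty: just take over the source list.
          head = from->head;
          tail = from->tail;
        } else {
          Node* curr_to = head;
          Node* curr_from = from->head;
          while (curr_from != nullptr) {
            while (curr_to != nullptr && curr_to->idx < curr_from->idx) {
              curr_to = curr_to->next;
            }
            if (curr_to == nullptr) {
              // The rest of the source list is appended at the tail.
              tail->next = curr_from;
              curr_from->prev = tail;
              curr_from = nullptr;
            } else {
              // Splice curr_from in right before curr_to.
              Node* next_from = curr_from->next;
              curr_from->next = curr_to;
              curr_from->prev = curr_to->prev;
              if (curr_to->prev == nullptr) {
                head = curr_from;
              } else {
                curr_to->prev->next = curr_from;
              }
              curr_to->prev = curr_from;
              curr_from = next_from;
            }
          }
          if (tail->idx < from->tail->idx) {
            tail = from->tail;
          }
        }
        from->head = nullptr;
        from->tail = nullptr;
      }
    };

    int main() {
      Node a = {0, nullptr, nullptr}, b = {2, nullptr, nullptr}, c = {5, nullptr, nullptr};
      Node d = {1, nullptr, nullptr}, e = {7, nullptr, nullptr};
      a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;   // dst: 0 <-> 2 <-> 5
      d.next = &e; e.prev = &d;                             // src: 1 <-> 7
      List dst = {&a, &c};
      List src = {&d, &e};
      dst.add_ordered(&src);
      for (Node* n = dst.head; n != nullptr; n = n->next) {
        std::printf("%u ", n->idx);
      }
      std::printf("\n");                                    // prints: 0 1 2 5 7
      return 0;
    }
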
    44.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Aug 22 13:24:04 2014 +0200
    44.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Tue Aug 26 13:38:33 2014 -0700
    44.3 @@ -162,7 +162,7 @@
    44.4  // diagnosing failures.
    44.5  class hrs_ext_msg : public hrs_err_msg {
    44.6  public:
    44.7 -  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s","") {
    44.8 +  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s", "") {
    44.9      set->fill_in_ext_msg(this, message);
   44.10    }
   44.11  };
   44.12 @@ -192,13 +192,9 @@
   44.13  };
   44.14  
   44.15  // A set that links all the regions added to it in a doubly-linked
   44.16 -// list. We should try to avoid doing operations that iterate over
   44.17 +// sorted list. We should try to avoid doing operations that iterate over
   44.18  // such lists in performance critical paths. Typically we should
   44.19 -// add / remove one region at a time or concatenate two lists. There are
   44.20 -// two ways to treat your lists, ordered and un-ordered. All un-ordered
   44.21 -// operations are done in constant time. To keep a list ordered only use
   44.22 -// add_ordered() to add elements to the list. If a list is not ordered
   44.23 -// from start, there is no way to sort it later.
   44.24 +// add / remove one region at a time or concatenate two lists.
   44.25  
   44.26  class FreeRegionListIterator;
   44.27  
   44.28 @@ -210,13 +206,13 @@
   44.29    HeapRegion* _tail;
   44.30  
   44.31    // _last is used to keep track of where we added an element the last
   44.32 -  // time in ordered lists. It helps to improve performance when adding
   44.33 -  // several ordered items in a row.
   44.34 +  // time. It helps to improve performance when adding several ordered items in a row.
   44.35    HeapRegion* _last;
   44.36  
   44.37    static uint _unrealistically_long_length;
   44.38  
   44.39 -  void add_as_head_or_tail(FreeRegionList* from_list, bool as_head);
   44.40 +  inline HeapRegion* remove_from_head_impl();
   44.41 +  inline HeapRegion* remove_from_tail_impl();
   44.42  
   44.43  protected:
   44.44    virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
   44.45 @@ -232,8 +228,11 @@
   44.46  
   44.47    void verify_list();
   44.48  
   44.49 -  HeapRegion* head() { return _head; }
   44.50 -  HeapRegion* tail() { return _tail; }
   44.51 +#ifdef ASSERT
   44.52 +  bool contains(HeapRegion* hr) const {
   44.53 +    return hr->containing_set() == this;
   44.54 +  }
   44.55 +#endif
   44.56  
   44.57    static void set_unrealistically_long_length(uint len);
   44.58  
   44.59 @@ -242,55 +241,20 @@
   44.60    // is determined by hrs_index.
   44.61    inline void add_ordered(HeapRegion* hr);
   44.62  
   44.63 -  // It adds hr to the list as the new head. The region should not be
   44.64 -  // a member of another set.
   44.65 -  inline void add_as_head(HeapRegion* hr);
   44.66 -
   44.67 -  // It adds hr to the list as the new tail. The region should not be
   44.68 -  // a member of another set.
   44.69 -  inline void add_as_tail(HeapRegion* hr);
   44.70 -
   44.71 -  // It removes and returns the head of the list. It assumes that the
   44.72 -  // list is not empty so it will return a non-NULL value.
   44.73 -  inline HeapRegion* remove_head();
   44.74 -
   44.75 -  // Convenience method.
   44.76 -  inline HeapRegion* remove_head_or_null();
   44.77 -
   44.78 -  // Removes and returns the last element (_tail) of the list. It assumes
   44.79 -  // that the list isn't empty so that it can return a non-NULL value.
   44.80 -  inline HeapRegion* remove_tail();
   44.81 -
   44.82 -  // Convenience method
   44.83 -  inline HeapRegion* remove_tail_or_null();
   44.84 -
   44.85    // Removes from head or tail based on the given argument.
   44.86 -  inline HeapRegion* remove_region(bool from_head);
   44.87 +  HeapRegion* remove_region(bool from_head);
   44.88  
   44.89    // Merge two ordered lists. The result is also ordered. The order is
   44.90    // determined by hrs_index.
   44.91    void add_ordered(FreeRegionList* from_list);
   44.92  
   44.93 -  // It moves the regions from from_list to this list and empties
   44.94 -  // from_list. The new regions will appear in the same order as they
   44.95 -  // were in from_list and be linked in the beginning of this list.
   44.96 -  void add_as_head(FreeRegionList* from_list);
   44.97 -
   44.98 -  // It moves the regions from from_list to this list and empties
   44.99 -  // from_list. The new regions will appear in the same order as they
  44.100 -  // were in from_list and be linked in the end of this list.
  44.101 -  void add_as_tail(FreeRegionList* from_list);
  44.102 -
  44.103    // It empties the list by removing all regions from it.
  44.104    void remove_all();
  44.105  
  44.106 -  // It removes all regions in the list that are pending for removal
  44.107 -  // (i.e., they have been tagged with "pending_removal"). The list
  44.108 -  // must not be empty, target_count should reflect the exact number
  44.109 -  // of regions that are pending for removal in the list, and
  44.110 -  // target_count should be > 1 (currently, we never need to remove a
  44.111 -  // single region using this).
  44.112 -  void remove_all_pending(uint target_count);
  44.113 +  // Remove all (contiguous) regions from first to first + num_regions - 1 from
  44.114 +  // this list.
  44.115 +  // num_regions must be > 1.
  44.116 +  void remove_starting_at(HeapRegion* first, uint num_regions);
  44.117  
  44.118    virtual void verify();
  44.119  
  44.120 @@ -298,7 +262,7 @@
  44.121  };
  44.122  
  44.123  // Iterator class that provides a convenient way to iterate over the
  44.124 -// regions of a HeapRegionLinkedList instance.
  44.125 +// regions of a FreeRegionList.
  44.126  
  44.127  class FreeRegionListIterator : public StackObj {
  44.128  private:
  44.129 @@ -324,7 +288,7 @@
  44.130    }
  44.131  
  44.132    FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) {
  44.133 -    _curr = list->head();
  44.134 +    _curr = list->_head;
  44.135    }
  44.136  };
  44.137  
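
The heapRegionSet.hpp hunk above keeps _last as an insertion hint for add_ordered(): when regions arrive in ascending hrs_index order, the search can resume from the previous insertion point instead of rescanning from _head. A rough, self-contained illustration of that idea, written as plain C++ over a list of integers with invented names (a sketch of the technique, not the HotSpot data structure):

    // Sketch only: ordered insertion with a "last insertion" hint, so that a run
    // of ascending insertions costs amortized O(1) per element instead of O(n).
    #include <cassert>
    #include <climits>
    #include <initializer_list>
    #include <list>

    class HintedOrderedList {
      std::list<int> _list;
      std::list<int>::iterator _last;   // position of the most recent insertion
      bool _has_last = false;

     public:
      void add_ordered(int v) {
        // Resume from the hint when the new value sorts after it; otherwise
        // fall back to a scan from the head.
        std::list<int>::iterator it =
            (_has_last && *_last <= v) ? _last : _list.begin();
        while (it != _list.end() && *it < v) {
          ++it;
        }
        _last = _list.insert(it, v);    // std::list iterators stay valid across insert
        _has_last = true;
      }

      bool is_sorted() const {
        int prev = INT_MIN;
        for (int v : _list) {
          if (v < prev) {
            return false;
          }
          prev = v;
        }
        return true;
      }
    };

    int main() {
      HintedOrderedList l;
      for (int v : {1, 0, 3, 4, 2}) {   // same insertion order as FreeRegionList_test above
        l.add_ordered(v);
      }
      assert(l.is_sorted());
      return 0;
    }

The out-of-order insertions at the end exercise the fallback path, which is the situation the new FreeRegionList_test checks with verify_list().
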
    45.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Aug 22 13:24:04 2014 +0200
    45.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Tue Aug 26 13:38:33 2014 -0700
    45.3 @@ -30,7 +30,8 @@
    45.4  inline void HeapRegionSetBase::add(HeapRegion* hr) {
    45.5    check_mt_safety();
    45.6    assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
    45.7 -  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
    45.8 +  assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
    45.9 +  assert(hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
   45.10  
   45.11    _count.increment(1u, hr->capacity());
   45.12    hr->set_containing_set(this);
   45.13 @@ -40,7 +41,8 @@
   45.14  inline void HeapRegionSetBase::remove(HeapRegion* hr) {
   45.15    check_mt_safety();
   45.16    verify_region(hr);
   45.17 -  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
   45.18 +  assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
   45.19 +  assert(hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
   45.20  
   45.21    hr->set_containing_set(NULL);
   45.22    assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
   45.23 @@ -48,8 +50,7 @@
   45.24  }
   45.25  
   45.26  inline void FreeRegionList::add_ordered(HeapRegion* hr) {
   45.27 -  check_mt_safety();
   45.28 -  assert((length() == 0 && _head == NULL && _tail == NULL) ||
   45.29 +  assert((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
   45.30           (length() >  0 && _head != NULL && _tail != NULL),
   45.31           hrs_ext_msg(this, "invariant"));
   45.32    // add() will verify the region and check mt safety.
   45.33 @@ -95,55 +96,48 @@
   45.34    _last = hr;
   45.35  }
   45.36  
   45.37 -inline void FreeRegionList::add_as_head(HeapRegion* hr) {
   45.38 -  assert((length() == 0 && _head == NULL && _tail == NULL) ||
   45.39 -         (length() >  0 && _head != NULL && _tail != NULL),
   45.40 -         hrs_ext_msg(this, "invariant"));
   45.41 -  // add() will verify the region and check mt safety.
   45.42 -  add(hr);
   45.43 -
   45.44 -  // Now link the region.
   45.45 -  if (_head != NULL) {
   45.46 -    hr->set_next(_head);
   45.47 -    _head->set_prev(hr);
   45.48 -  } else {
   45.49 -    _tail = hr;
   45.50 -  }
   45.51 -  _head = hr;
   45.52 -}
   45.53 -
   45.54 -inline void FreeRegionList::add_as_tail(HeapRegion* hr) {
   45.55 -  check_mt_safety();
   45.56 -  assert((length() == 0 && _head == NULL && _tail == NULL) ||
   45.57 -         (length() >  0 && _head != NULL && _tail != NULL),
   45.58 -         hrs_ext_msg(this, "invariant"));
   45.59 -  // add() will verify the region and check mt safety.
   45.60 -  add(hr);
   45.61 -
   45.62 -  // Now link the region.
   45.63 -  if (_tail != NULL) {
   45.64 -    _tail->set_next(hr);
   45.65 -    hr->set_prev(_tail);
   45.66 -  } else {
   45.67 -    _head = hr;
   45.68 -  }
   45.69 -  _tail = hr;
   45.70 -}
   45.71 -
   45.72 -inline HeapRegion* FreeRegionList::remove_head() {
   45.73 -  assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
   45.74 -  assert(length() > 0 && _head != NULL && _tail != NULL,
   45.75 -         hrs_ext_msg(this, "invariant"));
   45.76 -
   45.77 -  // We need to unlink it first.
   45.78 -  HeapRegion* hr = _head;
   45.79 -  _head = hr->next();
   45.80 +inline HeapRegion* FreeRegionList::remove_from_head_impl() {
   45.81 +  HeapRegion* result = _head;
   45.82 +  _head = result->next();
   45.83    if (_head == NULL) {
   45.84      _tail = NULL;
   45.85    } else {
   45.86      _head->set_prev(NULL);
   45.87    }
   45.88 -  hr->set_next(NULL);
   45.89 +  result->set_next(NULL);
   45.90 +  return result;
   45.91 +}
   45.92 +
   45.93 +inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
   45.94 +  HeapRegion* result = _tail;
   45.95 +
   45.96 +  _tail = result->prev();
   45.97 +  if (_tail == NULL) {
   45.98 +    _head = NULL;
   45.99 +  } else {
  45.100 +    _tail->set_next(NULL);
  45.101 +  }
  45.102 +  result->set_prev(NULL);
  45.103 +  return result;
  45.104 +}
  45.105 +
  45.106 +inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
  45.107 +  check_mt_safety();
  45.108 +  verify_optional();
  45.109 +
  45.110 +  if (is_empty()) {
  45.111 +    return NULL;
  45.112 +  }
  45.113 +  assert(length() > 0 && _head != NULL && _tail != NULL,
  45.114 +         hrs_ext_msg(this, "invariant"));
  45.115 +
  45.116 +  HeapRegion* hr;
  45.117 +
  45.118 +  if (from_head) {
  45.119 +    hr = remove_from_head_impl();
  45.120 +  } else {
  45.121 +    hr = remove_from_tail_impl();
  45.122 +  }
  45.123  
  45.124    if (_last == hr) {
  45.125      _last = NULL;
  45.126 @@ -154,56 +148,5 @@
  45.127    return hr;
  45.128  }
  45.129  
  45.130 -inline HeapRegion* FreeRegionList::remove_head_or_null() {
  45.131 -  check_mt_safety();
  45.132 -  if (!is_empty()) {
  45.133 -    return remove_head();
  45.134 -  } else {
  45.135 -    return NULL;
  45.136 -  }
  45.137 -}
  45.138 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
  45.139  
  45.140 -inline HeapRegion* FreeRegionList::remove_tail() {
  45.141 -  assert(!is_empty(), hrs_ext_msg(this, "The list should not be empty"));
  45.142 -  assert(length() > 0 && _head != NULL && _tail != NULL,
  45.143 -         hrs_ext_msg(this, "invariant"));
  45.144 -
  45.145 -  // We need to unlink it first
  45.146 -  HeapRegion* hr = _tail;
  45.147 -
  45.148 -  _tail = hr->prev();
  45.149 -  if (_tail == NULL) {
  45.150 -    _head = NULL;
  45.151 -  } else {
  45.152 -    _tail->set_next(NULL);
  45.153 -  }
  45.154 -  hr->set_prev(NULL);
  45.155 -
  45.156 -  if (_last == hr) {
  45.157 -    _last = NULL;
  45.158 -  }
  45.159 -
  45.160 -  // remove() will verify the region and check mt safety.
  45.161 -  remove(hr);
  45.162 -  return hr;
  45.163 -}
  45.164 -
  45.165 -inline HeapRegion* FreeRegionList::remove_tail_or_null() {
  45.166 -  check_mt_safety();
  45.167 -
  45.168 -  if (!is_empty()) {
  45.169 -    return remove_tail();
  45.170 -  } else {
  45.171 -    return NULL;
  45.172 -  }
  45.173 -}
  45.174 -
  45.175 -inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
  45.176 -  if (from_head) {
  45.177 -    return remove_head_or_null();
  45.178 -  } else {
  45.179 -    return remove_tail_or_null();
  45.180 -  }
  45.181 -}
  45.182 -
  45.183 -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
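
The inline file above folds remove_head(), remove_tail() and their *_or_null variants into a single remove_region(bool from_head) built on two small unlink helpers that mirror each other. A minimal stand-alone sketch of that shape, again in plain C++ with invented names rather than the HotSpot classes:

    // Sketch only: removal from either end of a doubly-linked list, returning
    // nullptr when the list is empty, in the shape of remove_region(bool).
    #include <cassert>

    struct Node { Node* prev = nullptr; Node* next = nullptr; };

    struct Deque {
      Node* head = nullptr;
      Node* tail = nullptr;

      Node* remove_from_head() {
        Node* result = head;
        head = result->next;
        if (head == nullptr) { tail = nullptr; } else { head->prev = nullptr; }
        result->next = nullptr;
        return result;
      }

      Node* remove_from_tail() {
        Node* result = tail;
        tail = result->prev;
        if (tail == nullptr) { head = nullptr; } else { tail->next = nullptr; }
        result->prev = nullptr;
        return result;
      }

      Node* remove(bool from_head) {
        if (head == nullptr) {
          return nullptr;               // empty list: nothing to unlink
        }
        return from_head ? remove_from_head() : remove_from_tail();
      }
    };

    int main() {
      Node a, b;
      Deque d;
      d.head = &a; d.tail = &b; a.next = &b; b.prev = &a;
      assert(d.remove(true) == &a);     // unlink from the head
      assert(d.remove(false) == &b);    // unlink from the tail
      assert(d.remove(true) == nullptr);
      return 0;
    }

Folding the empty-list check into one place is what lets the *_or_null variants disappear from the public interface.
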
    46.1 --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Aug 22 13:24:04 2014 +0200
    46.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Aug 26 13:38:33 2014 -0700
    46.3 @@ -43,10 +43,9 @@
    46.4    nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
    46.5                                                                                \
    46.6    nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
    46.7 -  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
    46.8 +  nonstatic_field(HeapRegionSeq,   _num_committed,      uint)                 \
    46.9                                                                                \
   46.10    nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   46.11 -  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   46.12    nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   46.13    nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   46.14    nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
    47.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Aug 22 13:24:04 2014 +0200
    47.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Aug 26 13:38:33 2014 -0700
    47.3 @@ -78,6 +78,7 @@
    47.4                          (HeapWord*)(heap_rs.base() + heap_rs.size()));
    47.5  
    47.6    CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
    47.7 +  barrier_set->initialize();
    47.8    _barrier_set = barrier_set;
    47.9    oopDesc::set_bs(_barrier_set);
   47.10    if (_barrier_set == NULL) {
    48.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp	Fri Aug 22 13:24:04 2014 +0200
    48.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp	Tue Aug 26 13:38:33 2014 -0700
    48.3 @@ -44,13 +44,6 @@
    48.4  // enumerate ref fields that have been modified (since the last
    48.5  // enumeration.)
    48.6  
    48.7 -size_t CardTableModRefBS::cards_required(size_t covered_words)
    48.8 -{
    48.9 -  // Add one for a guard card, used to detect errors.
   48.10 -  const size_t words = align_size_up(covered_words, card_size_in_words);
   48.11 -  return words / card_size_in_words + 1;
   48.12 -}
   48.13 -
   48.14  size_t CardTableModRefBS::compute_byte_map_size()
   48.15  {
   48.16    assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
   48.17 @@ -64,27 +57,50 @@
   48.18                                       int max_covered_regions):
   48.19    ModRefBarrierSet(max_covered_regions),
   48.20    _whole_heap(whole_heap),
   48.21 -  _guard_index(cards_required(whole_heap.word_size()) - 1),
   48.22 -  _last_valid_index(_guard_index - 1),
   48.23 +  _guard_index(0),
   48.24 +  _guard_region(),
   48.25 +  _last_valid_index(0),
   48.26    _page_size(os::vm_page_size()),
   48.27 -  _byte_map_size(compute_byte_map_size())
   48.28 +  _byte_map_size(0),
   48.29 +  _covered(NULL),
   48.30 +  _committed(NULL),
   48.31 +  _cur_covered_regions(0),
   48.32 +  _byte_map(NULL),
   48.33 +  byte_map_base(NULL),
   48.34 +  // LNC functionality
   48.35 +  _lowest_non_clean(NULL),
   48.36 +  _lowest_non_clean_chunk_size(NULL),
   48.37 +  _lowest_non_clean_base_chunk_index(NULL),
   48.38 +  _last_LNC_resizing_collection(NULL)
   48.39  {
   48.40    _kind = BarrierSet::CardTableModRef;
   48.41  
   48.42 -  HeapWord* low_bound  = _whole_heap.start();
   48.43 -  HeapWord* high_bound = _whole_heap.end();
   48.44 -  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
   48.45 -  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");
   48.46 +  assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
   48.47 +  assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
   48.48  
   48.49    assert(card_size <= 512, "card_size must be less than 512"); // why?
   48.50  
   48.51 -  _covered   = new MemRegion[max_covered_regions];
   48.52 -  _committed = new MemRegion[max_covered_regions];
   48.53 -  if (_covered == NULL || _committed == NULL) {
   48.54 -    vm_exit_during_initialization("couldn't alloc card table covered region set.");
   48.55 +  _covered   = new MemRegion[_max_covered_regions];
   48.56 +  if (_covered == NULL) {
   48.57 +    vm_exit_during_initialization("Could not allocate card table covered region set.");
   48.58 +  }
   48.59 +}
   48.60 +
   48.61 +void CardTableModRefBS::initialize() {
   48.62 +  _guard_index = cards_required(_whole_heap.word_size()) - 1;
   48.63 +  _last_valid_index = _guard_index - 1;
   48.64 +
   48.65 +  _byte_map_size = compute_byte_map_size();
   48.66 +
   48.67 +  HeapWord* low_bound  = _whole_heap.start();
   48.68 +  HeapWord* high_bound = _whole_heap.end();
   48.69 +
   48.70 +  _cur_covered_regions = 0;
   48.71 +  _committed = new MemRegion[_max_covered_regions];
   48.72 +  if (_committed == NULL) {
   48.73 +    vm_exit_during_initialization("Could not allocate card table committed region set.");
   48.74    }
   48.75  
   48.76 -  _cur_covered_regions = 0;
   48.77    const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
   48.78      MAX2(_page_size, (size_t) os::vm_allocation_granularity());
   48.79    ReservedSpace heap_rs(_byte_map_size, rs_align, false);
   48.80 @@ -114,20 +130,20 @@
   48.81                              !ExecMem, "card table last card");
   48.82    *guard_card = last_card;
   48.83  
   48.84 -   _lowest_non_clean =
   48.85 -    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
   48.86 +  _lowest_non_clean =
   48.87 +    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
   48.88    _lowest_non_clean_chunk_size =
   48.89 -    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
   48.90 +    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
   48.91    _lowest_non_clean_base_chunk_index =
   48.92 -    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
   48.93 +    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
   48.94    _last_LNC_resizing_collection =
   48.95 -    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
   48.96 +    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
   48.97    if (_lowest_non_clean == NULL
   48.98        || _lowest_non_clean_chunk_size == NULL
   48.99        || _lowest_non_clean_base_chunk_index == NULL
  48.100        || _last_LNC_resizing_collection == NULL)
  48.101      vm_exit_during_initialization("couldn't allocate an LNC array.");
  48.102 -  for (int i = 0; i < max_covered_regions; i++) {
  48.103 +  for (int i = 0; i < _max_covered_regions; i++) {
  48.104      _lowest_non_clean[i] = NULL;
  48.105      _lowest_non_clean_chunk_size[i] = 0;
  48.106      _last_LNC_resizing_collection[i] = -1;
  48.107 @@ -650,7 +666,7 @@
  48.108                                        jbyte val, bool val_equals) {
  48.109    jbyte* start    = byte_for(mr.start());
  48.110    jbyte* end      = byte_for(mr.last());
  48.111 -  bool   failures = false;
  48.112 +  bool failures = false;
  48.113    for (jbyte* curr = start; curr <= end; ++curr) {
  48.114      jbyte curr_val = *curr;
  48.115      bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    49.1 --- a/src/share/vm/memory/cardTableModRefBS.hpp	Fri Aug 22 13:24:04 2014 +0200
    49.2 +++ b/src/share/vm/memory/cardTableModRefBS.hpp	Tue Aug 26 13:38:33 2014 -0700
    49.3 @@ -96,12 +96,12 @@
    49.4    // The declaration order of these const fields is important; see the
    49.5    // constructor before changing.
    49.6    const MemRegion _whole_heap;       // the region covered by the card table
    49.7 -  const size_t    _guard_index;      // index of very last element in the card
    49.8 +  size_t          _guard_index;      // index of very last element in the card
    49.9                                       // table; it is set to a guard value
   49.10                                       // (last_card) and should never be modified
   49.11 -  const size_t    _last_valid_index; // index of the last valid element
   49.12 +  size_t          _last_valid_index; // index of the last valid element
   49.13    const size_t    _page_size;        // page size used when mapping _byte_map
   49.14 -  const size_t    _byte_map_size;    // in bytes
   49.15 +  size_t          _byte_map_size;    // in bytes
   49.16    jbyte*          _byte_map;         // the card marking array
   49.17  
   49.18    int _cur_covered_regions;
   49.19 @@ -123,7 +123,12 @@
   49.20   protected:
   49.21    // Initialization utilities; covered_words is the size of the covered region
   49.22    // in, um, words.
   49.23 -  inline size_t cards_required(size_t covered_words);
   49.24 +  inline size_t cards_required(size_t covered_words) {
   49.25 +    // Add one for a guard card, used to detect errors.
   49.26 +    const size_t words = align_size_up(covered_words, card_size_in_words);
   49.27 +    return words / card_size_in_words + 1;
   49.28 +  }
   49.29 +
   49.30    inline size_t compute_byte_map_size();
   49.31  
   49.32    // Finds and return the index of the region, if any, to which the given
   49.33 @@ -137,7 +142,7 @@
   49.34    int find_covering_region_containing(HeapWord* addr);
   49.35  
   49.36    // Resize one of the regions covered by the remembered set.
   49.37 -  void resize_covered_region(MemRegion new_region);
   49.38 +  virtual void resize_covered_region(MemRegion new_region);
   49.39  
   49.40    // Returns the leftmost end of a committed region corresponding to a
   49.41    // covered region before covered region "ind", or else "NULL" if "ind" is
   49.42 @@ -282,6 +287,8 @@
   49.43    CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
   49.44    ~CardTableModRefBS();
   49.45  
   49.46 +  virtual void initialize();
   49.47 +
   49.48    // *** Barrier set functions.
   49.49  
   49.50    bool has_write_ref_pre_barrier() { return false; }
    50.1 --- a/src/share/vm/memory/cardTableRS.cpp	Fri Aug 22 13:24:04 2014 +0200
    50.2 +++ b/src/share/vm/memory/cardTableRS.cpp	Tue Aug 26 13:38:33 2014 -0700
    50.3 @@ -53,6 +53,7 @@
    50.4  #else
    50.5    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
    50.6  #endif
    50.7 +  _ct_bs->initialize();
    50.8    set_bs(_ct_bs);
    50.9    _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
   50.10                           mtGC, 0, AllocFailStrategy::RETURN_NULL);
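
The CardTableModRefBS hunks above, together with the new barrier_set->initialize() and _ct_bs->initialize() calls at the construction sites, split card-table setup into a cheap constructor (fields reset to safe defaults) plus a virtual initialize() run after construction. One plausible reason for such a split, not stated in the changeset itself and offered here only as an assumption, is that a virtual call made from a base-class constructor never reaches a subclass override, whereas a post-construction initialize() can rely on full virtual dispatch (for example to the now-virtual resize_covered_region()). A self-contained sketch with invented class names:

    // Sketch only: why heavy setup is commonly moved out of a base constructor
    // into a separate virtual initialize() that is called after construction.
    #include <iostream>

    class BarrierSetLike {
     public:
      BarrierSetLike() {
        // Here the object's dynamic type is still BarrierSetLike, so this call
        // binds to the base version even if a subclass overrides setup().
        setup();
      }
      virtual ~BarrierSetLike() {}
      virtual void initialize() { setup(); }   // called once the object is complete
      virtual void setup() { std::cout << "base setup" << std::endl; }
    };

    class CardTableLike : public BarrierSetLike {
     public:
      void setup() override { std::cout << "card-table setup" << std::endl; }
    };

    int main() {
      CardTableLike ct;   // prints "base setup": the constructor cannot see the override
      ct.initialize();    // prints "card-table setup": virtual dispatch now works
      return 0;
    }
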
    51.1 --- a/src/share/vm/opto/c2_globals.hpp	Fri Aug 22 13:24:04 2014 +0200
    51.2 +++ b/src/share/vm/opto/c2_globals.hpp	Tue Aug 26 13:38:33 2014 -0700
    51.3 @@ -653,9 +653,6 @@
    51.4    product(bool, UseMathExactIntrinsics, true,                               \
    51.5            "Enables intrinsification of various java.lang.Math functions")   \
    51.6                                                                              \
    51.7 -  experimental(bool, ReplaceInParentMaps, false,                            \
    51.8 -          "Propagate type improvements in callers of inlinee if possible")  \
    51.9 -                                                                            \
   51.10    product(bool, UseTypeSpeculation, true,                                   \
   51.11            "Speculatively propagate types from profiles")                    \
   51.12                                                                              \
    52.1 --- a/src/share/vm/opto/callGenerator.cpp	Fri Aug 22 13:24:04 2014 +0200
    52.2 +++ b/src/share/vm/opto/callGenerator.cpp	Tue Aug 26 13:38:33 2014 -0700
    52.3 @@ -63,12 +63,12 @@
    52.4    }
    52.5  
    52.6    virtual bool      is_parse() const           { return true; }
    52.7 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
    52.8 +  virtual JVMState* generate(JVMState* jvms);
    52.9    int is_osr() { return _is_osr; }
   52.10  
   52.11  };
   52.12  
   52.13 -JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   52.14 +JVMState* ParseGenerator::generate(JVMState* jvms) {
   52.15    Compile* C = Compile::current();
   52.16  
   52.17    if (is_osr()) {
   52.18 @@ -80,7 +80,7 @@
   52.19      return NULL;  // bailing out of the compile; do not try to parse
   52.20    }
   52.21  
   52.22 -  Parse parser(jvms, method(), _expected_uses, parent_parser);
   52.23 +  Parse parser(jvms, method(), _expected_uses);
   52.24    // Grab signature for matching/allocation
   52.25  #ifdef ASSERT
   52.26    if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
   52.27 @@ -119,12 +119,12 @@
   52.28        _separate_io_proj(separate_io_proj)
   52.29    {
   52.30    }
   52.31 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   52.32 +  virtual JVMState* generate(JVMState* jvms);
   52.33  
   52.34    CallStaticJavaNode* call_node() const { return _call_node; }
   52.35  };
   52.36  
   52.37 -JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   52.38 +JVMState* DirectCallGenerator::generate(JVMState* jvms) {
   52.39    GraphKit kit(jvms);
   52.40    bool is_static = method()->is_static();
   52.41    address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
   52.42 @@ -171,10 +171,10 @@
   52.43             vtable_index >= 0, "either invalid or usable");
   52.44    }
   52.45    virtual bool      is_virtual() const          { return true; }
   52.46 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   52.47 +  virtual JVMState* generate(JVMState* jvms);
   52.48  };
   52.49  
   52.50 -JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   52.51 +JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
   52.52    GraphKit kit(jvms);
   52.53    Node* receiver = kit.argument(0);
   52.54  
   52.55 @@ -276,7 +276,7 @@
   52.56    // Convert the CallStaticJava into an inline
   52.57    virtual void do_late_inline();
   52.58  
   52.59 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
   52.60 +  virtual JVMState* generate(JVMState* jvms) {
   52.61      Compile *C = Compile::current();
   52.62      C->print_inlining_skip(this);
   52.63  
   52.64 @@ -290,7 +290,7 @@
   52.65      // that the late inlining logic can distinguish between fall
   52.66      // through and exceptional uses of the memory and io projections
   52.67      // as is done for allocations and macro expansion.
   52.68 -    return DirectCallGenerator::generate(jvms, parent_parser);
   52.69 +    return DirectCallGenerator::generate(jvms);
   52.70    }
   52.71  
   52.72    virtual void print_inlining_late(const char* msg) {
   52.73 @@ -389,7 +389,7 @@
   52.74    }
   52.75  
   52.76    // Now perform the inling using the synthesized JVMState
   52.77 -  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
   52.78 +  JVMState* new_jvms = _inline_cg->generate(jvms);
   52.79    if (new_jvms == NULL)  return;  // no change
   52.80    if (C->failing())      return;
   52.81  
   52.82 @@ -407,7 +407,7 @@
   52.83    C->env()->notice_inlined_method(_inline_cg->method());
   52.84    C->set_inlining_progress(true);
   52.85  
   52.86 -  kit.replace_call(call, result);
   52.87 +  kit.replace_call(call, result, true);
   52.88  }
   52.89  
   52.90  
   52.91 @@ -429,8 +429,8 @@
   52.92  
   52.93    virtual bool is_mh_late_inline() const { return true; }
   52.94  
   52.95 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
   52.96 -    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
   52.97 +  virtual JVMState* generate(JVMState* jvms) {
   52.98 +    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
   52.99      if (_input_not_const) {
  52.100        // inlining won't be possible so no need to enqueue right now.
  52.101        call_node()->set_generator(this);
  52.102 @@ -477,13 +477,13 @@
  52.103    LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
  52.104      LateInlineCallGenerator(method, inline_cg) {}
  52.105  
  52.106 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
  52.107 +  virtual JVMState* generate(JVMState* jvms) {
  52.108      Compile *C = Compile::current();
  52.109      C->print_inlining_skip(this);
  52.110  
  52.111      C->add_string_late_inline(this);
  52.112  
  52.113 -    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
  52.114 +    JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
  52.115      return new_jvms;
  52.116    }
  52.117  
  52.118 @@ -500,13 +500,13 @@
  52.119    LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
  52.120      LateInlineCallGenerator(method, inline_cg) {}
  52.121  
  52.122 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
  52.123 +  virtual JVMState* generate(JVMState* jvms) {
  52.124      Compile *C = Compile::current();
  52.125      C->print_inlining_skip(this);
  52.126  
  52.127      C->add_boxing_late_inline(this);
  52.128  
  52.129 -    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
  52.130 +    JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
  52.131      return new_jvms;
  52.132    }
  52.133  };
  52.134 @@ -542,7 +542,7 @@
  52.135    virtual bool      is_virtual() const          { return _is_virtual; }
  52.136    virtual bool      is_deferred() const         { return true; }
  52.137  
  52.138 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  52.139 +  virtual JVMState* generate(JVMState* jvms);
  52.140  };
  52.141  
  52.142  
  52.143 @@ -552,12 +552,12 @@
  52.144    return new WarmCallGenerator(ci, if_cold, if_hot);
  52.145  }
  52.146  
  52.147 -JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  52.148 +JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  52.149    Compile* C = Compile::current();
  52.150    if (C->log() != NULL) {
  52.151      C->log()->elem("warm_call bci='%d'", jvms->bci());
  52.152    }
  52.153 -  jvms = _if_cold->generate(jvms, parent_parser);
  52.154 +  jvms = _if_cold->generate(jvms);
  52.155    if (jvms != NULL) {
  52.156      Node* m = jvms->map()->control();
  52.157      if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
  52.158 @@ -618,7 +618,7 @@
  52.159    virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  52.160    virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
  52.161  
  52.162 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  52.163 +  virtual JVMState* generate(JVMState* jvms);
  52.164  };
  52.165  
  52.166  
  52.167 @@ -630,7 +630,7 @@
  52.168  }
  52.169  
  52.170  
  52.171 -JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  52.172 +JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  52.173    GraphKit kit(jvms);
  52.174    PhaseGVN& gvn = kit.gvn();
  52.175    // We need an explicit receiver null_check before checking its type.
  52.176 @@ -648,6 +648,10 @@
  52.177      return kit.transfer_exceptions_into_jvms();
  52.178    }
  52.179  
  52.180 +  // Make a copy of the replaced nodes in case we need to restore them
  52.181 +  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  52.182 +  replaced_nodes.clone();
  52.183 +
  52.184    Node* exact_receiver = receiver;  // will get updated in place...
  52.185    Node* slow_ctl = kit.type_check_receiver(receiver,
  52.186                                             _predicted_receiver, _hit_prob,
  52.187 @@ -658,7 +662,7 @@
  52.188    { PreserveJVMState pjvms(&kit);
  52.189      kit.set_control(slow_ctl);
  52.190      if (!kit.stopped()) {
  52.191 -      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
  52.192 +      slow_jvms = _if_missed->generate(kit.sync_jvms());
  52.193        if (kit.failing())
  52.194          return NULL;  // might happen because of NodeCountInliningCutoff
  52.195        assert(slow_jvms != NULL, "must be");
  52.196 @@ -679,12 +683,12 @@
  52.197    kit.replace_in_map(receiver, exact_receiver);
  52.198  
  52.199    // Make the hot call:
  52.200 -  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
  52.201 +  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  52.202    if (new_jvms == NULL) {
  52.203      // Inline failed, so make a direct call.
  52.204      assert(_if_hit->is_inline(), "must have been a failed inline");
  52.205      CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
  52.206 -    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
  52.207 +    new_jvms = cg->generate(kit.sync_jvms());
  52.208    }
  52.209    kit.add_exception_states_from(new_jvms);
  52.210    kit.set_jvms(new_jvms);
  52.211 @@ -701,6 +705,11 @@
  52.212      return kit.transfer_exceptions_into_jvms();
  52.213    }
  52.214  
  52.215 +  // There are 2 branches and the replaced nodes are only valid on
  52.216 +  // one: restore the replaced nodes to what they were before the
  52.217 +  // branch.
  52.218 +  kit.map()->set_replaced_nodes(replaced_nodes);
  52.219 +
  52.220    // Finish the diamond.
  52.221    kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  52.222    RegionNode* region = new (kit.C) RegionNode(3);
  52.223 @@ -891,7 +900,7 @@
  52.224    virtual bool      is_inlined()   const    { return true; }
  52.225    virtual bool      is_intrinsic() const    { return true; }
  52.226  
  52.227 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  52.228 +  virtual JVMState* generate(JVMState* jvms);
  52.229  };
  52.230  
  52.231  
  52.232 @@ -901,7 +910,7 @@
  52.233  }
  52.234  
  52.235  
  52.236 -JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  52.237 +JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  52.238    // The code we want to generate here is:
  52.239    //    if (receiver == NULL)
  52.240    //        uncommon_Trap
  52.241 @@ -961,7 +970,7 @@
  52.242      if (!kit.stopped()) {
  52.243        PreserveJVMState pjvms(&kit);
  52.244        // Generate intrinsic code:
  52.245 -      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
  52.246 +      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
  52.247        if (new_jvms == NULL) {
  52.248          // Intrinsic failed, use normal compilation path for this predicate.
  52.249          slow_region->add_req(kit.control());
  52.250 @@ -986,7 +995,7 @@
  52.251      PreserveJVMState pjvms(&kit);
  52.252      // Generate normal compilation code:
  52.253      kit.set_control(gvn.transform(slow_region));
  52.254 -    JVMState* new_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
  52.255 +    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
  52.256      if (kit.failing())
  52.257        return NULL;  // might happen because of NodeCountInliningCutoff
  52.258      assert(new_jvms != NULL, "must be");
  52.259 @@ -1093,7 +1102,7 @@
  52.260    virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  52.261    virtual bool      is_trap() const             { return true; }
  52.262  
  52.263 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  52.264 +  virtual JVMState* generate(JVMState* jvms);
  52.265  };
  52.266  
  52.267  
  52.268 @@ -1105,7 +1114,7 @@
  52.269  }
  52.270  
  52.271  
  52.272 -JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  52.273 +JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  52.274    GraphKit kit(jvms);
  52.275    // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  52.276    int nargs = method()->arg_size();
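
In PredictedCallGenerator::generate() above, the replaced-nodes list is cloned before the type-speculation diamond and reinstated at the merge, because a node improvement recorded while parsing the hit branch is not valid on the miss branch. A tiny sketch of that save/restore discipline on a stand-in data type (illustrative only, not the compiler's ReplacedNodes class):

    // Sketch only: state recorded on one branch of a diamond must not leak past
    // the merge point, so a copy taken before the split is restored afterwards.
    #include <cassert>
    #include <utility>
    #include <vector>

    typedef std::vector<std::pair<int, int> > Replacements;   // (old node id, new node id)

    Replacements speculate(Replacements current, bool receiver_matches) {
      Replacements saved = current;    // counterpart of replaced_nodes.clone()
      if (receiver_matches) {
        current.push_back(std::make_pair(7, 42));   // improvement valid only on the hit path
      }
      // ... hit and miss paths are joined here ...
      current = saved;                 // counterpart of kit.map()->set_replaced_nodes(...)
      return current;
    }

    int main() {
      Replacements before;
      assert(speculate(before, true).empty());
      assert(speculate(before, false).empty());
      return 0;
    }
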
    53.1 --- a/src/share/vm/opto/callGenerator.hpp	Fri Aug 22 13:24:04 2014 +0200
    53.2 +++ b/src/share/vm/opto/callGenerator.hpp	Tue Aug 26 13:38:33 2014 -0700
    53.3 @@ -31,8 +31,6 @@
    53.4  #include "opto/type.hpp"
    53.5  #include "runtime/deoptimization.hpp"
    53.6  
    53.7 -class Parse;
    53.8 -
    53.9  //---------------------------CallGenerator-------------------------------------
   53.10  // The subclasses of this class handle generation of ideal nodes for
   53.11  // call sites and method entry points.
   53.12 @@ -112,7 +110,7 @@
   53.13    //
   53.14    // If the result is NULL, it means that this CallGenerator was unable
   53.15    // to handle the given call, and another CallGenerator should be consulted.
   53.16 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
   53.17 +  virtual JVMState* generate(JVMState* jvms) = 0;
   53.18  
   53.19    // How to generate a call site that is inlined:
   53.20    static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
    54.1 --- a/src/share/vm/opto/callnode.cpp	Fri Aug 22 13:24:04 2014 +0200
    54.2 +++ b/src/share/vm/opto/callnode.cpp	Tue Aug 26 13:38:33 2014 -0700
    54.3 @@ -1089,6 +1089,7 @@
    54.4  #ifndef PRODUCT
    54.5  void SafePointNode::dump_spec(outputStream *st) const {
    54.6    st->print(" SafePoint ");
    54.7 +  _replaced_nodes.dump(st);
    54.8  }
    54.9  #endif
   54.10  
    55.1 --- a/src/share/vm/opto/callnode.hpp	Fri Aug 22 13:24:04 2014 +0200
    55.2 +++ b/src/share/vm/opto/callnode.hpp	Tue Aug 26 13:38:33 2014 -0700
    55.3 @@ -30,6 +30,7 @@
    55.4  #include "opto/multnode.hpp"
    55.5  #include "opto/opcodes.hpp"
    55.6  #include "opto/phaseX.hpp"
    55.7 +#include "opto/replacednodes.hpp"
    55.8  #include "opto/type.hpp"
    55.9  
   55.10  // Portions of code courtesy of Clifford Click
   55.11 @@ -335,6 +336,7 @@
   55.12    OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
   55.13    JVMState* const _jvms;      // Pointer to list of JVM State objects
   55.14    const TypePtr*  _adr_type;  // What type of memory does this node produce?
   55.15 +  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
   55.16  
   55.17    // Many calls take *all* of memory as input,
   55.18    // but some produce a limited subset of that memory as output.
   55.19 @@ -426,6 +428,37 @@
   55.20    void               set_next_exception(SafePointNode* n);
   55.21    bool                   has_exceptions() const { return next_exception() != NULL; }
   55.22  
   55.23 +  // Helper methods to operate on replaced nodes
   55.24 +  ReplacedNodes replaced_nodes() const {
   55.25 +    return _replaced_nodes;
   55.26 +  }
   55.27 +
   55.28 +  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
   55.29 +    _replaced_nodes = replaced_nodes;
   55.30 +  }
   55.31 +
   55.32 +  void clone_replaced_nodes() {
   55.33 +    _replaced_nodes.clone();
   55.34 +  }
   55.35 +  void record_replaced_node(Node* initial, Node* improved) {
   55.36 +    _replaced_nodes.record(initial, improved);
   55.37 +  }
   55.38 +  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
   55.39 +    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
   55.40 +  }
   55.41 +  void delete_replaced_nodes() {
   55.42 +    _replaced_nodes.reset();
   55.43 +  }
   55.44 +  void apply_replaced_nodes() {
   55.45 +    _replaced_nodes.apply(this);
   55.46 +  }
   55.47 +  void merge_replaced_nodes_with(SafePointNode* sfpt) {
   55.48 +    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
   55.49 +  }
   55.50 +  bool has_replaced_nodes() const {
   55.51 +    return !_replaced_nodes.is_empty();
   55.52 +  }
   55.53 +
   55.54    // Standard Node stuff
   55.55    virtual int            Opcode() const;
   55.56    virtual bool           pinned() const { return true; }
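
The SafePointNode members added above are thin wrappers around a ReplacedNodes helper that lives in opto/replacednodes.hpp and is not part of this hunk. The stand-in below is therefore only a guess at the bookkeeping, inferred from the method names: it records (initial, improved) node pairs, and merge_with() is assumed to keep just the entries both incoming paths agree on, since only those replacements remain safe after a control-flow merge.

    // Sketch only: a simplified stand-in for ReplacedNodes, using integer node
    // ids instead of Node pointers. Not the HotSpot implementation.
    #include <algorithm>
    #include <cassert>
    #include <utility>
    #include <vector>

    class ReplacedNodesSketch {
      std::vector<std::pair<int, int> > _entries;   // (initial node id, improved node id)

     public:
      void record(int initial, int improved) {
        _entries.push_back(std::make_pair(initial, improved));
      }

      // Assumed semantics: keep only replacements present on both merging paths.
      void merge_with(const ReplacedNodesSketch& other) {
        std::vector<std::pair<int, int> > merged;
        for (const std::pair<int, int>& e : _entries) {
          if (std::find(other._entries.begin(), other._entries.end(), e) !=
              other._entries.end()) {
            merged.push_back(e);
          }
        }
        _entries.swap(merged);
      }

      void reset()          { _entries.clear(); }
      bool is_empty() const { return _entries.empty(); }
    };

    int main() {
      ReplacedNodesSketch a, b;
      a.record(1, 10);
      a.record(2, 20);
      b.record(1, 10);
      a.merge_with(b);        // only (1, 10) is common to both paths
      assert(!a.is_empty());
      a.reset();
      assert(a.is_empty());
      return 0;
    }
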
    56.1 --- a/src/share/vm/opto/compile.cpp	Fri Aug 22 13:24:04 2014 +0200
    56.2 +++ b/src/share/vm/opto/compile.cpp	Tue Aug 26 13:38:33 2014 -0700
    56.3 @@ -391,6 +391,11 @@
    56.4    uint next = 0;
    56.5    while (next < useful.size()) {
    56.6      Node *n = useful.at(next++);
    56.7 +    if (n->is_SafePoint()) {
    56.8 +      // We're done with a parsing phase. Replaced nodes are not valid
    56.9 +      // beyond that point.
   56.10 +      n->as_SafePoint()->delete_replaced_nodes();
   56.11 +    }
   56.12      // Use raw traversal of out edges since this code removes out edges
   56.13      int max = n->outcnt();
   56.14      for (int j = 0; j < max; ++j) {
   56.15 @@ -670,7 +675,6 @@
   56.16                    _inlining_incrementally(false),
   56.17                    _print_inlining_list(NULL),
   56.18                    _print_inlining_idx(0),
   56.19 -                  _preserve_jvm_state(0),
   56.20                    _interpreter_frame_size(0) {
   56.21    C = this;
   56.22  
   56.23 @@ -782,7 +786,7 @@
   56.24        return;
   56.25      }
   56.26      JVMState* jvms = build_start_state(start(), tf());
   56.27 -    if ((jvms = cg->generate(jvms, NULL)) == NULL) {
   56.28 +    if ((jvms = cg->generate(jvms)) == NULL) {
   56.29        record_method_not_compilable("method parse failed");
   56.30        return;
   56.31      }
   56.32 @@ -977,7 +981,6 @@
   56.33      _inlining_incrementally(false),
   56.34      _print_inlining_list(NULL),
   56.35      _print_inlining_idx(0),
   56.36 -    _preserve_jvm_state(0),
   56.37      _allowed_reasons(0),
   56.38      _interpreter_frame_size(0) {
   56.39    C = this;
   56.40 @@ -1910,6 +1913,8 @@
   56.41      for_igvn()->clear();
   56.42      gvn->replace_with(&igvn);
   56.43  
   56.44 +    _late_inlines_pos = _late_inlines.length();
   56.45 +
   56.46      while (_boxing_late_inlines.length() > 0) {
   56.47        CallGenerator* cg = _boxing_late_inlines.pop();
   56.48        cg->do_late_inline();
   56.49 @@ -1973,8 +1978,8 @@
   56.50      if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
   56.51        if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
   56.52          // PhaseIdealLoop is expensive so we only try it once we are
   56.53 -        // out of loop and we only try it again if the previous helped
   56.54 -        // got the number of nodes down significantly
   56.55 +        // out of live nodes and we only try it again if the previous
   56.56 +        // helped get the number of nodes down significantly
   56.57          PhaseIdealLoop ideal_loop( igvn, false, true );
   56.58          if (failing())  return;
   56.59          low_live_nodes = live_nodes();
   56.60 @@ -2066,6 +2071,10 @@
   56.61      // Inline valueOf() methods now.
   56.62      inline_boxing_calls(igvn);
   56.63  
   56.64 +    if (AlwaysIncrementalInline) {
   56.65 +      inline_incrementally(igvn);
   56.66 +    }
   56.67 +
   56.68      print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
   56.69  
   56.70      if (failing())  return;
    57.1 --- a/src/share/vm/opto/compile.hpp	Fri Aug 22 13:24:04 2014 +0200
    57.2 +++ b/src/share/vm/opto/compile.hpp	Tue Aug 26 13:38:33 2014 -0700
    57.3 @@ -429,9 +429,6 @@
    57.4    // Remove the speculative part of types and clean up the graph
    57.5    void remove_speculative_types(PhaseIterGVN &igvn);
    57.6  
    57.7 -  // Are we within a PreserveJVMState block?
    57.8 -  int _preserve_jvm_state;
    57.9 -
   57.10    void* _replay_inline_data; // Pointer to data loaded from file
   57.11  
   57.12   public:
   57.13 @@ -1196,21 +1193,6 @@
   57.14  
   57.15    // Auxiliary method for randomized fuzzing/stressing
   57.16    static bool randomized_select(int count);
   57.17 -
   57.18 -  // enter a PreserveJVMState block
   57.19 -  void inc_preserve_jvm_state() {
   57.20 -    _preserve_jvm_state++;
   57.21 -  }
   57.22 -
   57.23 -  // exit a PreserveJVMState block
   57.24 -  void dec_preserve_jvm_state() {
   57.25 -    _preserve_jvm_state--;
   57.26 -    assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
   57.27 -  }
   57.28 -
   57.29 -  bool has_preserve_jvm_state() const {
   57.30 -    return _preserve_jvm_state > 0;
   57.31 -  }
   57.32  };
   57.33  
   57.34  #endif // SHARE_VM_OPTO_COMPILE_HPP
    58.1 --- a/src/share/vm/opto/doCall.cpp	Fri Aug 22 13:24:04 2014 +0200
    58.2 +++ b/src/share/vm/opto/doCall.cpp	Tue Aug 26 13:38:33 2014 -0700
    58.3 @@ -523,7 +523,7 @@
    58.4    // because exceptions don't return to the call site.)
    58.5    profile_call(receiver);
    58.6  
    58.7 -  JVMState* new_jvms = cg->generate(jvms, this);
    58.8 +  JVMState* new_jvms = cg->generate(jvms);
    58.9    if (new_jvms == NULL) {
   58.10      // When inlining attempt fails (e.g., too many arguments),
   58.11      // it may contaminate the current compile state, making it
   58.12 @@ -537,7 +537,7 @@
   58.13      // intrinsic was expecting to optimize. Should always be possible to
   58.14      // get a normal java call that may inline in that case
   58.15      cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
   58.16 -    if ((new_jvms = cg->generate(jvms, this)) == NULL) {
   58.17 +    if ((new_jvms = cg->generate(jvms)) == NULL) {
   58.18        guarantee(failing(), "call failed to generate:  calls should work");
   58.19        return;
   58.20      }
    59.1 --- a/src/share/vm/opto/graphKit.cpp	Fri Aug 22 13:24:04 2014 +0200
    59.2 +++ b/src/share/vm/opto/graphKit.cpp	Tue Aug 26 13:38:33 2014 -0700
    59.3 @@ -428,6 +428,7 @@
    59.4        }
    59.5      }
    59.6    }
    59.7 +  phi_map->merge_replaced_nodes_with(ex_map);
    59.8  }
    59.9  
   59.10  //--------------------------use_exception_state--------------------------------
   59.11 @@ -641,7 +642,6 @@
   59.12    _map    = kit->map();   // preserve the map
   59.13    _sp     = kit->sp();
   59.14    kit->set_map(clone_map ? kit->clone_map() : NULL);
   59.15 -  Compile::current()->inc_preserve_jvm_state();
   59.16  #ifdef ASSERT
   59.17    _bci    = kit->bci();
   59.18    Parse* parser = kit->is_Parse();
   59.19 @@ -659,7 +659,6 @@
   59.20  #endif
   59.21    kit->set_map(_map);
   59.22    kit->set_sp(_sp);
   59.23 -  Compile::current()->dec_preserve_jvm_state();
   59.24  }
   59.25  
   59.26  
   59.27 @@ -1398,60 +1397,17 @@
   59.28    // on the map.  This includes locals, stack, and monitors
   59.29    // of the current (innermost) JVM state.
   59.30  
   59.31 -  if (!ReplaceInParentMaps) {
   59.32 +  // don't let inconsistent types from profiling escape this
   59.33 +  // method
   59.34 +
   59.35 +  const Type* told = _gvn.type(old);
   59.36 +  const Type* tnew = _gvn.type(neww);
   59.37 +
   59.38 +  if (!tnew->higher_equal(told)) {
   59.39      return;
   59.40    }
   59.41  
   59.42 -  // PreserveJVMState doesn't do a deep copy so we can't modify
   59.43 -  // parents
   59.44 -  if (Compile::current()->has_preserve_jvm_state()) {
   59.45 -    return;
   59.46 -  }
   59.47 -
   59.48 -  Parse* parser = is_Parse();
   59.49 -  bool progress = true;
   59.50 -  Node* ctrl = map()->in(0);
   59.51 -  // Follow the chain of parsers and see whether the update can be
   59.52 -  // done in the map of callers. We can do the replace for a caller if
   59.53 -  // the current control post dominates the control of a caller.
   59.54 -  while (parser != NULL && parser->caller() != NULL && progress) {
   59.55 -    progress = false;
   59.56 -    Node* parent_map = parser->caller()->map();
   59.57 -    assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch");
   59.58 -
   59.59 -    Node* parent_ctrl = parent_map->in(0);
   59.60 -
   59.61 -    while (parent_ctrl->is_Region()) {
   59.62 -      Node* n = parent_ctrl->as_Region()->is_copy();
   59.63 -      if (n == NULL) {
   59.64 -        break;
   59.65 -      }
   59.66 -      parent_ctrl = n;
   59.67 -    }
   59.68 -
   59.69 -    for (;;) {
   59.70 -      if (ctrl == parent_ctrl) {
   59.71 -        // update the map of the exits which is the one that will be
   59.72 -        // used when compilation resume after inlining
   59.73 -        parser->exits().map()->replace_edge(old, neww);
   59.74 -        progress = true;
   59.75 -        break;
   59.76 -      }
   59.77 -      if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
   59.78 -        ctrl = ctrl->in(0)->in(0);
   59.79 -      } else if (ctrl->is_Region()) {
   59.80 -        Node* n = ctrl->as_Region()->is_copy();
   59.81 -        if (n == NULL) {
   59.82 -          break;
   59.83 -        }
   59.84 -        ctrl = n;
   59.85 -      } else {
   59.86 -        break;
   59.87 -      }
   59.88 -    }
   59.89 -
   59.90 -    parser = parser->parent_parser();
   59.91 -  }
   59.92 +  map()->record_replaced_node(old, neww);
   59.93  }
   59.94  
   59.95  
   59.96 @@ -1855,12 +1811,16 @@
   59.97  
   59.98  
   59.99  // Replace the call with the current state of the kit.
  59.100 -void GraphKit::replace_call(CallNode* call, Node* result) {
  59.101 +void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
  59.102    JVMState* ejvms = NULL;
  59.103    if (has_exceptions()) {
  59.104      ejvms = transfer_exceptions_into_jvms();
  59.105    }
  59.106  
  59.107 +  ReplacedNodes replaced_nodes = map()->replaced_nodes();
  59.108 +  ReplacedNodes replaced_nodes_exception;
  59.109 +  Node* ex_ctl = top();
  59.110 +
  59.111    SafePointNode* final_state = stop();
  59.112  
  59.113    // Find all the needed outputs of this call
  59.114 @@ -1877,6 +1837,10 @@
  59.115      C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
  59.116    }
  59.117    if (callprojs.fallthrough_memproj != NULL) {
  59.118 +    if (final_mem->is_MergeMem()) {
  59.119 +      // Parser's exits MergeMem was not transformed but may be optimized
  59.120 +      final_mem = _gvn.transform(final_mem);
  59.121 +    }
  59.122      C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
  59.123    }
  59.124    if (callprojs.fallthrough_ioproj != NULL) {
  59.125 @@ -1908,10 +1872,13 @@
  59.126  
  59.127      // Load my combined exception state into the kit, with all phis transformed:
  59.128      SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
  59.129 +    replaced_nodes_exception = ex_map->replaced_nodes();
  59.130  
  59.131      Node* ex_oop = ekit.use_exception_state(ex_map);
  59.132 +
  59.133      if (callprojs.catchall_catchproj != NULL) {
  59.134        C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
  59.135 +      ex_ctl = ekit.control();
  59.136      }
  59.137      if (callprojs.catchall_memproj != NULL) {
  59.138        C->gvn_replace_by(callprojs.catchall_memproj,   ekit.reset_memory());
  59.139 @@ -1944,6 +1911,13 @@
  59.140        _gvn.transform(wl.pop());
  59.141      }
  59.142    }
  59.143 +
  59.144 +  if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
  59.145 +    replaced_nodes.apply(C, final_ctl);
  59.146 +  }
  59.147 +  if (!ex_ctl->is_top() && do_replaced_nodes) {
  59.148 +    replaced_nodes_exception.apply(C, ex_ctl);
  59.149 +  }
  59.150  }
  59.151  
  59.152  
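
Two details in the graphKit.cpp hunk above are worth calling out. First, replace_in_map() now simply records the (old, new) pair on the current map, but only after checking tnew->higher_equal(told), so a profile-derived type that is not at least as precise as the existing one is silently dropped. A toy illustration of that guard using hypothetical integer-range "types", where being a subrange stands in for higher_equal():

    // Sketch only: record a replacement only when the new type is at least as
    // precise as the old one. RangeType is an invented stand-in for C2 types.
    #include <cassert>

    struct RangeType {
      int lo, hi;
      bool higher_equal(const RangeType& other) const {
        return lo >= other.lo && hi <= other.hi;   // subrange => at least as precise
      }
    };

    bool should_record(const RangeType& told, const RangeType& tnew) {
      // Mirrors: if (!tnew->higher_equal(told)) return;   // do not record
      return tnew.higher_equal(told);
    }

    int main() {
      RangeType whole_int = { -1000, 1000 };
      RangeType narrowed  = { 0, 10 };
      assert(should_record(whole_int, narrowed));    // profiling narrowed the type: record it
      assert(!should_record(narrowed, whole_int));   // would widen the type: skip it
      return 0;
    }

Second, replace_call() now applies the recorded replacements on the fall-through and exception control paths after late inlining, which is what lets the per-map bookkeeping replace the removed parent-map walking.
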
    60.1 --- a/src/share/vm/opto/graphKit.hpp	Fri Aug 22 13:24:04 2014 +0200
    60.2 +++ b/src/share/vm/opto/graphKit.hpp	Tue Aug 26 13:38:33 2014 -0700
    60.3 @@ -685,7 +685,7 @@
    60.4    // Replace the call with the current state of the kit.  Requires
    60.5    // that the call was generated with separate io_projs so that
    60.6    // exceptional control flow can be handled properly.
    60.7 -  void replace_call(CallNode* call, Node* result);
    60.8 +  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);
    60.9  
   60.10    // helper functions for statistics
   60.11    void increment_counter(address counter_addr);   // increment a debug counter
    61.1 --- a/src/share/vm/opto/ifnode.cpp	Fri Aug 22 13:24:04 2014 +0200
    61.2 +++ b/src/share/vm/opto/ifnode.cpp	Tue Aug 26 13:38:33 2014 -0700
    61.3 @@ -503,7 +503,7 @@
    61.4    jint  off = 0;
    61.5    if (l->is_top()) {
    61.6      return 0;
    61.7 -  } else if (l->is_Add()) {
    61.8 +  } else if (l->Opcode() == Op_AddI) {
    61.9      if ((off = l->in(1)->find_int_con(0)) != 0) {
   61.10        ind = l->in(2);
   61.11      } else if ((off = l->in(2)->find_int_con(0)) != 0) {
    62.1 --- a/src/share/vm/opto/library_call.cpp	Fri Aug 22 13:24:04 2014 +0200
    62.2 +++ b/src/share/vm/opto/library_call.cpp	Tue Aug 26 13:38:33 2014 -0700
    62.3 @@ -66,7 +66,7 @@
    62.4    virtual bool is_predicated() const { return _predicates_count > 0; }
    62.5    virtual int  predicates_count() const { return _predicates_count; }
    62.6    virtual bool does_virtual_dispatch()   const { return _does_virtual_dispatch; }
    62.7 -  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
    62.8 +  virtual JVMState* generate(JVMState* jvms);
    62.9    virtual Node* generate_predicate(JVMState* jvms, int predicate);
   62.10    vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
   62.11  };
   62.12 @@ -614,7 +614,7 @@
   62.13    // Nothing to do here.
   62.14  }
   62.15  
   62.16 -JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
   62.17 +JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   62.18    LibraryCallKit kit(jvms, this);
   62.19    Compile* C = kit.C;
   62.20    int nodes = C->unique();
    63.1 --- a/src/share/vm/opto/node.cpp	Fri Aug 22 13:24:04 2014 +0200
    63.2 +++ b/src/share/vm/opto/node.cpp	Tue Aug 26 13:38:33 2014 -0700
    63.3 @@ -527,6 +527,9 @@
    63.4    if (n->is_Call()) {
    63.5      n->as_Call()->clone_jvms(C);
    63.6    }
    63.7 +  if (n->is_SafePoint()) {
    63.8 +    n->as_SafePoint()->clone_replaced_nodes();
    63.9 +  }
   63.10    return n;                     // Return the clone
   63.11  }
   63.12  
   63.13 @@ -622,6 +625,9 @@
   63.14    if (is_expensive()) {
   63.15      compile->remove_expensive_node(this);
   63.16    }
   63.17 +  if (is_SafePoint()) {
   63.18 +    as_SafePoint()->delete_replaced_nodes();
   63.19 +  }
   63.20  #ifdef ASSERT
   63.21    // We will not actually delete the storage, but we'll make the node unusable.
   63.22    *(address*)this = badAddress;  // smash the C++ vtbl, probably
    64.1 --- a/src/share/vm/opto/parse.hpp	Fri Aug 22 13:24:04 2014 +0200
    64.2 +++ b/src/share/vm/opto/parse.hpp	Tue Aug 26 13:38:33 2014 -0700
    64.3 @@ -357,12 +357,13 @@
    64.4    int _est_switch_depth;        // Debugging SwitchRanges.
    64.5  #endif
    64.6  
    64.7 -  // parser for the caller of the method of this object
    64.8 -  Parse* const _parent;
    64.9 +  bool         _first_return;                  // true if return is the first to be parsed
   64.10 +  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
   64.11 +  uint         _new_idx;                       // nodes with _idx above this were new during this parsing. Used to trim the replaced nodes list.
   64.12  
   64.13   public:
   64.14    // Constructor
   64.15 -  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent);
   64.16 +  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
   64.17  
   64.18    virtual Parse* is_Parse() const { return (Parse*)this; }
   64.19  
   64.20 @@ -419,8 +420,6 @@
   64.21      return block()->successor_for_bci(bci);
   64.22    }
   64.23  
   64.24 -  Parse* parent_parser() const { return _parent; }
   64.25 -
   64.26   private:
   64.27    // Create a JVMS & map for the initial state of this method.
   64.28    SafePointNode* create_entry_map();
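
parse.hpp above adds _new_idx, a node-index watermark taken at the start of parsing: nodes allocated after that point belong to the current parse only, so replacements that mention them are useless to the caller and can be trimmed when the list is transferred back. The precise filtering rule lives in replacednodes.cpp, which is not part of this changeset, so the sketch below is an assumption about the intent rather than a copy of it:

    // Sketch only: trim a replaced-nodes list against an index watermark, so the
    // caller does not inherit entries about nodes created during the callee parse.
    #include <cassert>
    #include <utility>
    #include <vector>

    typedef std::pair<int, int> Entry;   // (initial node idx, improved node idx)

    std::vector<Entry> transfer_from(const std::vector<Entry>& callee, int new_idx) {
      std::vector<Entry> kept;
      for (const Entry& e : callee) {
        // Keep only replacements whose original node already existed in the caller.
        if (e.first < new_idx) {
          kept.push_back(e);
        }
      }
      return kept;
    }

    int main() {
      std::vector<Entry> callee;
      callee.push_back(std::make_pair(5, 40));    // node 5 predates this parse
      callee.push_back(std::make_pair(37, 41));   // node 37 was created during it
      std::vector<Entry> caller = transfer_from(callee, /* new_idx = */ 30);
      assert(caller.size() == 1 && caller[0].first == 5);
      return 0;
    }
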
    65.1 --- a/src/share/vm/opto/parse1.cpp	Fri Aug 22 13:24:04 2014 +0200
    65.2 +++ b/src/share/vm/opto/parse1.cpp	Tue Aug 26 13:38:33 2014 -0700
    65.3 @@ -381,8 +381,8 @@
    65.4  
    65.5  //------------------------------Parse------------------------------------------
    65.6  // Main parser constructor.
    65.7 -Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent)
    65.8 -  : _exits(caller), _parent(parent)
    65.9 +Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
   65.10 +  : _exits(caller)
   65.11  {
   65.12    // Init some variables
   65.13    _caller = caller;
   65.14 @@ -395,6 +395,9 @@
   65.15    _entry_bci = InvocationEntryBci;
   65.16    _tf = NULL;
   65.17    _block = NULL;
   65.18 +  _first_return = true;
   65.19 +  _replaced_nodes_for_exceptions = false;
   65.20 +  _new_idx = C->unique();
   65.21    debug_only(_block_count = -1);
   65.22    debug_only(_blocks = (Block*)-1);
   65.23  #ifndef PRODUCT
   65.24 @@ -895,6 +898,10 @@
   65.25    for (uint i = 0; i < TypeFunc::Parms; i++) {
   65.26      caller.map()->set_req(i, ex_map->in(i));
   65.27    }
   65.28 +  if (ex_map->has_replaced_nodes()) {
   65.29 +    _replaced_nodes_for_exceptions = true;
   65.30 +  }
   65.31 +  caller.map()->transfer_replaced_nodes_from(ex_map, _new_idx);
   65.32    // ...and the exception:
   65.33    Node*          ex_oop        = saved_ex_oop(ex_map);
   65.34    SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop);
   65.35 @@ -963,7 +970,7 @@
   65.36    bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;
   65.37  
   65.38    // record exit from a method if compiled while Dtrace is turned on.
   65.39 -  if (do_synch || C->env()->dtrace_method_probes()) {
   65.40 +  if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) {
   65.41      // First move the exception list out of _exits:
   65.42      GraphKit kit(_exits.transfer_exceptions_into_jvms());
   65.43      SafePointNode* normal_map = kit.map();  // keep this guy safe
   65.44 @@ -988,6 +995,9 @@
   65.45        if (C->env()->dtrace_method_probes()) {
   65.46          kit.make_dtrace_method_exit(method());
   65.47        }
   65.48 +      if (_replaced_nodes_for_exceptions) {
   65.49 +        kit.map()->apply_replaced_nodes();
   65.50 +      }
   65.51        // Done with exception-path processing.
   65.52        ex_map = kit.make_exception_state(ex_oop);
   65.53        assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
   65.54 @@ -1007,6 +1017,7 @@
   65.55        _exits.add_exception_state(ex_map);
   65.56      }
   65.57    }
   65.58 +  _exits.map()->apply_replaced_nodes();
   65.59  }
   65.60  
   65.61  //-----------------------------create_entry_map-------------------------------
   65.62 @@ -1021,6 +1032,9 @@
   65.63      return NULL;
   65.64    }
   65.65  
   65.66 +  // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
   65.67 +  _caller->map()->delete_replaced_nodes();
   65.68 +
   65.69    // If this is an inlined method, we may have to do a receiver null check.
   65.70    if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
   65.71      GraphKit kit(_caller);
   65.72 @@ -1044,6 +1058,8 @@
   65.73  
   65.74    SafePointNode* inmap = _caller->map();
   65.75    assert(inmap != NULL, "must have inmap");
   65.76 +  // In case of null check on receiver above
   65.77 +  map()->transfer_replaced_nodes_from(inmap, _new_idx);
   65.78  
   65.79    uint i;
   65.80  
   65.81 @@ -1673,6 +1689,8 @@
   65.82        set_control(r->nonnull_req());
   65.83      }
   65.84  
   65.85 +    map()->merge_replaced_nodes_with(newin);
   65.86 +
   65.87      // newin has been subsumed into the lazy merge, and is now dead.
   65.88      set_block(save_block);
   65.89  
   65.90 @@ -2077,6 +2095,13 @@
   65.91      phi->add_req(value);
   65.92    }
   65.93  
   65.94 +  if (_first_return) {
   65.95 +    _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
   65.96 +    _first_return = false;
   65.97 +  } else {
   65.98 +    _exits.map()->merge_replaced_nodes_with(map());
   65.99 +  }
  65.100 +
  65.101    stop_and_kill_map();          // This CFG path dies here
  65.102  }
  65.103  
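
In the return handling above, the first parsed return transfers its replaced-node pairs wholesale into the exit map and every later return is merged in, so only replacements that hold on all return paths reach the caller; exception paths get the analogous treatment through _replaced_nodes_for_exceptions in build_exits. A standalone model of that transfer-then-intersect pattern on integer id pairs (illustrative only, not VM code):

    #include <set>
    #include <utility>

    using Pair = std::pair<int, int>;        // (initial id, improved id)
    using PairSet = std::set<Pair>;

    struct ExitState {
        PairSet replaced;
        bool first_return = true;

        // Mirror of the _first_return logic: take the first path's pairs
        // verbatim, then keep only the intersection with each later path.
        void merge_return(const PairSet& path) {
            if (first_return) {
                replaced = path;
                first_return = false;
            } else {
                PairSet common;
                for (const Pair& p : replaced) {
                    if (path.count(p)) common.insert(p);
                }
                replaced.swap(common);
            }
        }
    };

The same intersection is what ReplacedNodes::merge_with performs when ordinary control-flow paths merge inside a method.
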
    66.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    66.2 +++ b/src/share/vm/opto/replacednodes.cpp	Tue Aug 26 13:38:33 2014 -0700
    66.3 @@ -0,0 +1,219 @@
    66.4 +/*
    66.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    66.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    66.7 + *
    66.8 + * This code is free software; you can redistribute it and/or modify it
    66.9 + * under the terms of the GNU General Public License version 2 only, as
   66.10 + * published by the Free Software Foundation.
   66.11 + *
   66.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   66.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   66.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   66.15 + * version 2 for more details (a copy is included in the LICENSE file that
   66.16 + * accompanied this code).
   66.17 + *
   66.18 + * You should have received a copy of the GNU General Public License version
   66.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   66.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   66.21 + *
   66.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   66.23 + * or visit www.oracle.com if you need additional information or have any
   66.24 + * questions.
   66.25 + *
   66.26 + */
   66.27 +
   66.28 +#include "precompiled.hpp"
   66.29 +#include "opto/cfgnode.hpp"
   66.30 +#include "opto/phaseX.hpp"
   66.31 +#include "opto/replacednodes.hpp"
   66.32 +
   66.33 +void ReplacedNodes::allocate_if_necessary() {
   66.34 +  if (_replaced_nodes == NULL) {
   66.35 +    _replaced_nodes = new GrowableArray<ReplacedNode>();
   66.36 +  }
   66.37 +}
   66.38 +
   66.39 +bool ReplacedNodes::is_empty() const {
   66.40 +  return _replaced_nodes == NULL || _replaced_nodes->length() == 0;
   66.41 +}
   66.42 +
   66.43 +bool ReplacedNodes::has_node(const ReplacedNode& r) const {
   66.44 +  return _replaced_nodes->find(r) != -1;
   66.45 +}
   66.46 +
   66.47 +bool ReplacedNodes::has_target_node(Node* n) const {
   66.48 +  for (int i = 0; i < _replaced_nodes->length(); i++) {
   66.49 +    if (_replaced_nodes->at(i).improved() == n) {
   66.50 +      return true;
   66.51 +    }
   66.52 +  }
   66.53 +  return false;
   66.54 +}
   66.55 +
   66.56 +// Record replaced node if not seen before
   66.57 +void ReplacedNodes::record(Node* initial, Node* improved) {
   66.58 +  allocate_if_necessary();
   66.59 +  ReplacedNode r(initial, improved);
   66.60 +  if (!has_node(r)) {
   66.61 +    _replaced_nodes->push(r);
   66.62 +  }
   66.63 +}
   66.64 +
   66.65 +// Copy replaced nodes from one map to another. idx is used to
   66.66 +// identify nodes that are too new to be of interest in the target
   66.67 +// node list.
   66.68 +void ReplacedNodes::transfer_from(const ReplacedNodes& other, uint idx) {
   66.69 +  if (other.is_empty()) {
   66.70 +    return;
   66.71 +  }
   66.72 +  allocate_if_necessary();
   66.73 +  for (int i = 0; i < other._replaced_nodes->length(); i++) {
   66.74 +    ReplacedNode replaced = other._replaced_nodes->at(i);
   66.75 +    // Only transfer the nodes that can actually be useful
   66.76 +    if (!has_node(replaced) && (replaced.initial()->_idx < idx || has_target_node(replaced.initial()))) {
   66.77 +      _replaced_nodes->push(replaced);
   66.78 +    }
   66.79 +  }
   66.80 +}
   66.81 +
   66.82 +void ReplacedNodes::clone() {
   66.83 +  if (_replaced_nodes != NULL) {
   66.84 +    GrowableArray<ReplacedNode>* replaced_nodes_clone = new GrowableArray<ReplacedNode>();
   66.85 +    replaced_nodes_clone->appendAll(_replaced_nodes);
   66.86 +    _replaced_nodes = replaced_nodes_clone;
   66.87 +  }
   66.88 +}
   66.89 +
   66.90 +void ReplacedNodes::reset() {
   66.91 +  if (_replaced_nodes != NULL) {
   66.92 +    _replaced_nodes->clear();
   66.93 +  }
   66.94 +}
   66.95 +
    66.96 +// Perform node replacement (used when returning to caller)
   66.97 +void ReplacedNodes::apply(Node* n) {
   66.98 +  if (is_empty()) {
   66.99 +    return;
  66.100 +  }
  66.101 +  for (int i = 0; i < _replaced_nodes->length(); i++) {
  66.102 +    ReplacedNode replaced = _replaced_nodes->at(i);
  66.103 +    n->replace_edge(replaced.initial(), replaced.improved());
  66.104 +  }
  66.105 +}
  66.106 +
  66.107 +static void enqueue_use(Node* n, Node* use, Unique_Node_List& work) {
  66.108 +  if (use->is_Phi()) {
  66.109 +    Node* r = use->in(0);
  66.110 +    assert(r->is_Region(), "Phi should have Region");
  66.111 +    for (uint i = 1; i < use->req(); i++) {
  66.112 +      if (use->in(i) == n) {
  66.113 +        work.push(r->in(i));
  66.114 +      }
  66.115 +    }
  66.116 +  } else {
  66.117 +    work.push(use);
  66.118 +  }
  66.119 +}
  66.120 +
   66.121 +// Perform node replacement following late inlining
  66.122 +void ReplacedNodes::apply(Compile* C, Node* ctl) {
  66.123 +  // ctl is the control on exit of the method that was late inlined
  66.124 +  if (is_empty()) {
  66.125 +    return;
  66.126 +  }
  66.127 +  for (int i = 0; i < _replaced_nodes->length(); i++) {
  66.128 +    ReplacedNode replaced = _replaced_nodes->at(i);
  66.129 +    Node* initial = replaced.initial();
  66.130 +    Node* improved = replaced.improved();
  66.131 +    assert (ctl != NULL && !ctl->is_top(), "replaced node should have actual control");
  66.132 +
  66.133 +    ResourceMark rm;
  66.134 +    Unique_Node_List work;
  66.135 +    // Go over all the uses of the node that is considered for replacement...
  66.136 +    for (DUIterator j = initial->outs(); initial->has_out(j); j++) {
  66.137 +      Node* use = initial->out(j);
  66.138 +
  66.139 +      if (use == improved || use->outcnt() == 0) {
  66.140 +        continue;
  66.141 +      }
  66.142 +      work.clear();
  66.143 +      enqueue_use(initial, use, work);
  66.144 +      bool replace = true;
  66.145 +      // Check that this use is dominated by ctl. Go ahead with the
  66.146 +      // replacement if it is.
  66.147 +      while (work.size() != 0 && replace) {
  66.148 +        Node* n = work.pop();
  66.149 +        if (use->outcnt() == 0) {
  66.150 +          continue;
  66.151 +        }
  66.152 +        if (n->is_CFG() || (n->in(0) != NULL && !n->in(0)->is_top())) {
  66.153 +          int depth = 0;
  66.154 +          Node *m = n;
  66.155 +          if (!n->is_CFG()) {
  66.156 +            n = n->in(0);
  66.157 +          }
  66.158 +          assert(n->is_CFG(), "should be CFG now");
  66.159 +          while(n != ctl) {
  66.160 +            n = IfNode::up_one_dom(n);
  66.161 +            depth++;
  66.162 +            // limit search depth
  66.163 +            if (depth >= 100 || n == NULL) {
  66.164 +              replace = false;
  66.165 +              break;
  66.166 +            }
  66.167 +          }
  66.168 +        } else {
  66.169 +          for (DUIterator k = n->outs(); n->has_out(k); k++) {
  66.170 +            enqueue_use(n, n->out(k), work);
  66.171 +          }
  66.172 +        }
  66.173 +      }
  66.174 +      if (replace) {
  66.175 +        bool is_in_table = C->initial_gvn()->hash_delete(use);
  66.176 +        int replaced = use->replace_edge(initial, improved);
  66.177 +        if (is_in_table) {
  66.178 +          C->initial_gvn()->hash_find_insert(use);
  66.179 +        }
  66.180 +        C->record_for_igvn(use);
  66.181 +
  66.182 +        assert(replaced > 0, "inconsistent");
  66.183 +        --j;
  66.184 +      }
  66.185 +    }
  66.186 +  }
  66.187 +}
  66.188 +
  66.189 +void ReplacedNodes::dump(outputStream *st) const {
  66.190 +  if (!is_empty()) {
  66.191 +    tty->print("replaced nodes: ");
  66.192 +    for (int i = 0; i < _replaced_nodes->length(); i++) {
  66.193 +      tty->print("%d->%d", _replaced_nodes->at(i).initial()->_idx, _replaced_nodes->at(i).improved()->_idx);
  66.194 +      if (i < _replaced_nodes->length()-1) {
  66.195 +        tty->print(",");
  66.196 +      }
  66.197 +    }
  66.198 +  }
  66.199 +}
  66.200 +
   66.201 +// Merge two lists of replaced nodes at a point where control flow paths merge
  66.202 +void ReplacedNodes::merge_with(const ReplacedNodes& other) {
  66.203 +  if (is_empty()) {
  66.204 +    return;
  66.205 +  }
  66.206 +  if (other.is_empty()) {
  66.207 +    reset();
  66.208 +    return;
  66.209 +  }
  66.210 +  int shift = 0;
  66.211 +  int len = _replaced_nodes->length();
  66.212 +  for (int i = 0; i < len; i++) {
  66.213 +    if (!other.has_node(_replaced_nodes->at(i))) {
  66.214 +      shift++;
  66.215 +    } else if (shift > 0) {
  66.216 +      _replaced_nodes->at_put(i-shift, _replaced_nodes->at(i));
  66.217 +    }
  66.218 +  }
  66.219 +  if (shift > 0) {
  66.220 +    _replaced_nodes->trunc_to(len - shift);
  66.221 +  }
  66.222 +}
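
merge_with above intersects two pair lists in place: a shift counter tracks dropped entries, survivors slide left by that amount, and the tail is truncated, so no scratch array is allocated. The sketch below reproduces the same compaction over a std::vector of id pairs; it is illustrative only and not the VM's GrowableArray code.

    #include <algorithm>
    #include <utility>
    #include <vector>

    using Pair = std::pair<int, int>;

    // Keep only the elements of 'mine' that also appear in 'other', compacting
    // in place: 'shift' counts dropped elements, survivors slide left by that
    // amount, and the vector is truncated at the end.
    void merge_with(std::vector<Pair>& mine, const std::vector<Pair>& other) {
        std::size_t shift = 0;
        const std::size_t len = mine.size();
        for (std::size_t i = 0; i < len; i++) {
            bool keep = std::find(other.begin(), other.end(), mine[i]) != other.end();
            if (!keep) {
                shift++;
            } else if (shift > 0) {
                mine[i - shift] = mine[i];
            }
        }
        mine.resize(len - shift);
    }

For the late-inlining case, apply(Compile*, Node*) above additionally walks each use up the dominator chain via IfNode::up_one_dom, giving up after a depth of 100, so an edge is only rewritten when the use is dominated by the inlined method's exit control.
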
    67.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    67.2 +++ b/src/share/vm/opto/replacednodes.hpp	Tue Aug 26 13:38:33 2014 -0700
    67.3 @@ -0,0 +1,81 @@
    67.4 +/*
    67.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    67.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    67.7 + *
    67.8 + * This code is free software; you can redistribute it and/or modify it
    67.9 + * under the terms of the GNU General Public License version 2 only, as
   67.10 + * published by the Free Software Foundation.
   67.11 + *
   67.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   67.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   67.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   67.15 + * version 2 for more details (a copy is included in the LICENSE file that
   67.16 + * accompanied this code).
   67.17 + *
   67.18 + * You should have received a copy of the GNU General Public License version
   67.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   67.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   67.21 + *
   67.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   67.23 + * or visit www.oracle.com if you need additional information or have any
   67.24 + * questions.
   67.25 + *
   67.26 + */
   67.27 +
   67.28 +#ifndef SHARE_VM_OPTO_REPLACEDNODES_HPP
   67.29 +#define SHARE_VM_OPTO_REPLACEDNODES_HPP
   67.30 +
   67.31 +#include "opto/connode.hpp"
   67.32 +
   67.33 +// During parsing, when a node is "improved",
   67.34 +// GraphKit::replace_in_map() is called to update the current map so
   67.35 +// that the improved node is used from that point
    67.36 +// on. GraphKit::replace_in_map() doesn't operate on the caller's maps
   67.37 +// and so some optimization opportunities may be lost. The
   67.38 +// ReplacedNodes class addresses that problem.
   67.39 +//
    67.40 +// A ReplacedNodes object is a list of pairs of nodes. Every
   67.41 +// SafePointNode carries a ReplacedNodes object. Every time
   67.42 +// GraphKit::replace_in_map() is called, a new pair of nodes is pushed
   67.43 +// on the list of replaced nodes. When control flow paths merge, their
   67.44 +// replaced nodes are also merged. When parsing exits a method to
   67.45 +// return to a caller, the replaced nodes on the exit path are used to
   67.46 +// update the caller's map.
   67.47 +class ReplacedNodes VALUE_OBJ_CLASS_SPEC {
   67.48 + private:
   67.49 +  class ReplacedNode VALUE_OBJ_CLASS_SPEC {
   67.50 +  private:
   67.51 +    Node* _initial;
   67.52 +    Node* _improved;
   67.53 +  public:
   67.54 +    ReplacedNode() : _initial(NULL), _improved(NULL) {}
   67.55 +    ReplacedNode(Node* initial, Node* improved) : _initial(initial), _improved(improved) {}
   67.56 +    Node* initial() const  { return _initial; }
   67.57 +    Node* improved() const { return _improved; }
   67.58 +
   67.59 +    bool operator==(const ReplacedNode& other) {
   67.60 +      return _initial == other._initial && _improved == other._improved;
   67.61 +    }
   67.62 +  };
   67.63 +  GrowableArray<ReplacedNode>* _replaced_nodes;
   67.64 +
   67.65 +  void allocate_if_necessary();
   67.66 +  bool has_node(const ReplacedNode& r) const;
   67.67 +  bool has_target_node(Node* n) const;
   67.68 +
   67.69 + public:
   67.70 +  ReplacedNodes()
   67.71 +    : _replaced_nodes(NULL) {}
   67.72 +
   67.73 +  void clone();
   67.74 +  void record(Node* initial, Node* improved);
   67.75 +  void transfer_from(const ReplacedNodes& other, uint idx);
   67.76 +  void reset();
   67.77 +  void apply(Node* n);
   67.78 +  void merge_with(const ReplacedNodes& other);
   67.79 +  bool is_empty() const;
   67.80 +  void dump(outputStream *st) const;
   67.81 +  void apply(Compile* C, Node* ctl);
   67.82 +};
   67.83 +
   67.84 +#endif // SHARE_VM_OPTO_REPLACEDNODES_HPP
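
The header comment above describes the flow: record() whenever GraphKit::replace_in_map() improves a node, merge_with() where control paths meet, transfer_from() when pairs cross between caller and callee maps, and apply() to rewrite the caller's map on return (or, after late inlining, the uses dominated by the exit control). A toy, self-contained model of record-then-apply on integer ids follows; nothing in it is VM code, and the node ids and the CheckCastPP remark are purely illustrative.

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Ids stand in for Node*; the vector of ids stands in for a caller map.
    int main() {
        std::vector<std::pair<int, int> > replaced;      // per-safepoint (initial, improved) list
        replaced.push_back(std::make_pair(12, 47));      // e.g. node 12 improved by a cast such as CheckCastPP
        replaced.push_back(std::make_pair(30, 51));

        std::vector<int> caller_map;                     // caller's local/stack slots by node id
        caller_map.push_back(5);
        caller_map.push_back(12);
        caller_map.push_back(30);
        caller_map.push_back(12);

        // Analogue of ReplacedNodes::apply(Node*): rewrite every slot that still
        // refers to an "initial" node so it uses the "improved" one.
        for (std::size_t i = 0; i < caller_map.size(); i++) {
            for (std::size_t j = 0; j < replaced.size(); j++) {
                if (caller_map[i] == replaced[j].first) caller_map[i] = replaced[j].second;
            }
        }
        for (std::size_t i = 0; i < caller_map.size(); i++) std::printf("%d ", caller_map[i]);
        std::printf("\n");                               // prints: 5 47 51 47
        return 0;
    }
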
    68.1 --- a/src/share/vm/prims/jni.cpp	Fri Aug 22 13:24:04 2014 +0200
    68.2 +++ b/src/share/vm/prims/jni.cpp	Tue Aug 26 13:38:33 2014 -0700
    68.3 @@ -292,15 +292,6 @@
    68.4        "Bug in native code: jfieldID offset must address interior of object");
    68.5  }
    68.6  
    68.7 -// Pick a reasonable higher bound for local capacity requested
    68.8 -// for EnsureLocalCapacity and PushLocalFrame.  We don't want it too
    68.9 -// high because a test (or very unusual application) may try to allocate
   68.10 -// that many handles and run out of swap space.  An implementation is
   68.11 -// permitted to allocate more handles than the ensured capacity, so this
   68.12 -// value is set high enough to prevent compatibility problems.
   68.13 -const int MAX_REASONABLE_LOCAL_CAPACITY = 4*K;
   68.14 -
   68.15 -
   68.16  // Wrapper to trace JNI functions
   68.17  
   68.18  #ifdef ASSERT
   68.19 @@ -880,7 +871,8 @@
   68.20                                     env, capacity);
   68.21  #endif /* USDT2 */
   68.22    //%note jni_11
   68.23 -  if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) {
   68.24 +  if (capacity < 0 ||
   68.25 +      ((MaxJNILocalCapacity > 0) && (capacity > MaxJNILocalCapacity))) {
   68.26  #ifndef USDT2
   68.27      DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR);
   68.28  #else /* USDT2 */
   68.29 @@ -1039,7 +1031,8 @@
   68.30                                          env, capacity);
   68.31  #endif /* USDT2 */
   68.32    jint ret;
   68.33 -  if (capacity >= 0 && capacity <= MAX_REASONABLE_LOCAL_CAPACITY) {
   68.34 +  if (capacity >= 0 &&
   68.35 +      ((MaxJNILocalCapacity <= 0) || (capacity <= MaxJNILocalCapacity))) {
   68.36      ret = JNI_OK;
   68.37    } else {
   68.38      ret = JNI_ERR;
   68.39 @@ -5089,6 +5082,7 @@
   68.40  void TestG1BiasedArray_test();
   68.41  void TestBufferingOopClosure_test();
   68.42  void TestCodeCacheRemSet_test();
   68.43 +void FreeRegionList_test();
   68.44  #endif
   68.45  
   68.46  void execute_internal_vm_tests() {
   68.47 @@ -5119,6 +5113,9 @@
   68.48      run_unit_test(HeapRegionRemSet::test_prt());
   68.49      run_unit_test(TestBufferingOopClosure_test());
   68.50      run_unit_test(TestCodeCacheRemSet_test());
   68.51 +    if (UseG1GC) {
   68.52 +      run_unit_test(FreeRegionList_test());
   68.53 +    }
   68.54  #endif
   68.55      tty->print_cr("All internal VM tests passed");
   68.56    }
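
The jni.cpp change replaces the hard-wired 4K bound on PushLocalFrame and EnsureLocalCapacity with the new MaxJNILocalCapacity flag (default 65536, values <= 0 meaning unlimited), so larger capacity requests now succeed by default and the limit is tunable with -XX:MaxJNILocalCapacity=<n>. A short native-code sketch of how the two JNI calls are typically paired; the Java class and method names here are made up for illustration.

    #include <jni.h>

    // Illustrative native method: reserve room for many local refs, do some work,
    // then drop them all at once by popping the frame.
    extern "C" JNIEXPORT void JNICALL
    Java_Example_processMany(JNIEnv* env, jobject /*self*/, jobjectArray items) {
        const jint wanted = 8192;   // above the old 4K cap, allowed by the new default
        if (env->EnsureLocalCapacity(wanted) != JNI_OK) {
            return;                 // request exceeded -XX:MaxJNILocalCapacity
        }
        if (env->PushLocalFrame(wanted) != JNI_OK) {
            return;
        }
        jsize n = env->GetArrayLength(items);
        for (jsize i = 0; i < n; i++) {
            jobject item = env->GetObjectArrayElement(items, i);  // creates a local ref
            (void)item;             // ... work with item ...
        }
        env->PopLocalFrame(NULL);   // releases all locals created in the frame
    }
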
    69.1 --- a/src/share/vm/prims/whitebox.cpp	Fri Aug 22 13:24:04 2014 +0200
    69.2 +++ b/src/share/vm/prims/whitebox.cpp	Tue Aug 26 13:38:33 2014 -0700
    69.3 @@ -231,7 +231,7 @@
    69.4  
    69.5  WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
    69.6    G1CollectedHeap* g1 = G1CollectedHeap::heap();
    69.7 -  size_t nr = g1->free_regions();
    69.8 +  size_t nr = g1->num_free_regions();
    69.9    return (jlong)nr;
   69.10  WB_END
   69.11  
    70.1 --- a/src/share/vm/runtime/arguments.cpp	Fri Aug 22 13:24:04 2014 +0200
    70.2 +++ b/src/share/vm/runtime/arguments.cpp	Tue Aug 26 13:38:33 2014 -0700
    70.3 @@ -3800,10 +3800,6 @@
     70.4      // nothing to use the profiling, turn it off
    70.5      FLAG_SET_DEFAULT(TypeProfileLevel, 0);
    70.6    }
    70.7 -  if (UseTypeSpeculation && FLAG_IS_DEFAULT(ReplaceInParentMaps)) {
    70.8 -    // Doing the replace in parent maps helps speculation
    70.9 -    FLAG_SET_DEFAULT(ReplaceInParentMaps, true);
   70.10 -  }
   70.11  #endif
   70.12  
   70.13    if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
    71.1 --- a/src/share/vm/runtime/globals.hpp	Fri Aug 22 13:24:04 2014 +0200
    71.2 +++ b/src/share/vm/runtime/globals.hpp	Tue Aug 26 13:38:33 2014 -0700
    71.3 @@ -1216,6 +1216,11 @@
    71.4    product(bool, UseFastJNIAccessors, true,                                  \
    71.5            "Use optimized versions of Get<Primitive>Field")                  \
    71.6                                                                              \
    71.7 +  product(intx, MaxJNILocalCapacity, 65536,                                 \
    71.8 +          "Maximum allowable local JNI handle capacity to "                 \
    71.9 +          "EnsureLocalCapacity() and PushLocalFrame(), "                    \
   71.10 +          "where <= 0 is unlimited, default: 65536")                        \
   71.11 +                                                                            \
   71.12    product(bool, EagerXrunInit, false,                                       \
   71.13            "Eagerly initialize -Xrun libraries; allows startup profiling, "  \
   71.14            "but not all -Xrun libraries may support the state of the VM "    \
   71.15 @@ -1940,6 +1945,10 @@
   71.16            "not just one of the generations (e.g., G1). A value of 0 "       \
   71.17            "denotes 'do constant GC cycles'.")                               \
   71.18                                                                              \
   71.19 +  manageable(intx, CMSTriggerInterval, -1,                                  \
   71.20 +          "Commence a CMS collection cycle (at least) every so many "       \
   71.21 +          "milliseconds (0 permanently, -1 disabled)")                      \
   71.22 +                                                                            \
   71.23    product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
   71.24            "Only use occupancy as a criterion for starting a CMS collection")\
   71.25                                                                              \
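
Both new flags are ordinary -XX options; CMSTriggerInterval is declared manageable, so it can also be adjusted on a running VM, for example with jinfo -flag. The invocations below are illustrative only, with app.jar and the pid as placeholders.

    java -XX:MaxJNILocalCapacity=131072 -jar app.jar
    java -XX:+UseConcMarkSweepGC -XX:CMSTriggerInterval=600000 -jar app.jar
    jinfo -flag CMSTriggerInterval=300000 <pid>
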
    72.1 --- a/src/share/vm/utilities/growableArray.hpp	Fri Aug 22 13:24:04 2014 +0200
    72.2 +++ b/src/share/vm/utilities/growableArray.hpp	Tue Aug 26 13:38:33 2014 -0700
    72.3 @@ -349,6 +349,7 @@
    72.4  
    72.5    // inserts the given element before the element at index i
    72.6    void insert_before(const int idx, const E& elem) {
    72.7 +    assert(0 <= idx && idx <= _len, "illegal index");
    72.8      check_nesting();
    72.9      if (_len == _max) grow(_len);
   72.10      for (int j = _len - 1; j >= idx; j--) {
   72.11 @@ -360,7 +361,7 @@
   72.12  
   72.13    void appendAll(const GrowableArray<E>* l) {
   72.14      for (int i = 0; i < l->_len; i++) {
   72.15 -      raw_at_put_grow(_len, l->_data[i], 0);
   72.16 +      raw_at_put_grow(_len, l->_data[i], E());
   72.17      }
   72.18    }
   72.19  
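
The appendAll fix matters once GrowableArray is instantiated with a value class such as ReplacedNode: the old literal 0 filler has no conversion to such an element type, whereas a default-constructed E() works for any default-constructible element; the added assert in insert_before simply rejects out-of-range indices up front. A standalone sketch of the filler issue using a small value type and a resize-style helper (illustrative, not the VM container):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // A value type with no conversion from int, similar in spirit to ReplacedNode.
    struct PairVal {
        int initial;
        int improved;
        PairVal() : initial(0), improved(0) {}
        PairVal(int a, int b) : initial(a), improved(b) {}
        bool operator==(const PairVal& o) const { return initial == o.initial && improved == o.improved; }
    };

    // Grow-and-put helper in the style of raw_at_put_grow: pad with 'filler'
    // until index i exists, then store the element there.
    template <typename E>
    void at_put_grow(std::vector<E>& v, std::size_t i, const E& elem, const E& filler) {
        if (i >= v.size()) v.resize(i + 1, filler);
        v[i] = elem;
    }

    int main() {
        std::vector<PairVal> dst;
        std::vector<PairVal> src;
        src.push_back(PairVal(1, 2));
        src.push_back(PairVal(3, 4));
        for (std::size_t i = 0; i < src.size(); i++) {
            // Passing 0 as the filler would not compile for PairVal;
            // a default-constructed E() works for any default-constructible type.
            at_put_grow(dst, dst.size(), src[i], PairVal());
        }
        assert(dst.size() == 2 && dst[1].improved == 4);
        return 0;
    }
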
    73.1 --- a/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Fri Aug 22 13:24:04 2014 +0200
    73.2 +++ b/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Tue Aug 26 13:38:33 2014 -0700
    73.3 @@ -45,6 +45,7 @@
    73.4    private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
    73.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
    73.6        "-Xbootclasspath/a:.",
    73.7 +      "-XX:+UnlockDiagnosticVMOptions",
    73.8        "-XX:+WhiteBoxAPI",
    73.9        "-XX:MetaspaceSize=" + MetaspaceSize,
   73.10        "-Xmn" + YoungGenSize,
    74.1 --- a/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Fri Aug 22 13:24:04 2014 +0200
    74.2 +++ b/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Tue Aug 26 13:38:33 2014 -0700
    74.3 @@ -45,6 +45,7 @@
    74.4    private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
    74.5      ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
    74.6        "-Xbootclasspath/a:.",
    74.7 +      "-XX:+UnlockDiagnosticVMOptions",
    74.8        "-XX:+WhiteBoxAPI",
    74.9        "-XX:MetaspaceSize=" + MetaspaceSize,
   74.10        "-Xmn" + YoungGenSize,
    75.1 --- a/test/gc/g1/TestEagerReclaimHumongousRegions2.java	Fri Aug 22 13:24:04 2014 +0200
    75.2 +++ b/test/gc/g1/TestEagerReclaimHumongousRegions2.java	Tue Aug 26 13:38:33 2014 -0700
    75.3 @@ -46,6 +46,8 @@
    75.4  }
    75.5  
    75.6  class ReclaimRegionFast {
    75.7 +    public static final long MAX_MILLIS_FOR_RUN = 50 * 1000; // The maximum runtime for the actual test.
    75.8 +
    75.9      public static final int M = 1024*1024;
   75.10  
   75.11      public static LinkedList<Object> garbageList = new LinkedList<Object>();
   75.12 @@ -83,7 +85,14 @@
   75.13  
   75.14          Object ref_from_stack = large1;
   75.15  
   75.16 +        long start_millis = System.currentTimeMillis();
   75.17 +
   75.18          for (int i = 0; i < 20; i++) {
   75.19 +            long current_millis = System.currentTimeMillis();
   75.20 +            if ((current_millis - start_millis) > MAX_MILLIS_FOR_RUN) {
   75.21 +              System.out.println("Finishing test because maximum runtime exceeded");
   75.22 +              break;
   75.23 +            }
   75.24              // A set of large objects that will be reclaimed eagerly - and hopefully marked.
   75.25              large1 = new int[M - 20];
   75.26              large2 = new int[M - 20];
