Merge

Mon, 26 Apr 2010 18:01:55 -0400

author
tonyp
date
Mon, 26 Apr 2010 18:01:55 -0400
changeset 1830
454ff03c0daf
parent 1821
ba07d5be2d51
parent 1829
1316cec51b4d
child 1834
90acda19b80f
child 1843
615a9d95d265

Merge

     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed Apr 21 01:13:15 2010 -0700
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Mon Apr 26 18:01:55 2010 -0400
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -32,11 +32,10 @@
    1.11    ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }
    1.12  
    1.13    void initialize_gc_policy_counters();
    1.14 -#if 1
    1.15 +
    1.16    virtual void initialize_size_policy(size_t init_eden_size,
    1.17                                        size_t init_promo_size,
    1.18                                        size_t init_survivor_size);
    1.19 -#endif
    1.20  
    1.21    // Returns true if the incremental mode is enabled.
    1.22    virtual bool has_soft_ended_eden();
     2.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Apr 21 01:13:15 2010 -0700
     2.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Apr 26 18:01:55 2010 -0400
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     2.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -1815,8 +1815,19 @@
    2.11      do_compaction_work(clear_all_soft_refs);
    2.12  
    2.13      // Has the GC time limit been exceeded?
    2.14 -    check_gc_time_limit();
    2.15 -
    2.16 +    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
    2.17 +    size_t max_eden_size = young_gen->max_capacity() -
    2.18 +                           young_gen->to()->capacity() -
    2.19 +                           young_gen->from()->capacity();
    2.20 +    GenCollectedHeap* gch = GenCollectedHeap::heap();
    2.21 +    GCCause::Cause gc_cause = gch->gc_cause();
    2.22 +    size_policy()->check_gc_overhead_limit(_young_gen->used(),
    2.23 +                                           young_gen->eden()->used(),
    2.24 +                                           _cmsGen->max_capacity(),
    2.25 +                                           max_eden_size,
    2.26 +                                           full,
    2.27 +                                           gc_cause,
    2.28 +                                           gch->collector_policy());
    2.29    } else {
    2.30      do_mark_sweep_work(clear_all_soft_refs, first_state,
    2.31        should_start_over);
    2.32 @@ -1828,55 +1839,6 @@
    2.33    return;
    2.34  }
    2.35  
    2.36 -void CMSCollector::check_gc_time_limit() {
    2.37 -
    2.38 -  // Ignore explicit GC's.  Exiting here does not set the flag and
    2.39 -  // does not reset the count.  Updating of the averages for system
    2.40 -  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
    2.41 -  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
    2.42 -  if (GCCause::is_user_requested_gc(gc_cause) ||
    2.43 -      GCCause::is_serviceability_requested_gc(gc_cause)) {
    2.44 -    return;
    2.45 -  }
    2.46 -
    2.47 -  // Calculate the fraction of the CMS generation was freed during
    2.48 -  // the last collection.
    2.49 -  // Only consider the STW compacting cost for now.
    2.50 -  //
    2.51 -  // Note that the gc time limit test only works for the collections
    2.52 -  // of the young gen + tenured gen and not for collections of the
    2.53 -  // permanent gen.  That is because the calculation of the space
    2.54 -  // freed by the collection is the free space in the young gen +
    2.55 -  // tenured gen.
    2.56 -
    2.57 -  double fraction_free =
    2.58 -    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
    2.59 -  if ((100.0 * size_policy()->compacting_gc_cost()) >
    2.60 -         ((double) GCTimeLimit) &&
    2.61 -        ((fraction_free * 100) < GCHeapFreeLimit)) {
    2.62 -    size_policy()->inc_gc_time_limit_count();
    2.63 -    if (UseGCOverheadLimit &&
    2.64 -        (size_policy()->gc_time_limit_count() >
    2.65 -         AdaptiveSizePolicyGCTimeLimitThreshold)) {
    2.66 -      size_policy()->set_gc_time_limit_exceeded(true);
    2.67 -      // Avoid consecutive OOM due to the gc time limit by resetting
    2.68 -      // the counter.
    2.69 -      size_policy()->reset_gc_time_limit_count();
    2.70 -      if (PrintGCDetails) {
    2.71 -        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
    2.72 -          "of %d%%", GCTimeLimit);
    2.73 -      }
    2.74 -    } else {
    2.75 -      if (PrintGCDetails) {
    2.76 -        gclog_or_tty->print_cr("      GC would exceed overhead limit "
    2.77 -          "of %d%%", GCTimeLimit);
    2.78 -      }
    2.79 -    }
    2.80 -  } else {
    2.81 -    size_policy()->reset_gc_time_limit_count();
    2.82 -  }
    2.83 -}
    2.84 -
    2.85  // Resize the perm generation and the tenured generation
    2.86  // after obtaining the free list locks for the
    2.87  // two generations.
    2.88 @@ -6182,6 +6144,11 @@
    2.89        }
    2.90        curAddr = chunk.end();
    2.91      }
    2.92 +    // A successful mostly concurrent collection has been done.
    2.93 +    // Because only the full (i.e., concurrent mode failure) collections
    2.94 +    // are being measured for gc overhead limits, clean the "near" flag
    2.95 +    // and count.
    2.96 +    sp->reset_gc_overhead_limit_count();
    2.97      _collectorState = Idling;
    2.98    } else {
    2.99      // already have the lock
     3.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Apr 21 01:13:15 2010 -0700
     3.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Apr 26 18:01:55 2010 -0400
     3.3 @@ -1,5 +1,5 @@
     3.4  /*
     3.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
     3.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.8   *
     3.9   * This code is free software; you can redistribute it and/or modify it
    3.10 @@ -570,10 +570,6 @@
    3.11    ConcurrentMarkSweepPolicy* _collector_policy;
    3.12    ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
    3.13  
    3.14 -  // Check whether the gc time limit has been
    3.15 -  // exceeded and set the size policy flag
    3.16 -  // appropriately.
    3.17 -  void check_gc_time_limit();
    3.18    // XXX Move these to CMSStats ??? FIX ME !!!
    3.19    elapsedTimer _inter_sweep_timer;   // time between sweeps
    3.20    elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
     4.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Apr 21 01:13:15 2010 -0700
     4.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon Apr 26 18:01:55 2010 -0400
     4.3 @@ -1,5 +1,5 @@
     4.4  /*
     4.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     4.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
     4.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.8   *
     4.9   * This code is free software; you can redistribute it and/or modify it
    4.10 @@ -69,9 +69,9 @@
    4.11    G1CollectorPolicy* g1p = g1h->g1_policy();
    4.12    if (g1p->adaptive_young_list_length()) {
    4.13      int regions_visited = 0;
    4.14 -    g1h->young_list_rs_length_sampling_init();
    4.15 -    while (g1h->young_list_rs_length_sampling_more()) {
    4.16 -      g1h->young_list_rs_length_sampling_next();
    4.17 +    g1h->young_list()->rs_length_sampling_init();
    4.18 +    while (g1h->young_list()->rs_length_sampling_more()) {
    4.19 +      g1h->young_list()->rs_length_sampling_next();
    4.20        ++regions_visited;
    4.21  
    4.22        // we try to yield every time we visit 10 regions
    4.23 @@ -162,6 +162,7 @@
    4.24    if (_worker_id >= cg1r()->worker_thread_num()) {
    4.25      run_young_rs_sampling();
    4.26      terminate();
    4.27 +    return;
    4.28    }
    4.29  
    4.30    _vtime_start = os::elapsedVTime();
     5.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Apr 21 01:13:15 2010 -0700
     5.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Apr 26 18:01:55 2010 -0400
     5.3 @@ -767,7 +767,8 @@
     5.4    _has_aborted = false;
     5.5  
     5.6    if (G1PrintReachableAtInitialMark) {
     5.7 -    print_reachable(true, "before");
     5.8 +    print_reachable("at-cycle-start",
     5.9 +                    true /* use_prev_marking */, true /* all */);
    5.10    }
    5.11  
    5.12    // Initialise marking structures. This has to be done in a STW phase.
    5.13 @@ -1979,19 +1980,21 @@
    5.14  
    5.15  #ifndef PRODUCT
    5.16  
    5.17 -class ReachablePrinterOopClosure: public OopClosure {
    5.18 +class PrintReachableOopClosure: public OopClosure {
    5.19  private:
    5.20    G1CollectedHeap* _g1h;
    5.21    CMBitMapRO*      _bitmap;
    5.22    outputStream*    _out;
    5.23    bool             _use_prev_marking;
    5.24 +  bool             _all;
    5.25  
    5.26  public:
    5.27 -  ReachablePrinterOopClosure(CMBitMapRO*   bitmap,
    5.28 -                             outputStream* out,
    5.29 -                             bool          use_prev_marking) :
    5.30 +  PrintReachableOopClosure(CMBitMapRO*   bitmap,
    5.31 +                           outputStream* out,
    5.32 +                           bool          use_prev_marking,
    5.33 +                           bool          all) :
    5.34      _g1h(G1CollectedHeap::heap()),
    5.35 -    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
    5.36 +    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
    5.37  
    5.38    void do_oop(narrowOop* p) { do_oop_work(p); }
    5.39    void do_oop(      oop* p) { do_oop_work(p); }
    5.40 @@ -2001,9 +2004,11 @@
    5.41      const char* str = NULL;
    5.42      const char* str2 = "";
    5.43  
    5.44 -    if (!_g1h->is_in_g1_reserved(obj))
    5.45 -      str = "outside G1 reserved";
    5.46 -    else {
    5.47 +    if (obj == NULL) {
    5.48 +      str = "";
    5.49 +    } else if (!_g1h->is_in_g1_reserved(obj)) {
    5.50 +      str = " O";
    5.51 +    } else {
    5.52        HeapRegion* hr  = _g1h->heap_region_containing(obj);
    5.53        guarantee(hr != NULL, "invariant");
    5.54        bool over_tams = false;
    5.55 @@ -2012,74 +2017,67 @@
    5.56        } else {
    5.57          over_tams = hr->obj_allocated_since_next_marking(obj);
    5.58        }
    5.59 +      bool marked = _bitmap->isMarked((HeapWord*) obj);
    5.60  
    5.61        if (over_tams) {
    5.62 -        str = "over TAMS";
    5.63 -        if (_bitmap->isMarked((HeapWord*) obj)) {
    5.64 +        str = " >";
    5.65 +        if (marked) {
    5.66            str2 = " AND MARKED";
    5.67          }
    5.68 -      } else if (_bitmap->isMarked((HeapWord*) obj)) {
    5.69 -        str = "marked";
    5.70 +      } else if (marked) {
    5.71 +        str = " M";
    5.72        } else {
    5.73 -        str = "#### NOT MARKED ####";
    5.74 +        str = " NOT";
    5.75        }
    5.76      }
    5.77  
    5.78 -    _out->print_cr("    "PTR_FORMAT" contains "PTR_FORMAT" %s%s",
    5.79 +    _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
    5.80                     p, (void*) obj, str, str2);
    5.81    }
    5.82  };
    5.83  
    5.84 -class ReachablePrinterClosure: public BitMapClosure {
    5.85 +class PrintReachableObjectClosure : public ObjectClosure {
    5.86  private:
    5.87    CMBitMapRO*   _bitmap;
    5.88    outputStream* _out;
    5.89    bool          _use_prev_marking;
    5.90 +  bool          _all;
    5.91 +  HeapRegion*   _hr;
    5.92  
    5.93  public:
    5.94 -  ReachablePrinterClosure(CMBitMapRO*   bitmap,
    5.95 -                          outputStream* out,
    5.96 -                          bool          use_prev_marking) :
    5.97 -    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
    5.98 -
    5.99 -  bool do_bit(size_t offset) {
   5.100 -    HeapWord* addr = _bitmap->offsetToHeapWord(offset);
   5.101 -    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
   5.102 -
   5.103 -    _out->print_cr("  obj "PTR_FORMAT", offset %10d (marked)", addr, offset);
   5.104 -    oop(addr)->oop_iterate(&oopCl);
   5.105 -    _out->print_cr("");
   5.106 -
   5.107 -    return true;
   5.108 +  PrintReachableObjectClosure(CMBitMapRO*   bitmap,
   5.109 +                              outputStream* out,
   5.110 +                              bool          use_prev_marking,
   5.111 +                              bool          all,
   5.112 +                              HeapRegion*   hr) :
   5.113 +    _bitmap(bitmap), _out(out),
   5.114 +    _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }
   5.115 +
   5.116 +  void do_object(oop o) {
   5.117 +    bool over_tams;
   5.118 +    if (_use_prev_marking) {
   5.119 +      over_tams = _hr->obj_allocated_since_prev_marking(o);
   5.120 +    } else {
   5.121 +      over_tams = _hr->obj_allocated_since_next_marking(o);
   5.122 +    }
   5.123 +    bool marked = _bitmap->isMarked((HeapWord*) o);
   5.124 +    bool print_it = _all || over_tams || marked;
   5.125 +
   5.126 +    if (print_it) {
   5.127 +      _out->print_cr(" "PTR_FORMAT"%s",
   5.128 +                     o, (over_tams) ? " >" : (marked) ? " M" : "");
   5.129 +      PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
   5.130 +      o->oop_iterate(&oopCl);
   5.131 +    }
   5.132    }
   5.133  };
   5.134  
   5.135 -class ObjInRegionReachablePrinterClosure : public ObjectClosure {
   5.136 +class PrintReachableRegionClosure : public HeapRegionClosure {
   5.137  private:
   5.138    CMBitMapRO*   _bitmap;
   5.139    outputStream* _out;
   5.140    bool          _use_prev_marking;
   5.141 -
   5.142 -public:
   5.143 -  ObjInRegionReachablePrinterClosure(CMBitMapRO*   bitmap,
   5.144 -                                     outputStream* out,
   5.145 -                                     bool          use_prev_marking) :
   5.146 -    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
   5.147 -
   5.148 -  void do_object(oop o) {
   5.149 -    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
   5.150 -
   5.151 -    _out->print_cr("  obj "PTR_FORMAT" (over TAMS)", (void*) o);
   5.152 -    o->oop_iterate(&oopCl);
   5.153 -    _out->print_cr("");
   5.154 -  }
   5.155 -};
   5.156 -
   5.157 -class RegionReachablePrinterClosure : public HeapRegionClosure {
   5.158 -private:
   5.159 -  CMBitMapRO*   _bitmap;
   5.160 -  outputStream* _out;
   5.161 -  bool          _use_prev_marking;
   5.162 +  bool          _all;
   5.163  
   5.164  public:
   5.165    bool doHeapRegion(HeapRegion* hr) {
   5.166 @@ -2094,22 +2092,35 @@
   5.167      }
   5.168      _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
   5.169                     "TAMS: "PTR_FORMAT, b, e, t, p);
   5.170 -    _out->print_cr("");
   5.171 -
   5.172 -    ObjInRegionReachablePrinterClosure ocl(_bitmap, _out, _use_prev_marking);
   5.173 -    hr->object_iterate_mem_careful(MemRegion(p, t), &ocl);
   5.174 +    _out->cr();
   5.175 +
   5.176 +    HeapWord* from = b;
   5.177 +    HeapWord* to   = t;
   5.178 +
   5.179 +    if (to > from) {
   5.180 +      _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
   5.181 +      _out->cr();
   5.182 +      PrintReachableObjectClosure ocl(_bitmap, _out,
   5.183 +                                      _use_prev_marking, _all, hr);
   5.184 +      hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
   5.185 +      _out->cr();
   5.186 +    }
   5.187  
   5.188      return false;
   5.189    }
   5.190  
   5.191 -  RegionReachablePrinterClosure(CMBitMapRO*   bitmap,
   5.192 -                                outputStream* out,
   5.193 -                                bool          use_prev_marking) :
   5.194 -    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
   5.195 +  PrintReachableRegionClosure(CMBitMapRO*   bitmap,
   5.196 +                              outputStream* out,
   5.197 +                              bool          use_prev_marking,
   5.198 +                              bool          all) :
   5.199 +    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
   5.200  };
   5.201  
   5.202 -void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
   5.203 -  gclog_or_tty->print_cr("== Doing reachable object dump... ");
   5.204 +void ConcurrentMark::print_reachable(const char* str,
   5.205 +                                     bool use_prev_marking,
   5.206 +                                     bool all) {
   5.207 +  gclog_or_tty->cr();
   5.208 +  gclog_or_tty->print_cr("== Doing heap dump... ");
   5.209  
   5.210    if (G1PrintReachableBaseFile == NULL) {
   5.211      gclog_or_tty->print_cr("  #### error: no base file defined");
   5.212 @@ -2144,19 +2155,14 @@
   5.213    out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
   5.214    out->cr();
   5.215  
   5.216 -  RegionReachablePrinterClosure rcl(bitmap, out, use_prev_marking);
   5.217 -  out->print_cr("--- ITERATING OVER REGIONS WITH TAMS < TOP");
   5.218 +  out->print_cr("--- ITERATING OVER REGIONS");
   5.219    out->cr();
   5.220 +  PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
   5.221    _g1h->heap_region_iterate(&rcl);
   5.222    out->cr();
   5.223  
   5.224 -  ReachablePrinterClosure cl(bitmap, out, use_prev_marking);
   5.225 -  out->print_cr("--- ITERATING OVER MARKED OBJECTS ON THE BITMAP");
   5.226 -  out->cr();
   5.227 -  bitmap->iterate(&cl);
   5.228 -  out->cr();
   5.229 -
   5.230    gclog_or_tty->print_cr("  done");
   5.231 +  gclog_or_tty->flush();
   5.232  }
   5.233  
   5.234  #endif // PRODUCT
     6.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Apr 21 01:13:15 2010 -0700
     6.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Apr 26 18:01:55 2010 -0400
     6.3 @@ -1,5 +1,5 @@
     6.4  /*
     6.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     6.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
     6.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8   *
     6.9   * This code is free software; you can redistribute it and/or modify it
    6.10 @@ -652,11 +652,24 @@
    6.11    // we do nothing.
    6.12    void markAndGrayObjectIfNecessary(oop p);
    6.13  
    6.14 -  // This iterates over the marking bitmap (either prev or next) and
    6.15 -  // prints out all objects that are marked on the bitmap and indicates
    6.16 -  // whether what they point to is also marked or not. It also iterates
    6.17 -  // the objects over TAMS (either prev or next).
    6.18 -  void print_reachable(bool use_prev_marking, const char* str);
    6.19 +  // It iterates over the heap and for each object it comes across it
    6.20 +  // will dump the contents of its reference fields, as well as
    6.21 +  // liveness information for the object and its referents. The dump
    6.22 +  // will be written to a file with the following name:
    6.23 +  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
    6.24 +  // whether the prev (use_prev_marking == true) or next
    6.25 +  // (use_prev_marking == false) marking information will be used to
    6.26 +  // determine the liveness of each object / referent. If all is true,
    6.27 +  // all objects in the heap will be dumped, otherwise only the live
    6.28 +  // ones. In the dump the following symbols / abbreviations are used:
    6.29 +  //   M : an explicitly live object (its bitmap bit is set)
    6.30 +  //   > : an implicitly live object (over tams)
    6.31 +  //   O : an object outside the G1 heap (typically: in the perm gen)
    6.32 +  //   NOT : a reference field whose referent is not live
    6.33 +  //   AND MARKED : indicates that an object is both explicitly and
    6.34 +  //   implicitly live (it should be one or the other, not both)
    6.35 +  void print_reachable(const char* str,
    6.36 +                       bool use_prev_marking, bool all) PRODUCT_RETURN;
    6.37  
    6.38    // Clear the next marking bitmap (will be called concurrently).
    6.39    void clearNextBitmap();
    6.40 @@ -720,6 +733,19 @@
    6.41    // to determine whether any heap regions are located above the finger.
    6.42    void registerCSetRegion(HeapRegion* hr);
    6.43  
    6.44 +  // Registers the maximum region-end associated with a set of
    6.45 +  // regions with CM. Again this is used to determine whether any
    6.46 +  // heap regions are located above the finger.
    6.47 +  void register_collection_set_finger(HeapWord* max_finger) {
    6.48 +    // max_finger is the highest heap region end of the regions currently
    6.49 +    // contained in the collection set. If this value is larger than
    6.50 +    // _min_finger then we need to gray objects.
    6.51 +    // This routine is like registerCSetRegion but for an entire
    6.52 +    // collection of regions.
    6.53 +    if (max_finger > _min_finger)
    6.54 +      _should_gray_objects = true;
    6.55 +  }
    6.56 +
    6.57    // Returns "true" if at least one mark has been completed.
    6.58    bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
    6.59  
     7.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 21 01:13:15 2010 -0700
     7.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Apr 26 18:01:55 2010 -0400
     7.3 @@ -1,5 +1,5 @@
     7.4  /*
     7.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     7.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
     7.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8   *
     7.9   * This code is free software; you can redistribute it and/or modify it
    7.10 @@ -30,7 +30,7 @@
    7.11  // turn it on so that the contents of the young list (scan-only /
    7.12  // to-be-collected) are printed at "strategic" points before / during
    7.13  // / after the collection --- this is useful for debugging
    7.14 -#define SCAN_ONLY_VERBOSE 0
    7.15 +#define YOUNG_LIST_VERBOSE 0
    7.16  // CURRENT STATUS
    7.17  // This file is under construction.  Search for "FIXME".
    7.18  
    7.19 @@ -133,8 +133,7 @@
    7.20  
    7.21  YoungList::YoungList(G1CollectedHeap* g1h)
    7.22    : _g1h(g1h), _head(NULL),
    7.23 -    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
    7.24 -    _length(0), _scan_only_length(0),
    7.25 +    _length(0),
    7.26      _last_sampled_rs_lengths(0),
    7.27      _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
    7.28  {
    7.29 @@ -166,48 +165,6 @@
    7.30    ++_survivor_length;
    7.31  }
    7.32  
    7.33 -HeapRegion* YoungList::pop_region() {
    7.34 -  while (_head != NULL) {
    7.35 -    assert( length() > 0, "list should not be empty" );
    7.36 -    HeapRegion* ret = _head;
    7.37 -    _head = ret->get_next_young_region();
    7.38 -    ret->set_next_young_region(NULL);
    7.39 -    --_length;
    7.40 -    assert(ret->is_young(), "region should be very young");
    7.41 -
    7.42 -    // Replace 'Survivor' region type with 'Young'. So the region will
    7.43 -    // be treated as a young region and will not be 'confused' with
    7.44 -    // newly created survivor regions.
    7.45 -    if (ret->is_survivor()) {
    7.46 -      ret->set_young();
    7.47 -    }
    7.48 -
    7.49 -    if (!ret->is_scan_only()) {
    7.50 -      return ret;
    7.51 -    }
    7.52 -
    7.53 -    // scan-only, we'll add it to the scan-only list
    7.54 -    if (_scan_only_tail == NULL) {
    7.55 -      guarantee( _scan_only_head == NULL, "invariant" );
    7.56 -
    7.57 -      _scan_only_head = ret;
    7.58 -      _curr_scan_only = ret;
    7.59 -    } else {
    7.60 -      guarantee( _scan_only_head != NULL, "invariant" );
    7.61 -      _scan_only_tail->set_next_young_region(ret);
    7.62 -    }
    7.63 -    guarantee( ret->get_next_young_region() == NULL, "invariant" );
    7.64 -    _scan_only_tail = ret;
    7.65 -
    7.66 -    // no need to be tagged as scan-only any more
    7.67 -    ret->set_young();
    7.68 -
    7.69 -    ++_scan_only_length;
    7.70 -  }
    7.71 -  assert( length() == 0, "list should be empty" );
    7.72 -  return NULL;
    7.73 -}
    7.74 -
    7.75  void YoungList::empty_list(HeapRegion* list) {
    7.76    while (list != NULL) {
    7.77      HeapRegion* next = list->get_next_young_region();
    7.78 @@ -225,12 +182,6 @@
    7.79    _head = NULL;
    7.80    _length = 0;
    7.81  
    7.82 -  empty_list(_scan_only_head);
    7.83 -  _scan_only_head = NULL;
    7.84 -  _scan_only_tail = NULL;
    7.85 -  _scan_only_length = 0;
    7.86 -  _curr_scan_only = NULL;
    7.87 -
    7.88    empty_list(_survivor_head);
    7.89    _survivor_head = NULL;
    7.90    _survivor_tail = NULL;
    7.91 @@ -248,11 +199,11 @@
    7.92    HeapRegion* curr = _head;
    7.93    HeapRegion* last = NULL;
    7.94    while (curr != NULL) {
    7.95 -    if (!curr->is_young() || curr->is_scan_only()) {
    7.96 +    if (!curr->is_young()) {
    7.97        gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
    7.98 -                             "incorrectly tagged (%d, %d)",
    7.99 +                             "incorrectly tagged (y: %d, surv: %d)",
   7.100                               curr->bottom(), curr->end(),
   7.101 -                             curr->is_young(), curr->is_scan_only());
   7.102 +                             curr->is_young(), curr->is_survivor());
   7.103        ret = false;
   7.104      }
   7.105      ++length;
   7.106 @@ -267,47 +218,10 @@
   7.107                             length, _length);
   7.108    }
   7.109  
   7.110 -  bool scan_only_ret = true;
   7.111 -  length = 0;
   7.112 -  curr = _scan_only_head;
   7.113 -  last = NULL;
   7.114 -  while (curr != NULL) {
   7.115 -    if (!curr->is_young() || curr->is_scan_only()) {
   7.116 -      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
   7.117 -                             "incorrectly tagged (%d, %d)",
   7.118 -                             curr->bottom(), curr->end(),
   7.119 -                             curr->is_young(), curr->is_scan_only());
   7.120 -      scan_only_ret = false;
   7.121 -    }
   7.122 -    ++length;
   7.123 -    last = curr;
   7.124 -    curr = curr->get_next_young_region();
   7.125 -  }
   7.126 -  scan_only_ret = scan_only_ret && (length == _scan_only_length);
   7.127 -
   7.128 -  if ( (last != _scan_only_tail) ||
   7.129 -       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
   7.130 -       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
   7.131 -     gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
   7.132 -     scan_only_ret = false;
   7.133 -  }
   7.134 -
   7.135 -  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
   7.136 -    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
   7.137 -    scan_only_ret = false;
   7.138 -   }
   7.139 -
   7.140 -  if (!scan_only_ret) {
   7.141 -    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
   7.142 -    gclog_or_tty->print_cr("###   list has %d entries, _scan_only_length is %d",
   7.143 -                  length, _scan_only_length);
   7.144 -  }
   7.145 -
   7.146 -  return ret && scan_only_ret;
   7.147 +  return ret;
   7.148  }
   7.149  
   7.150 -bool YoungList::check_list_empty(bool ignore_scan_only_list,
   7.151 -                                 bool check_sample) {
   7.152 +bool YoungList::check_list_empty(bool check_sample) {
   7.153    bool ret = true;
   7.154  
   7.155    if (_length != 0) {
   7.156 @@ -327,28 +241,7 @@
   7.157      gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   7.158    }
   7.159  
   7.160 -  if (ignore_scan_only_list)
   7.161 -    return ret;
   7.162 -
   7.163 -  bool scan_only_ret = true;
   7.164 -  if (_scan_only_length != 0) {
   7.165 -    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
   7.166 -                  _scan_only_length);
   7.167 -    scan_only_ret = false;
   7.168 -  }
   7.169 -  if (_scan_only_head != NULL) {
   7.170 -    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
   7.171 -     scan_only_ret = false;
   7.172 -  }
   7.173 -  if (_scan_only_tail != NULL) {
   7.174 -    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
   7.175 -    scan_only_ret = false;
   7.176 -  }
   7.177 -  if (!scan_only_ret) {
   7.178 -    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
   7.179 -  }
   7.180 -
   7.181 -  return ret && scan_only_ret;
   7.182 +  return ret;
   7.183  }
   7.184  
   7.185  void
   7.186 @@ -365,7 +258,18 @@
   7.187  void
   7.188  YoungList::rs_length_sampling_next() {
   7.189    assert( _curr != NULL, "invariant" );
   7.190 -  _sampled_rs_lengths += _curr->rem_set()->occupied();
   7.191 +  size_t rs_length = _curr->rem_set()->occupied();
   7.192 +
   7.193 +  _sampled_rs_lengths += rs_length;
   7.194 +
   7.195 +  // The current region may not yet have been added to the
   7.196 +  // incremental collection set (it gets added when it is
   7.197 +  // retired as the current allocation region).
   7.198 +  if (_curr->in_collection_set()) {
   7.199 +    // Update the collection set policy information for this region
   7.200 +    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
   7.201 +  }
   7.202 +
   7.203    _curr = _curr->get_next_young_region();
   7.204    if (_curr == NULL) {
   7.205      _last_sampled_rs_lengths = _sampled_rs_lengths;
   7.206 @@ -375,54 +279,46 @@
   7.207  
   7.208  void
   7.209  YoungList::reset_auxilary_lists() {
   7.210 -  // We could have just "moved" the scan-only list to the young list.
   7.211 -  // However, the scan-only list is ordered according to the region
   7.212 -  // age in descending order, so, by moving one entry at a time, we
   7.213 -  // ensure that it is recreated in ascending order.
   7.214 -
   7.215    guarantee( is_empty(), "young list should be empty" );
   7.216    assert(check_list_well_formed(), "young list should be well formed");
   7.217  
   7.218    // Add survivor regions to SurvRateGroup.
   7.219    _g1h->g1_policy()->note_start_adding_survivor_regions();
   7.220    _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   7.221 +
   7.222    for (HeapRegion* curr = _survivor_head;
   7.223         curr != NULL;
   7.224         curr = curr->get_next_young_region()) {
   7.225      _g1h->g1_policy()->set_region_survivors(curr);
   7.226 +
   7.227 +    // The region is a non-empty survivor so let's add it to
   7.228 +    // the incremental collection set for the next evacuation
   7.229 +    // pause.
   7.230 +    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   7.231    }
   7.232    _g1h->g1_policy()->note_stop_adding_survivor_regions();
   7.233  
   7.234 +  _head   = _survivor_head;
   7.235 +  _length = _survivor_length;
   7.236    if (_survivor_head != NULL) {
   7.237 -    _head           = _survivor_head;
   7.238 -    _length         = _survivor_length + _scan_only_length;
   7.239 -    _survivor_tail->set_next_young_region(_scan_only_head);
   7.240 -  } else {
   7.241 -    _head           = _scan_only_head;
   7.242 -    _length         = _scan_only_length;
   7.243 -  }
   7.244 -
   7.245 -  for (HeapRegion* curr = _scan_only_head;
   7.246 -       curr != NULL;
   7.247 -       curr = curr->get_next_young_region()) {
   7.248 -    curr->recalculate_age_in_surv_rate_group();
   7.249 -  }
   7.250 -  _scan_only_head   = NULL;
   7.251 -  _scan_only_tail   = NULL;
   7.252 -  _scan_only_length = 0;
   7.253 -  _curr_scan_only   = NULL;
   7.254 -
   7.255 -  _survivor_head    = NULL;
   7.256 -  _survivor_tail   = NULL;
   7.257 -  _survivor_length  = 0;
   7.258 +    assert(_survivor_tail != NULL, "cause it shouldn't be");
   7.259 +    assert(_survivor_length > 0, "invariant");
   7.260 +    _survivor_tail->set_next_young_region(NULL);
   7.261 +  }
   7.262 +
   7.263 +  // Don't clear the survivor list handles until the start of
   7.264 +  // the next evacuation pause - we need it in order to re-tag
   7.265 +  // the survivor regions from this evacuation pause as 'young'
   7.266 +  // at the start of the next.
   7.267 +
   7.268    _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   7.269  
   7.270    assert(check_list_well_formed(), "young list should be well formed");
   7.271  }
   7.272  
   7.273  void YoungList::print() {
   7.274 -  HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
   7.275 -  const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};
   7.276 +  HeapRegion* lists[] = {_head,   _survivor_head};
   7.277 +  const char* names[] = {"YOUNG", "SURVIVOR"};
   7.278  
   7.279    for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   7.280      gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   7.281 @@ -431,7 +327,7 @@
   7.282        gclog_or_tty->print_cr("  empty");
   7.283      while (curr != NULL) {
   7.284        gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
   7.285 -                             "age: %4d, y: %d, s-o: %d, surv: %d",
   7.286 +                             "age: %4d, y: %d, surv: %d",
   7.287                               curr->bottom(), curr->end(),
   7.288                               curr->top(),
   7.289                               curr->prev_top_at_mark_start(),
   7.290 @@ -439,7 +335,6 @@
   7.291                               curr->top_at_conc_mark_count(),
   7.292                               curr->age_in_surv_rate_group_cond(),
   7.293                               curr->is_young(),
   7.294 -                             curr->is_scan_only(),
   7.295                               curr->is_survivor());
   7.296        curr = curr->get_next_young_region();
   7.297      }
   7.298 @@ -707,6 +602,12 @@
   7.299      // region below.
   7.300      if (_cur_alloc_region != NULL) {
   7.301        // We're finished with the _cur_alloc_region.
    7.302 +      // As we're building (at least the young portion) of the collection
   7.303 +      // set incrementally we'll add the current allocation region to
   7.304 +      // the collection set here.
   7.305 +      if (_cur_alloc_region->is_young()) {
   7.306 +        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
   7.307 +      }
   7.308        _summary_bytes_used += _cur_alloc_region->used();
   7.309        _cur_alloc_region = NULL;
   7.310      }
   7.311 @@ -820,6 +721,12 @@
   7.312        _free_regions++;
   7.313        free_region(_cur_alloc_region);
   7.314      } else {
    7.315 +      // As we're building (at least the young portion) of the collection
   7.316 +      // set incrementally we'll add the current allocation region to
   7.317 +      // the collection set here.
   7.318 +      if (_cur_alloc_region->is_young()) {
   7.319 +        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
   7.320 +      }
   7.321        _summary_bytes_used += _cur_alloc_region->used();
   7.322      }
   7.323      _cur_alloc_region = NULL;
   7.324 @@ -913,20 +820,25 @@
   7.325    }
   7.326  
   7.327    if (full && DisableExplicitGC) {
   7.328 -    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
   7.329      return;
   7.330    }
   7.331  
   7.332    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   7.333    assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
   7.334  
   7.335 +  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
   7.336 +                           collector_policy()->should_clear_all_soft_refs();
   7.337 +
   7.338 +  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
   7.339 +
   7.340    {
   7.341      IsGCActiveMark x;
   7.342  
   7.343      // Timing
   7.344      gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   7.345      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   7.346 -    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
   7.347 +    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
   7.348 +                PrintGC, true, gclog_or_tty);
   7.349  
   7.350      TraceMemoryManagerStats tms(true /* fullGC */);
   7.351  
   7.352 @@ -970,6 +882,15 @@
   7.353      g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
   7.354      tear_down_region_lists();
   7.355      set_used_regions_to_need_zero_fill();
   7.356 +
   7.357 +    // We may have added regions to the current incremental collection
   7.358 +    // set between the last GC or pause and now. We need to clear the
   7.359 +    // incremental collection set and then start rebuilding it afresh
   7.360 +    // after this full GC.
   7.361 +    abandon_collection_set(g1_policy()->inc_cset_head());
   7.362 +    g1_policy()->clear_incremental_cset();
   7.363 +    g1_policy()->stop_incremental_cset_building();
   7.364 +
   7.365      if (g1_policy()->in_young_gc_mode()) {
   7.366        empty_young_list();
   7.367        g1_policy()->set_full_young_gcs(true);
   7.368 @@ -985,12 +906,12 @@
   7.369      ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
   7.370  
   7.371      ref_processor()->enable_discovery();
   7.372 -    ref_processor()->setup_policy(clear_all_soft_refs);
   7.373 +    ref_processor()->setup_policy(do_clear_all_soft_refs);
   7.374  
   7.375      // Do collection work
   7.376      {
   7.377        HandleMark hm;  // Discard invalid handles created during gc
   7.378 -      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
   7.379 +      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
   7.380      }
   7.381      // Because freeing humongous regions may have added some unclean
   7.382      // regions, it is necessary to tear down again before rebuilding.
   7.383 @@ -1053,6 +974,15 @@
   7.384        perm()->compute_new_size();
   7.385      }
   7.386  
   7.387 +    // Start a new incremental collection set for the next pause
   7.388 +    assert(g1_policy()->collection_set() == NULL, "must be");
   7.389 +    g1_policy()->start_incremental_cset_building();
   7.390 +
   7.391 +    // Clear the _cset_fast_test bitmap in anticipation of adding
   7.392 +    // regions to the incremental collection set for the next
   7.393 +    // evacuation pause.
   7.394 +    clear_cset_fast_test();
   7.395 +
   7.396      double end = os::elapsedTime();
   7.397      g1_policy()->record_full_collection_end();
   7.398  
   7.399 @@ -1071,7 +1001,9 @@
   7.400  
   7.401    if (g1_policy()->in_young_gc_mode()) {
   7.402      _young_list->reset_sampled_info();
   7.403 -    assert( check_young_list_empty(false, false),
   7.404 +    // At this point there should be no regions in the
   7.405 +    // entire heap tagged as young.
   7.406 +    assert( check_young_list_empty(true /* check_heap */),
   7.407              "young list should be empty at this point");
   7.408    }
   7.409  
   7.410 @@ -1208,6 +1140,9 @@
   7.411      return result;
   7.412    }
   7.413  
   7.414 +  assert(!collector_policy()->should_clear_all_soft_refs(),
   7.415 +    "Flag should have been handled and cleared prior to this point");
   7.416 +
   7.417    // What else?  We might try synchronous finalization later.  If the total
   7.418    // space available is large enough for the allocation, then a more
   7.419    // complete compaction phase than we've tried so far might be
   7.420 @@ -1565,6 +1500,20 @@
   7.421  
   7.422    _g1h = this;
   7.423  
   7.424 +   _in_cset_fast_test_length = max_regions();
   7.425 +   _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
   7.426 +
   7.427 +   // We're biasing _in_cset_fast_test to avoid subtracting the
   7.428 +   // beginning of the heap every time we want to index; basically
    7.429 +   // it's the same as what we do with the card table.
   7.430 +   _in_cset_fast_test = _in_cset_fast_test_base -
   7.431 +                ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
   7.432 +
   7.433 +   // Clear the _cset_fast_test bitmap in anticipation of adding
   7.434 +   // regions to the incremental collection set for the first
   7.435 +   // evacuation pause.
   7.436 +   clear_cset_fast_test();
   7.437 +
   7.438    // Create the ConcurrentMark data structure and thread.
   7.439    // (Must do this late, so that "max_regions" is defined.)
   7.440    _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
   7.441 @@ -2185,8 +2134,10 @@
   7.442      assert(o != NULL, "Huh?");
   7.443      if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
   7.444        o->oop_iterate(&isLive);
   7.445 -      if (!_hr->obj_allocated_since_prev_marking(o))
   7.446 -        _live_bytes += (o->size() * HeapWordSize);
   7.447 +      if (!_hr->obj_allocated_since_prev_marking(o)) {
   7.448 +        size_t obj_size = o->size();    // Make sure we don't overflow
   7.449 +        _live_bytes += (obj_size * HeapWordSize);
   7.450 +      }
   7.451      }
   7.452    }
   7.453    size_t live_bytes() { return _live_bytes; }
   7.454 @@ -2388,8 +2339,8 @@
   7.455        print_on(gclog_or_tty, true /* extended */);
   7.456        gclog_or_tty->print_cr("");
   7.457        if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
   7.458 -        concurrent_mark()->print_reachable(use_prev_marking,
   7.459 -                                           "failed-verification");
   7.460 +        concurrent_mark()->print_reachable("at-verification-failure",
   7.461 +                                           use_prev_marking, false /* all */);
   7.462        }
   7.463        gclog_or_tty->flush();
   7.464      }
   7.465 @@ -2741,25 +2692,19 @@
   7.466        double start_time_sec = os::elapsedTime();
   7.467        size_t start_used_bytes = used();
   7.468  
   7.469 +#if YOUNG_LIST_VERBOSE
   7.470 +      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
   7.471 +      _young_list->print();
   7.472 +      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
   7.473 +#endif // YOUNG_LIST_VERBOSE
   7.474 +
   7.475        g1_policy()->record_collection_pause_start(start_time_sec,
   7.476                                                   start_used_bytes);
   7.477  
   7.478 -      guarantee(_in_cset_fast_test == NULL, "invariant");
   7.479 -      guarantee(_in_cset_fast_test_base == NULL, "invariant");
   7.480 -      _in_cset_fast_test_length = max_regions();
   7.481 -      _in_cset_fast_test_base =
   7.482 -                             NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
   7.483 -      memset(_in_cset_fast_test_base, false,
   7.484 -                                     _in_cset_fast_test_length * sizeof(bool));
   7.485 -      // We're biasing _in_cset_fast_test to avoid subtracting the
   7.486 -      // beginning of the heap every time we want to index; basically
   7.487 -      // it's the same with what we do with the card table.
   7.488 -      _in_cset_fast_test = _in_cset_fast_test_base -
   7.489 -              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
   7.490 -
   7.491 -#if SCAN_ONLY_VERBOSE
   7.492 +#if YOUNG_LIST_VERBOSE
   7.493 +      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
   7.494        _young_list->print();
   7.495 -#endif // SCAN_ONLY_VERBOSE
   7.496 +#endif // YOUNG_LIST_VERBOSE
   7.497  
   7.498        if (g1_policy()->during_initial_mark_pause()) {
   7.499          concurrent_mark()->checkpointRootsInitialPre();
   7.500 @@ -2786,12 +2731,15 @@
   7.501        if (mark_in_progress())
   7.502          concurrent_mark()->newCSet();
   7.503  
   7.504 -      // Now choose the CS.
   7.505 -      g1_policy()->choose_collection_set();
   7.506 -
   7.507 -      // We may abandon a pause if we find no region that will fit in the MMU
   7.508 -      // pause.
   7.509 -      bool abandoned = (g1_policy()->collection_set() == NULL);
   7.510 +#if YOUNG_LIST_VERBOSE
   7.511 +      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
   7.512 +      _young_list->print();
   7.513 +      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
   7.514 +#endif // YOUNG_LIST_VERBOSE
   7.515 +
   7.516 +      // Now choose the CS. We may abandon a pause if we find no
   7.517 +      // region that will fit in the MMU pause.
   7.518 +      bool abandoned = g1_policy()->choose_collection_set();
   7.519  
   7.520        // Nothing to do if we were unable to choose a collection set.
   7.521        if (!abandoned) {
   7.522 @@ -2809,40 +2757,64 @@
   7.523  
   7.524          // Actually do the work...
   7.525          evacuate_collection_set();
   7.526 +
   7.527          free_collection_set(g1_policy()->collection_set());
   7.528          g1_policy()->clear_collection_set();
   7.529  
   7.530 -        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
   7.531 -        // this is more for peace of mind; we're nulling them here and
   7.532 -        // we're expecting them to be null at the beginning of the next GC
   7.533 -        _in_cset_fast_test = NULL;
   7.534 -        _in_cset_fast_test_base = NULL;
   7.535 -
   7.536          cleanup_surviving_young_words();
   7.537  
   7.538 +        // Start a new incremental collection set for the next pause.
   7.539 +        g1_policy()->start_incremental_cset_building();
   7.540 +
   7.541 +        // Clear the _cset_fast_test bitmap in anticipation of adding
   7.542 +        // regions to the incremental collection set for the next
   7.543 +        // evacuation pause.
   7.544 +        clear_cset_fast_test();
   7.545 +
   7.546          if (g1_policy()->in_young_gc_mode()) {
   7.547            _young_list->reset_sampled_info();
   7.548 -          assert(check_young_list_empty(true),
   7.549 -                 "young list should be empty");
   7.550 -
   7.551 -#if SCAN_ONLY_VERBOSE
   7.552 +
   7.553 +          // Don't check the whole heap at this point as the
   7.554 +          // GC alloc regions from this pause have been tagged
   7.555 +          // as survivors and moved on to the survivor list.
   7.556 +          // Survivor regions will fail the !is_young() check.
   7.557 +          assert(check_young_list_empty(false /* check_heap */),
   7.558 +              "young list should be empty");
   7.559 +
   7.560 +#if YOUNG_LIST_VERBOSE
   7.561 +          gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
   7.562            _young_list->print();
   7.563 -#endif // SCAN_ONLY_VERBOSE
   7.564 +#endif // YOUNG_LIST_VERBOSE
   7.565  
   7.566            g1_policy()->record_survivor_regions(_young_list->survivor_length(),
   7.567                                            _young_list->first_survivor_region(),
   7.568                                            _young_list->last_survivor_region());
   7.569 +
   7.570            _young_list->reset_auxilary_lists();
   7.571          }
   7.572        } else {
   7.573 -        if (_in_cset_fast_test != NULL) {
   7.574 -          assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
   7.575 -          FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
   7.576 -          //  this is more for peace of mind; we're nulling them here and
   7.577 -          // we're expecting them to be null at the beginning of the next GC
   7.578 -          _in_cset_fast_test = NULL;
   7.579 -          _in_cset_fast_test_base = NULL;
   7.580 -        }
   7.581 +        // We have abandoned the current collection. This can only happen
   7.582 +        // if we're not doing young or partially young collections, and
   7.583 +        // we didn't find an old region that we're able to collect within
   7.584 +        // the allowed time.
   7.585 +
   7.586 +        assert(g1_policy()->collection_set() == NULL, "should be");
   7.587 +        assert(_young_list->length() == 0, "because it should be");
   7.588 +
   7.589 +        // This should be a no-op.
   7.590 +        abandon_collection_set(g1_policy()->inc_cset_head());
   7.591 +
   7.592 +        g1_policy()->clear_incremental_cset();
   7.593 +        g1_policy()->stop_incremental_cset_building();
   7.594 +
   7.595 +        // Start a new incremental collection set for the next pause.
   7.596 +        g1_policy()->start_incremental_cset_building();
   7.597 +
   7.598 +        // Clear the _cset_fast_test bitmap in anticipation of adding
   7.599 +        // regions to the incremental collection set for the next
   7.600 +        // evacuation pause.
   7.601 +        clear_cset_fast_test();
   7.602 +
   7.603          // This looks confusing, because the DPT should really be empty
   7.604          // at this point -- since we have not done any collection work,
   7.605          // there should not be any derived pointers in the table to update;
   7.606 @@ -2876,9 +2848,11 @@
   7.607          doConcurrentMark();
   7.608        }
   7.609  
   7.610 -#if SCAN_ONLY_VERBOSE
   7.611 +#if YOUNG_LIST_VERBOSE
   7.612 +      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
   7.613        _young_list->print();
   7.614 -#endif // SCAN_ONLY_VERBOSE
   7.615 +      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
   7.616 +#endif // YOUNG_LIST_VERBOSE
   7.617  
   7.618        double end_time_sec = os::elapsedTime();
   7.619        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
   7.620 @@ -2936,6 +2910,25 @@
   7.621    }
   7.622  }
   7.623  
   7.624 +size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
   7.625 +{
   7.626 +  size_t gclab_word_size;
   7.627 +  switch (purpose) {
   7.628 +    case GCAllocForSurvived:
   7.629 +      gclab_word_size = YoungPLABSize;
   7.630 +      break;
   7.631 +    case GCAllocForTenured:
   7.632 +      gclab_word_size = OldPLABSize;
   7.633 +      break;
   7.634 +    default:
   7.635 +      assert(false, "unknown GCAllocPurpose");
   7.636 +      gclab_word_size = OldPLABSize;
   7.637 +      break;
   7.638 +  }
   7.639 +  return gclab_word_size;
   7.640 +}
   7.641 +
   7.642 +
   7.643  void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   7.644    assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   7.645    // make sure we don't call set_gc_alloc_region() multiple times on
   7.646 @@ -3109,6 +3102,11 @@
   7.647      } else {
   7.648        // the region was retained from the last collection
   7.649        ++_gc_alloc_region_counts[ap];
   7.650 +      if (G1PrintHeapRegions) {
   7.651 +        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
   7.652 +                               "top "PTR_FORMAT,
   7.653 +                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
   7.654 +      }
   7.655      }
   7.656  
   7.657      if (alloc_region != NULL) {
   7.658 @@ -3665,6 +3663,8 @@
   7.659      _g1_rem(g1h->g1_rem_set()),
   7.660      _hash_seed(17), _queue_num(queue_num),
   7.661      _term_attempts(0),
   7.662 +    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
   7.663 +    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
   7.664      _age_table(false),
   7.665  #if G1_DETAILED_STATS
   7.666      _pushes(0), _pops(0), _steals(0),
   7.667 @@ -3691,6 +3691,9 @@
   7.668  
   7.669    _overflowed_refs = new OverflowQueue(10);
   7.670  
   7.671 +  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
   7.672 +  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
   7.673 +
   7.674    _start = os::elapsedTime();
   7.675  }
   7.676  
   7.677 @@ -3988,16 +3991,13 @@
   7.678  
   7.679      OopsInHeapRegionClosure        *scan_root_cl;
   7.680      OopsInHeapRegionClosure        *scan_perm_cl;
   7.681 -    OopsInHeapRegionClosure        *scan_so_cl;
   7.682  
   7.683      if (_g1h->g1_policy()->during_initial_mark_pause()) {
   7.684        scan_root_cl = &scan_mark_root_cl;
   7.685        scan_perm_cl = &scan_mark_perm_cl;
   7.686 -      scan_so_cl   = &scan_mark_heap_rs_cl;
   7.687      } else {
   7.688        scan_root_cl = &only_scan_root_cl;
   7.689        scan_perm_cl = &only_scan_perm_cl;
   7.690 -      scan_so_cl   = &only_scan_heap_rs_cl;
   7.691      }
   7.692  
   7.693      pss.start_strong_roots();
   7.694 @@ -4005,7 +4005,6 @@
   7.695                                    SharedHeap::SO_AllClasses,
   7.696                                    scan_root_cl,
   7.697                                    &push_heap_rs_cl,
   7.698 -                                  scan_so_cl,
   7.699                                    scan_perm_cl,
   7.700                                    i);
   7.701      pss.end_strong_roots();
   7.702 @@ -4067,7 +4066,6 @@
   7.703                          SharedHeap::ScanningOption so,
   7.704                          OopClosure* scan_non_heap_roots,
   7.705                          OopsInHeapRegionClosure* scan_rs,
   7.706 -                        OopsInHeapRegionClosure* scan_so,
   7.707                          OopsInGenClosure* scan_perm,
   7.708                          int worker_i) {
   7.709    // First scan the strong roots, including the perm gen.
   7.710 @@ -4087,6 +4085,7 @@
   7.711                         &buf_scan_non_heap_roots,
   7.712                         &eager_scan_code_roots,
   7.713                         &buf_scan_perm);
   7.714 +
   7.715    // Finish up any enqueued closure apps.
   7.716    buf_scan_non_heap_roots.done();
   7.717    buf_scan_perm.done();
   7.718 @@ -4109,9 +4108,6 @@
   7.719  
   7.720    // XXX What should this be doing in the parallel case?
   7.721    g1_policy()->record_collection_pause_end_CH_strong_roots();
   7.722 -  if (scan_so != NULL) {
   7.723 -    scan_scan_only_set(scan_so, worker_i);
   7.724 -  }
   7.725    // Now scan the complement of the collection set.
   7.726    if (scan_rs != NULL) {
   7.727      g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
   7.728 @@ -4125,54 +4121,6 @@
   7.729  }
   7.730  
   7.731  void
   7.732 -G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
   7.733 -                                       OopsInHeapRegionClosure* oc,
   7.734 -                                       int worker_i) {
   7.735 -  HeapWord* startAddr = r->bottom();
   7.736 -  HeapWord* endAddr = r->used_region().end();
   7.737 -
   7.738 -  oc->set_region(r);
   7.739 -
   7.740 -  HeapWord* p = r->bottom();
   7.741 -  HeapWord* t = r->top();
   7.742 -  guarantee( p == r->next_top_at_mark_start(), "invariant" );
   7.743 -  while (p < t) {
   7.744 -    oop obj = oop(p);
   7.745 -    p += obj->oop_iterate(oc);
   7.746 -  }
   7.747 -}
   7.748 -
   7.749 -void
   7.750 -G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
   7.751 -                                    int worker_i) {
   7.752 -  double start = os::elapsedTime();
   7.753 -
   7.754 -  BufferingOopsInHeapRegionClosure boc(oc);
   7.755 -
   7.756 -  FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
   7.757 -  FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
   7.758 -
   7.759 -  OopsInHeapRegionClosure *foc;
   7.760 -  if (g1_policy()->during_initial_mark_pause())
   7.761 -    foc = &scan_and_mark;
   7.762 -  else
   7.763 -    foc = &scan_only;
   7.764 -
   7.765 -  HeapRegion* hr;
   7.766 -  int n = 0;
   7.767 -  while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
   7.768 -    scan_scan_only_region(hr, foc, worker_i);
   7.769 -    ++n;
   7.770 -  }
   7.771 -  boc.done();
   7.772 -
   7.773 -  double closure_app_s = boc.closure_app_seconds();
   7.774 -  g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
   7.775 -  double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
   7.776 -  g1_policy()->record_scan_only_time(worker_i, ms, n);
   7.777 -}
   7.778 -
   7.779 -void
   7.780  G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
   7.781                                         OopClosure* non_root_closure) {
   7.782    CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
   7.783 @@ -4370,17 +4318,14 @@
   7.784  class G1ParCleanupCTTask : public AbstractGangTask {
   7.785    CardTableModRefBS* _ct_bs;
   7.786    G1CollectedHeap* _g1h;
   7.787 -  HeapRegion* volatile _so_head;
   7.788    HeapRegion* volatile _su_head;
   7.789  public:
   7.790    G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
   7.791                       G1CollectedHeap* g1h,
   7.792 -                     HeapRegion* scan_only_list,
   7.793                       HeapRegion* survivor_list) :
   7.794      AbstractGangTask("G1 Par Cleanup CT Task"),
   7.795      _ct_bs(ct_bs),
   7.796      _g1h(g1h),
   7.797 -    _so_head(scan_only_list),
   7.798      _su_head(survivor_list)
   7.799    { }
   7.800  
   7.801 @@ -4389,14 +4334,13 @@
   7.802      while (r = _g1h->pop_dirty_cards_region()) {
   7.803        clear_cards(r);
   7.804      }
   7.805 -    // Redirty the cards of the scan-only and survivor regions.
   7.806 -    dirty_list(&this->_so_head);
   7.807 +    // Redirty the cards of the survivor regions.
   7.808      dirty_list(&this->_su_head);
   7.809    }
   7.810  
   7.811    void clear_cards(HeapRegion* r) {
   7.812 -    // Cards for Survivor and Scan-Only regions will be dirtied later.
   7.813 -    if (!r->is_scan_only() && !r->is_survivor()) {
   7.814 +    // Cards for Survivor regions will be dirtied later.
   7.815 +    if (!r->is_survivor()) {
   7.816        _ct_bs->clear(MemRegion(r->bottom(), r->end()));
   7.817      }
   7.818    }
   7.819 @@ -4429,7 +4373,7 @@
   7.820    virtual bool doHeapRegion(HeapRegion* r)
   7.821    {
   7.822      MemRegion mr(r->bottom(), r->end());
   7.823 -    if (r->is_scan_only() || r->is_survivor()) {
   7.824 +    if (r->is_survivor()) {
   7.825        _ct_bs->verify_dirty_region(mr);
   7.826      } else {
   7.827        _ct_bs->verify_clean_region(mr);
   7.828 @@ -4445,8 +4389,8 @@
   7.829  
   7.830    // Iterate over the dirty cards region list.
   7.831    G1ParCleanupCTTask cleanup_task(ct_bs, this,
   7.832 -                                  _young_list->first_scan_only_region(),
   7.833                                    _young_list->first_survivor_region());
   7.834 +
   7.835    if (ParallelGCThreads > 0) {
   7.836      set_par_threads(workers()->total_workers());
   7.837      workers()->run_task(&cleanup_task);
   7.838 @@ -4462,12 +4406,12 @@
   7.839        }
   7.840        r->set_next_dirty_cards_region(NULL);
   7.841      }
   7.842 -    // now, redirty the cards of the scan-only and survivor regions
   7.843 +    // now, redirty the cards of the survivor regions
   7.844      // (it seemed faster to do it this way, instead of iterating over
   7.845      // all regions and then clearing / dirtying as appropriate)
   7.846 -    dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
   7.847      dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
   7.848    }
   7.849 +
   7.850    double elapsed = os::elapsedTime() - start;
   7.851    g1_policy()->record_clear_ct_time( elapsed * 1000.0);
   7.852  #ifndef PRODUCT
   7.853 @@ -4488,6 +4432,11 @@
   7.854    double young_time_ms     = 0.0;
   7.855    double non_young_time_ms = 0.0;
   7.856  
    7.857 +  // Since the collection set is a superset of the young list,
   7.858 +  // all we need to do to clear the young list is clear its
   7.859 +  // head and length, and unlink any young regions in the code below
   7.860 +  _young_list->clear();
   7.861 +
   7.862    G1CollectorPolicy* policy = g1_policy();
   7.863  
   7.864    double start_sec = os::elapsedTime();
   7.865 @@ -4531,6 +4480,12 @@
   7.866        guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
   7.867        size_t words_survived = _surviving_young_words[index];
   7.868        cur->record_surv_words_in_group(words_survived);
   7.869 +
    7.870 +      // At this point we have 'popped' cur from the collection set
   7.871 +      // (linked via next_in_collection_set()) but it is still in the
   7.872 +      // young list (linked via next_young_region()). Clear the
   7.873 +      // _next_young_region field.
   7.874 +      cur->set_next_young_region(NULL);
   7.875      } else {
   7.876        int index = cur->young_index_in_cset();
   7.877        guarantee( index == -1, "invariant" );
   7.878 @@ -4546,7 +4501,6 @@
   7.879               "Should not have empty regions in a CS.");
   7.880        free_region(cur);
   7.881      } else {
   7.882 -      guarantee( !cur->is_scan_only(), "should not be scan only" );
   7.883        cur->uninstall_surv_rate_group();
   7.884        if (cur->is_young())
   7.885          cur->set_young_index_in_cset(-1);
   7.886 @@ -4570,6 +4524,27 @@
   7.887    policy->record_non_young_free_cset_time_ms(non_young_time_ms);
   7.888  }
   7.889  
   7.890 +// This routine is similar to the above but does not record
   7.891 +// any policy statistics or update free lists; we are abandoning
   7.892 +// the current incremental collection set in preparation of a
   7.893 +// full collection. After the full GC we will start to build up
   7.894 +// the incremental collection set again.
   7.895 +// This is only called when we're doing a full collection
   7.896 +// and is immediately followed by the tearing down of the young list.
   7.897 +
   7.898 +void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
   7.899 +  HeapRegion* cur = cs_head;
   7.900 +
   7.901 +  while (cur != NULL) {
   7.902 +    HeapRegion* next = cur->next_in_collection_set();
   7.903 +    assert(cur->in_collection_set(), "bad CS");
   7.904 +    cur->set_next_in_collection_set(NULL);
   7.905 +    cur->set_in_collection_set(false);
   7.906 +    cur->set_young_index_in_cset(-1);
   7.907 +    cur = next;
   7.908 +  }
   7.909 +}
   7.910 +
   7.911  HeapRegion*
   7.912  G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
   7.913    assert(ZF_mon->owned_by_self(), "Precondition");
   7.914 @@ -4936,12 +4911,10 @@
   7.915    bool success() { return _success; }
   7.916  };
   7.917  
   7.918 -bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
   7.919 -                                             bool check_sample) {
   7.920 -  bool ret = true;
   7.921 -
   7.922 -  ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
   7.923 -  if (!ignore_scan_only_list) {
   7.924 +bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
   7.925 +  bool ret = _young_list->check_list_empty(check_sample);
   7.926 +
   7.927 +  if (check_heap) {
   7.928      NoYoungRegionsClosure closure;
   7.929      heap_region_iterate(&closure);
   7.930      ret = ret && closure.success();
     8.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 21 01:13:15 2010 -0700
     8.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Apr 26 18:01:55 2010 -0400
     8.3 @@ -1,5 +1,5 @@
     8.4  /*
     8.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     8.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
     8.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.8   *
     8.9   * This code is free software; you can redistribute it and/or modify it
    8.10 @@ -81,33 +81,29 @@
    8.11  
    8.12    HeapRegion* _head;
    8.13  
    8.14 -  HeapRegion* _scan_only_head;
    8.15 -  HeapRegion* _scan_only_tail;
    8.16 +  HeapRegion* _survivor_head;
    8.17 +  HeapRegion* _survivor_tail;
    8.18 +
    8.19 +  HeapRegion* _curr;
    8.20 +
    8.21    size_t      _length;
    8.22 -  size_t      _scan_only_length;
    8.23 +  size_t      _survivor_length;
    8.24  
    8.25    size_t      _last_sampled_rs_lengths;
    8.26    size_t      _sampled_rs_lengths;
    8.27 -  HeapRegion* _curr;
    8.28 -  HeapRegion* _curr_scan_only;
    8.29  
    8.30 -  HeapRegion* _survivor_head;
    8.31 -  HeapRegion* _survivor_tail;
    8.32 -  size_t      _survivor_length;
    8.33 -
    8.34 -  void          empty_list(HeapRegion* list);
    8.35 +  void         empty_list(HeapRegion* list);
    8.36  
    8.37  public:
    8.38    YoungList(G1CollectedHeap* g1h);
    8.39  
    8.40 -  void          push_region(HeapRegion* hr);
    8.41 -  void          add_survivor_region(HeapRegion* hr);
    8.42 -  HeapRegion*   pop_region();
    8.43 -  void          empty_list();
    8.44 -  bool          is_empty() { return _length == 0; }
    8.45 -  size_t        length() { return _length; }
    8.46 -  size_t        scan_only_length() { return _scan_only_length; }
    8.47 -  size_t        survivor_length() { return _survivor_length; }
    8.48 +  void         push_region(HeapRegion* hr);
    8.49 +  void         add_survivor_region(HeapRegion* hr);
    8.50 +
    8.51 +  void         empty_list();
    8.52 +  bool         is_empty() { return _length == 0; }
    8.53 +  size_t       length() { return _length; }
    8.54 +  size_t       survivor_length() { return _survivor_length; }
    8.55  
    8.56    void rs_length_sampling_init();
    8.57    bool rs_length_sampling_more();
    8.58 @@ -120,22 +116,21 @@
    8.59  
    8.60    // for development purposes
    8.61    void reset_auxilary_lists();
    8.62 +  void clear() { _head = NULL; _length = 0; }
    8.63 +
    8.64 +  void clear_survivors() {
    8.65 +    _survivor_head    = NULL;
    8.66 +    _survivor_tail    = NULL;
    8.67 +    _survivor_length  = 0;
    8.68 +  }
    8.69 +
    8.70    HeapRegion* first_region() { return _head; }
    8.71 -  HeapRegion* first_scan_only_region() { return _scan_only_head; }
    8.72    HeapRegion* first_survivor_region() { return _survivor_head; }
    8.73    HeapRegion* last_survivor_region() { return _survivor_tail; }
    8.74 -  HeapRegion* par_get_next_scan_only_region() {
    8.75 -    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    8.76 -    HeapRegion* ret = _curr_scan_only;
    8.77 -    if (ret != NULL)
    8.78 -      _curr_scan_only = ret->get_next_young_region();
    8.79 -    return ret;
    8.80 -  }
    8.81  
    8.82    // debugging
    8.83    bool          check_list_well_formed();
    8.84 -  bool          check_list_empty(bool ignore_scan_only_list,
    8.85 -                                 bool check_sample = true);
    8.86 +  bool          check_list_empty(bool check_sample = true);
    8.87    void          print();
    8.88  };
    8.89  
    8.90 @@ -232,6 +227,9 @@
    8.91    // current collection.
    8.92    HeapRegion* _gc_alloc_region_list;
    8.93  
    8.94 +  // Determines PLAB size for a particular allocation purpose.
    8.95 +  static size_t desired_plab_sz(GCAllocPurpose purpose);
    8.96 +
    8.97    // When called by par thread, require par_alloc_during_gc_lock() to be held.
    8.98    void push_gc_alloc_region(HeapRegion* hr);
    8.99  
   8.100 @@ -402,8 +400,7 @@
   8.101      assert(_in_cset_fast_test_base != NULL, "sanity");
   8.102      assert(r->in_collection_set(), "invariant");
   8.103      int index = r->hrs_index();
   8.104 -    assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
   8.105 -           "invariant");
   8.106 +    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
   8.107      assert(!_in_cset_fast_test_base[index], "invariant");
   8.108      _in_cset_fast_test_base[index] = true;
   8.109    }
   8.110 @@ -428,6 +425,12 @@
   8.111      }
   8.112    }
   8.113  
   8.114 +  void clear_cset_fast_test() {
   8.115 +    assert(_in_cset_fast_test_base != NULL, "sanity");
   8.116 +    memset(_in_cset_fast_test_base, false,
   8.117 +        _in_cset_fast_test_length * sizeof(bool));
   8.118 +  }
   8.119 +
   8.120  protected:
   8.121  
   8.122    // Shrink the garbage-first heap by at most the given size (in bytes!).
   8.123 @@ -473,6 +476,10 @@
   8.124    // regions.
   8.125    void free_collection_set(HeapRegion* cs_head);
   8.126  
   8.127 +  // Abandon the current collection set without recording policy
   8.128 +  // statistics or updating free lists.
   8.129 +  void abandon_collection_set(HeapRegion* cs_head);
   8.130 +
   8.131    // Applies "scan_non_heap_roots" to roots outside the heap,
   8.132    // "scan_rs" to roots inside the heap (having done "set_region" to
   8.133    // indicate the region in which the root resides), and does "scan_perm"
   8.134 @@ -485,16 +492,9 @@
   8.135                                 SharedHeap::ScanningOption so,
   8.136                                 OopClosure* scan_non_heap_roots,
   8.137                                 OopsInHeapRegionClosure* scan_rs,
   8.138 -                               OopsInHeapRegionClosure* scan_so,
   8.139                                 OopsInGenClosure* scan_perm,
   8.140                                 int worker_i);
   8.141  
   8.142 -  void scan_scan_only_set(OopsInHeapRegionClosure* oc,
   8.143 -                          int worker_i);
   8.144 -  void scan_scan_only_region(HeapRegion* hr,
   8.145 -                             OopsInHeapRegionClosure* oc,
   8.146 -                             int worker_i);
   8.147 -
   8.148    // Apply "blk" to all the weak roots of the system.  These include
   8.149    // JNI weak roots, the code cache, system dictionary, symbol table,
   8.150    // string table, and referents of reachable weak refs.
   8.151 @@ -1133,36 +1133,14 @@
   8.152    void set_region_short_lived_locked(HeapRegion* hr);
   8.153    // add appropriate methods for any other surv rate groups
   8.154  
   8.155 -  void young_list_rs_length_sampling_init() {
   8.156 -    _young_list->rs_length_sampling_init();
   8.157 -  }
   8.158 -  bool young_list_rs_length_sampling_more() {
   8.159 -    return _young_list->rs_length_sampling_more();
   8.160 -  }
   8.161 -  void young_list_rs_length_sampling_next() {
   8.162 -    _young_list->rs_length_sampling_next();
   8.163 -  }
   8.164 -  size_t young_list_sampled_rs_lengths() {
   8.165 -    return _young_list->sampled_rs_lengths();
   8.166 -  }
   8.167 -
   8.168 -  size_t young_list_length()   { return _young_list->length(); }
   8.169 -  size_t young_list_scan_only_length() {
   8.170 -                                      return _young_list->scan_only_length(); }
   8.171 -
   8.172 -  HeapRegion* pop_region_from_young_list() {
   8.173 -    return _young_list->pop_region();
   8.174 -  }
   8.175 -
   8.176 -  HeapRegion* young_list_first_region() {
   8.177 -    return _young_list->first_region();
   8.178 -  }
   8.179 +  YoungList* young_list() { return _young_list; }
   8.180  
   8.181    // debugging
   8.182    bool check_young_list_well_formed() {
   8.183      return _young_list->check_list_well_formed();
   8.184    }
   8.185 -  bool check_young_list_empty(bool ignore_scan_only_list,
   8.186 +
   8.187 +  bool check_young_list_empty(bool check_heap,
   8.188                                bool check_sample = true);
   8.189  
   8.190    // *** Stuff related to concurrent marking.  It's not clear to me that so
   8.191 @@ -1367,12 +1345,18 @@
   8.192      return BitsPerWord << shifter();
   8.193    }
   8.194  
   8.195 -  static size_t gclab_word_size() {
   8.196 -    return G1ParallelGCAllocBufferSize / HeapWordSize;
   8.197 +  size_t gclab_word_size() const {
   8.198 +    return _gclab_word_size;
   8.199    }
   8.200  
   8.201 -  static size_t bitmap_size_in_bits() {
   8.202 -    size_t bits_in_bitmap = gclab_word_size() >> shifter();
   8.203 +  // Calculates actual GCLab size in words
   8.204 +  size_t gclab_real_word_size() const {
   8.205 +    return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
   8.206 +           / BitsPerWord;
   8.207 +  }
   8.208 +
   8.209 +  static size_t bitmap_size_in_bits(size_t gclab_word_size) {
   8.210 +    size_t bits_in_bitmap = gclab_word_size >> shifter();
   8.211      // We are going to ensure that the beginning of a word in this
   8.212      // bitmap also corresponds to the beginning of a word in the
   8.213      // global marking bitmap. To handle the case where a GCLab
   8.214 @@ -1382,13 +1366,13 @@
   8.215      return bits_in_bitmap + BitsPerWord - 1;
   8.216    }
   8.217  public:
   8.218 -  GCLabBitMap(HeapWord* heap_start)
   8.219 -    : BitMap(bitmap_size_in_bits()),
   8.220 +  GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
   8.221 +    : BitMap(bitmap_size_in_bits(gclab_word_size)),
   8.222        _cm(G1CollectedHeap::heap()->concurrent_mark()),
   8.223        _shifter(shifter()),
   8.224        _bitmap_word_covers_words(bitmap_word_covers_words()),
   8.225        _heap_start(heap_start),
   8.226 -      _gclab_word_size(gclab_word_size()),
   8.227 +      _gclab_word_size(gclab_word_size),
   8.228        _real_start_word(NULL),
   8.229        _real_end_word(NULL),
   8.230        _start_word(NULL)
   8.231 @@ -1483,7 +1467,7 @@
   8.232        mark_bitmap->mostly_disjoint_range_union(this,
   8.233                                  0, // always start from the start of the bitmap
   8.234                                  _start_word,
   8.235 -                                size_in_words());
   8.236 +                                gclab_real_word_size());
   8.237        _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
   8.238  
   8.239  #ifndef PRODUCT
   8.240 @@ -1495,9 +1479,10 @@
   8.241      }
   8.242    }
   8.243  
   8.244 -  static size_t bitmap_size_in_words() {
   8.245 -    return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
   8.246 +  size_t bitmap_size_in_words() const {
   8.247 +    return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
   8.248    }
   8.249 +
   8.250  };
   8.251  
   8.252  class G1ParGCAllocBuffer: public ParGCAllocBuffer {
   8.253 @@ -1507,10 +1492,10 @@
   8.254    GCLabBitMap _bitmap;
   8.255  
   8.256  public:
   8.257 -  G1ParGCAllocBuffer() :
   8.258 -    ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
   8.259 +  G1ParGCAllocBuffer(size_t gclab_word_size) :
   8.260 +    ParGCAllocBuffer(gclab_word_size),
   8.261      _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
   8.262 -    _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
   8.263 +    _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
   8.264      _retired(false)
   8.265    { }
   8.266  
   8.267 @@ -1549,8 +1534,10 @@
   8.268    typedef GrowableArray<StarTask> OverflowQueue;
   8.269    OverflowQueue* _overflowed_refs;
   8.270  
   8.271 -  G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
   8.272 -  ageTable           _age_table;
   8.273 +  G1ParGCAllocBuffer  _surviving_alloc_buffer;
   8.274 +  G1ParGCAllocBuffer  _tenured_alloc_buffer;
   8.275 +  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
   8.276 +  ageTable            _age_table;
   8.277  
   8.278    size_t           _alloc_buffer_waste;
   8.279    size_t           _undo_waste;
   8.280 @@ -1619,7 +1606,7 @@
   8.281    ageTable*         age_table()       { return &_age_table;       }
   8.282  
   8.283    G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
   8.284 -    return &_alloc_buffers[purpose];
   8.285 +    return _alloc_buffers[purpose];
   8.286    }
   8.287  
   8.288    size_t alloc_buffer_waste()                    { return _alloc_buffer_waste; }
   8.289 @@ -1684,15 +1671,15 @@
   8.290    HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
   8.291  
   8.292      HeapWord* obj = NULL;
   8.293 -    if (word_sz * 100 <
   8.294 -        (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
   8.295 -                                                  ParallelGCBufferWastePct) {
   8.296 +    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
   8.297 +    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
   8.298        G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
   8.299 +      assert(gclab_word_size == alloc_buf->word_sz(),
   8.300 +             "dynamic resizing is not supported");
   8.301        add_to_alloc_buffer_waste(alloc_buf->words_remaining());
   8.302        alloc_buf->retire(false, false);
   8.303  
   8.304 -      HeapWord* buf =
   8.305 -        _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
   8.306 +      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
   8.307        if (buf == NULL) return NULL; // Let caller handle allocation failure.
   8.308        // Otherwise.
   8.309        alloc_buf->set_buf(buf);
   8.310 @@ -1786,9 +1773,9 @@
   8.311  
   8.312    void retire_alloc_buffers() {
   8.313      for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
   8.314 -      size_t waste = _alloc_buffers[ap].words_remaining();
   8.315 +      size_t waste = _alloc_buffers[ap]->words_remaining();
   8.316        add_to_alloc_buffer_waste(waste);
   8.317 -      _alloc_buffers[ap].retire(true, false);
   8.318 +      _alloc_buffers[ap]->retire(true, false);
   8.319      }
   8.320    }
   8.321  
     9.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Apr 21 01:13:15 2010 -0700
     9.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Apr 26 18:01:55 2010 -0400
     9.3 @@ -1,5 +1,5 @@
     9.4  /*
     9.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     9.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
     9.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.8   *
     9.9   * This code is free software; you can redistribute it and/or modify it
    9.10 @@ -42,10 +42,6 @@
    9.11    0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
    9.12  };
    9.13  
    9.14 -static double cost_per_scan_only_region_ms_defaults[] = {
    9.15 -  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    9.16 -};
    9.17 -
    9.18  // all the same
    9.19  static double fully_young_cards_per_entry_ratio_defaults[] = {
    9.20    1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    9.21 @@ -125,7 +121,6 @@
    9.22    _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.23    _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.24    _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.25 -  _cost_per_scan_only_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.26    _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.27    _partially_young_cards_per_entry_ratio_seq(
    9.28                                           new TruncatedSeq(TruncatedSeqLength)),
    9.29 @@ -133,7 +128,6 @@
    9.30    _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.31    _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.32    _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.33 -  _cost_per_scan_only_region_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.34    _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.35    _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    9.36    _non_young_other_cost_per_region_ms_seq(
    9.37 @@ -186,6 +180,22 @@
    9.38    _prev_collection_pause_used_at_end_bytes(0),
    9.39  
    9.40    _collection_set(NULL),
    9.41 +  _collection_set_size(0),
    9.42 +  _collection_set_bytes_used_before(0),
    9.43 +
    9.44 +  // Incremental CSet attributes
    9.45 +  _inc_cset_build_state(Inactive),
    9.46 +  _inc_cset_head(NULL),
    9.47 +  _inc_cset_tail(NULL),
    9.48 +  _inc_cset_size(0),
    9.49 +  _inc_cset_young_index(0),
    9.50 +  _inc_cset_bytes_used_before(0),
    9.51 +  _inc_cset_max_finger(NULL),
    9.52 +  _inc_cset_recorded_young_bytes(0),
    9.53 +  _inc_cset_recorded_rs_lengths(0),
    9.54 +  _inc_cset_predicted_elapsed_time_ms(0.0),
    9.55 +  _inc_cset_predicted_bytes_to_copy(0),
    9.56 +
    9.57  #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
    9.58  #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
    9.59  #endif // _MSC_VER
    9.60 @@ -209,13 +219,20 @@
    9.61    HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
    9.62    HeapRegionRemSet::setup_remset_size();
    9.63  
    9.64 +  // Verify PLAB sizes
    9.65 +  const uint region_size = HeapRegion::GrainWords;
    9.66 +  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    9.67 +    char buffer[128];
    9.68 +    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
    9.69 +                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    9.70 +    vm_exit_during_initialization(buffer);
    9.71 +  }
    9.72 +
    9.73    _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
    9.74    _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
    9.75  
    9.76    _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
    9.77    _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
    9.78 -  _par_last_scan_only_times_ms = new double[_parallel_gc_threads];
    9.79 -  _par_last_scan_only_regions_scanned = new double[_parallel_gc_threads];
    9.80  
    9.81    _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
    9.82    _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
    9.83 @@ -245,8 +262,6 @@
    9.84    _pending_card_diff_seq->add(0.0);
    9.85    _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
    9.86    _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
    9.87 -  _cost_per_scan_only_region_ms_seq->add(
    9.88 -                                 cost_per_scan_only_region_ms_defaults[index]);
    9.89    _fully_young_cards_per_entry_ratio_seq->add(
    9.90                              fully_young_cards_per_entry_ratio_defaults[index]);
    9.91    _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
    9.92 @@ -274,7 +289,7 @@
    9.93  
    9.94    // if G1FixedSurvivorSpaceSize is 0 which means the size is not
    9.95    // fixed, then _max_survivor_regions will be calculated at
    9.96 -  // calculate_young_list_target_config during initialization
    9.97 +  // calculate_young_list_target_length during initialization
    9.98    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
    9.99  
   9.100    assert(GCTimeRatio > 0,
   9.101 @@ -348,15 +363,18 @@
   9.102        set_adaptive_young_list_length(false);
   9.103        _young_list_fixed_length = initial_region_num;
   9.104      }
   9.105 -     _free_regions_at_end_of_collection = _g1->free_regions();
   9.106 -     _scan_only_regions_at_end_of_collection = 0;
   9.107 -     calculate_young_list_min_length();
   9.108 -     guarantee( _young_list_min_length == 0, "invariant, not enough info" );
   9.109 -     calculate_young_list_target_config();
   9.110 -   } else {
   9.111 +    _free_regions_at_end_of_collection = _g1->free_regions();
   9.112 +    calculate_young_list_min_length();
   9.113 +    guarantee( _young_list_min_length == 0, "invariant, not enough info" );
   9.114 +    calculate_young_list_target_length();
   9.115 +  } else {
   9.116       _young_list_fixed_length = 0;
   9.117      _in_young_gc_mode = false;
   9.118    }
   9.119 +
   9.120 +  // We may immediately start allocating regions and placing them on the
   9.121 +  // collection set list. Initialize the per-collection set info
   9.122 +  start_incremental_cset_building();
   9.123  }
   9.124  
   9.125  // Create the jstat counters for the policy.
   9.126 @@ -376,112 +394,29 @@
   9.127      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
   9.128      double alloc_rate_ms = predict_alloc_rate_ms();
   9.129      int min_regions = (int) ceil(alloc_rate_ms * when_ms);
   9.130 -    int current_region_num = (int) _g1->young_list_length();
   9.131 +    int current_region_num = (int) _g1->young_list()->length();
   9.132      _young_list_min_length = min_regions + current_region_num;
   9.133    }
   9.134  }
   9.135  
   9.136 -void G1CollectorPolicy::calculate_young_list_target_config() {
   9.137 +void G1CollectorPolicy::calculate_young_list_target_length() {
   9.138    if (adaptive_young_list_length()) {
   9.139      size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
   9.140 -    calculate_young_list_target_config(rs_lengths);
   9.141 +    calculate_young_list_target_length(rs_lengths);
   9.142    } else {
   9.143      if (full_young_gcs())
   9.144        _young_list_target_length = _young_list_fixed_length;
   9.145      else
   9.146        _young_list_target_length = _young_list_fixed_length / 2;
   9.147 +
   9.148      _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
   9.149 -    size_t so_length = calculate_optimal_so_length(_young_list_target_length);
   9.150 -    guarantee( so_length < _young_list_target_length, "invariant" );
   9.151 -    _young_list_so_prefix_length = so_length;
   9.152    }
   9.153    calculate_survivors_policy();
   9.154  }
   9.155  
   9.156 -// This method calculate the optimal scan-only set for a fixed young
   9.157 -// gen size. I couldn't work out how to reuse the more elaborate one,
   9.158 -// i.e. calculate_young_list_target_config(rs_length), as the loops are
   9.159 -// fundamentally different (the other one finds a config for different
   9.160 -// S-O lengths, whereas here we need to do the opposite).
   9.161 -size_t G1CollectorPolicy::calculate_optimal_so_length(
   9.162 -                                                    size_t young_list_length) {
   9.163 -  if (!G1UseScanOnlyPrefix)
   9.164 -    return 0;
   9.165 -
   9.166 -  if (_all_pause_times_ms->num() < 3) {
   9.167 -    // we won't use a scan-only set at the beginning to allow the rest
   9.168 -    // of the predictors to warm up
   9.169 -    return 0;
   9.170 -  }
   9.171 -
   9.172 -  if (_cost_per_scan_only_region_ms_seq->num() < 3) {
   9.173 -    // then, we'll only set the S-O set to 1 for a little bit of time,
   9.174 -    // to get enough information on the scanning cost
   9.175 -    return 1;
   9.176 -  }
   9.177 -
   9.178 -  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
   9.179 -  size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
   9.180 -  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
   9.181 -  size_t scanned_cards;
   9.182 -  if (full_young_gcs())
   9.183 -    scanned_cards = predict_young_card_num(adj_rs_lengths);
   9.184 -  else
   9.185 -    scanned_cards = predict_non_young_card_num(adj_rs_lengths);
   9.186 -  double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
   9.187 -                                                     scanned_cards);
   9.188 -
   9.189 -  size_t so_length = 0;
   9.190 -  double max_gc_eff = 0.0;
   9.191 -  for (size_t i = 0; i < young_list_length; ++i) {
   9.192 -    double gc_eff = 0.0;
   9.193 -    double pause_time_ms = 0.0;
   9.194 -    predict_gc_eff(young_list_length, i, base_time_ms,
   9.195 -                   &gc_eff, &pause_time_ms);
   9.196 -    if (gc_eff > max_gc_eff) {
   9.197 -      max_gc_eff = gc_eff;
   9.198 -      so_length = i;
   9.199 -    }
   9.200 -  }
   9.201 -
   9.202 -  // set it to 95% of the optimal to make sure we sample the "area"
   9.203 -  // around the optimal length to get up-to-date survival rate data
   9.204 -  return so_length * 950 / 1000;
   9.205 -}
   9.206 -
   9.207 -// This is a really cool piece of code! It finds the best
   9.208 -// target configuration (young length / scan-only prefix length) so
   9.209 -// that GC efficiency is maximized and that we also meet a pause
   9.210 -// time. It's a triple nested loop. These loops are explained below
   9.211 -// from the inside-out :-)
   9.212 -//
   9.213 -// (a) The innermost loop will try to find the optimal young length
   9.214 -// for a fixed S-O length. It uses a binary search to speed up the
   9.215 -// process. We assume that, for a fixed S-O length, as we add more
   9.216 -// young regions to the CSet, the GC efficiency will only go up (I'll
   9.217 -// skip the proof). So, using a binary search to optimize this process
   9.218 -// makes perfect sense.
   9.219 -//
   9.220 -// (b) The middle loop will fix the S-O length before calling the
   9.221 -// innermost one. It will vary it between two parameters, increasing
   9.222 -// it by a given increment.
   9.223 -//
   9.224 -// (c) The outermost loop will call the middle loop three times.
   9.225 -//   (1) The first time it will explore all possible S-O length values
   9.226 -//   from 0 to as large as it can get, using a coarse increment (to
   9.227 -//   quickly "home in" to where the optimal seems to be).
   9.228 -//   (2) The second time it will explore the values around the optimal
   9.229 -//   that was found by the first iteration using a fine increment.
   9.230 -//   (3) Once the optimal config has been determined by the second
   9.231 -//   iteration, we'll redo the calculation, but setting the S-O length
   9.232 -//   to 95% of the optimal to make sure we sample the "area"
   9.233 -//   around the optimal length to get up-to-date survival rate data
   9.234 -//
   9.235 -// Termination conditions for the iterations are several: the pause
   9.236 -// time is over the limit, we do not have enough to-space, etc.
   9.237 -
   9.238 -void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
   9.239 +void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
   9.240    guarantee( adaptive_young_list_length(), "pre-condition" );
   9.241 +  guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
   9.242  
   9.243    double start_time_sec = os::elapsedTime();
   9.244    size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
   9.245 @@ -495,285 +430,80 @@
   9.246      double survivor_regions_evac_time =
   9.247          predict_survivor_regions_evac_time();
   9.248  
   9.249 -    size_t min_so_length = 0;
   9.250 -    size_t max_so_length = 0;
   9.251 -
   9.252 -    if (G1UseScanOnlyPrefix) {
   9.253 -      if (_all_pause_times_ms->num() < 3) {
   9.254 -        // we won't use a scan-only set at the beginning to allow the rest
   9.255 -        // of the predictors to warm up
   9.256 -        min_so_length = 0;
   9.257 -        max_so_length = 0;
   9.258 -      } else if (_cost_per_scan_only_region_ms_seq->num() < 3) {
   9.259 -        // then, we'll only set the S-O set to 1 for a little bit of time,
   9.260 -        // to get enough information on the scanning cost
   9.261 -        min_so_length = 1;
   9.262 -        max_so_length = 1;
   9.263 -      } else if (_in_marking_window || _last_full_young_gc) {
   9.264 -        // no S-O prefix during a marking phase either, as at the end
   9.265 -        // of the marking phase we'll have to use a very small young
   9.266 -        // length target to fill up the rest of the CSet with
   9.267 -        // non-young regions and, if we have lots of scan-only regions
   9.268 -        // left-over, we will not be able to add any more non-young
   9.269 -        // regions.
   9.270 -        min_so_length = 0;
   9.271 -        max_so_length = 0;
   9.272 -      } else {
   9.273 -        // this is the common case; we'll never reach the maximum, we
   9.274 -        // one of the end conditions will fire well before that
   9.275 -        // (hopefully!)
   9.276 -        min_so_length = 0;
   9.277 -        max_so_length = _free_regions_at_end_of_collection - 1;
   9.278 -      }
   9.279 -    } else {
   9.280 -      // no S-O prefix, as the switch is not set, but we still need to
   9.281 -      // do one iteration to calculate the best young target that
   9.282 -      // meets the pause time; this way we reuse the same code instead
   9.283 -      // of replicating it
   9.284 -      min_so_length = 0;
   9.285 -      max_so_length = 0;
   9.286 -    }
   9.287 -
   9.288      double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   9.289      size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
   9.290      size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
   9.291 -    size_t scanned_cards;
   9.292 -    if (full_young_gcs())
   9.293 -      scanned_cards = predict_young_card_num(adj_rs_lengths);
   9.294 -    else
   9.295 -      scanned_cards = predict_non_young_card_num(adj_rs_lengths);
   9.296 -    // calculate this once, so that we don't have to recalculate it in
   9.297 -    // the innermost loop
   9.298 +    size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
   9.299      double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
   9.300                            + survivor_regions_evac_time;
   9.301 +
   9.302      // the result
   9.303      size_t final_young_length = 0;
   9.304 -    size_t final_so_length = 0;
   9.305 -    double final_gc_eff = 0.0;
   9.306 -    // we'll also keep track of how many times we go into the inner loop
   9.307 -    // this is for profiling reasons
   9.308 -    size_t calculations = 0;
   9.309 -
   9.310 -    // this determines which of the three iterations the outer loop is in
   9.311 -    typedef enum {
   9.312 -      pass_type_coarse,
   9.313 -      pass_type_fine,
   9.314 -      pass_type_final
   9.315 -    } pass_type_t;
   9.316 -
   9.317 -    // range of the outer loop's iteration
   9.318 -    size_t from_so_length   = min_so_length;
   9.319 -    size_t to_so_length     = max_so_length;
   9.320 -    guarantee( from_so_length <= to_so_length, "invariant" );
   9.321 -
   9.322 -    // this will keep the S-O length that's found by the second
   9.323 -    // iteration of the outer loop; we'll keep it just in case the third
   9.324 -    // iteration fails to find something
   9.325 -    size_t fine_so_length   = 0;
   9.326 -
   9.327 -    // the increment step for the coarse (first) iteration
   9.328 -    size_t so_coarse_increments = 5;
   9.329 -
   9.330 -    // the common case, we'll start with the coarse iteration
   9.331 -    pass_type_t pass = pass_type_coarse;
   9.332 -    size_t so_length_incr = so_coarse_increments;
   9.333 -
   9.334 -    if (from_so_length == to_so_length) {
   9.335 -      // not point in doing the coarse iteration, we'll go directly into
   9.336 -      // the fine one (we essentially trying to find the optimal young
   9.337 -      // length for a fixed S-O length).
   9.338 -      so_length_incr = 1;
   9.339 -      pass = pass_type_final;
   9.340 -    } else if (to_so_length - from_so_length < 3 * so_coarse_increments) {
   9.341 -      // again, the range is too short so no point in foind the coarse
   9.342 -      // iteration either
   9.343 -      so_length_incr = 1;
   9.344 -      pass = pass_type_fine;
   9.345 +
   9.346 +    size_t init_free_regions =
   9.347 +      MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
   9.348 +
   9.349 +    // if we're still under the pause target...
   9.350 +    if (base_time_ms <= target_pause_time_ms) {
   9.351 +      // We make sure that the shortest young length that makes sense
   9.352 +      // fits within the target pause time.
   9.353 +      size_t min_young_length = 1;
   9.354 +
   9.355 +      if (predict_will_fit(min_young_length, base_time_ms,
   9.356 +                                     init_free_regions, target_pause_time_ms)) {
   9.357 +        // The shortest young length will fit within the target pause time;
   9.358 +        // we'll now check whether the absolute maximum number of young
   9.359 +        // regions will fit in the target pause time. If not, we'll do
   9.360 +        // a binary search between min_young_length and max_young_length
   9.361 +        size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
   9.362 +        size_t max_young_length = abs_max_young_length;
   9.363 +
   9.364 +        if (max_young_length > min_young_length) {
   9.365 +          // Let's check if the initial max young length will fit within the
   9.366 +          // target pause. If so then there is no need to search for a maximal
   9.367 +          // young length - we'll return the initial maximum
   9.368 +
   9.369 +          if (predict_will_fit(max_young_length, base_time_ms,
   9.370 +                                init_free_regions, target_pause_time_ms)) {
   9.371 +            // The maximum young length will satisfy the target pause time.
   9.372 +            // We are done so set min young length to this maximum length.
   9.373 +            // The code after the loop will then set final_young_length using
   9.374 +            // the value cached in the minimum length.
   9.375 +            min_young_length = max_young_length;
   9.376 +          } else {
   9.377 +            // The maximum possible number of young regions will not fit within
   9.378 +            // the target pause time so let's search....
   9.379 +
   9.380 +            size_t diff = (max_young_length - min_young_length) / 2;
   9.381 +            max_young_length = min_young_length + diff;
   9.382 +
   9.383 +            while (max_young_length > min_young_length) {
   9.384 +              if (predict_will_fit(max_young_length, base_time_ms,
   9.385 +                                        init_free_regions, target_pause_time_ms)) {
   9.386 +
   9.387 +                // The current max young length will fit within the target
   9.388 +                // pause time. Note we do not exit the loop here. By setting
   9.389 +                // min = max, and then increasing the max below means that
   9.390 +                // we will continue searching for an upper bound in the
   9.391 +                // range [max..max+diff]
   9.392 +                min_young_length = max_young_length;
   9.393 +              }
   9.394 +              diff = (max_young_length - min_young_length) / 2;
   9.395 +              max_young_length = min_young_length + diff;
   9.396 +            }
   9.397 +            // the above loop found a maximal young length that will fit
   9.398 +            // within the target pause time.
   9.399 +          }
   9.400 +          assert(min_young_length <= abs_max_young_length, "just checking");
   9.401 +        }
   9.402 +        final_young_length = min_young_length;
   9.403 +      }
   9.404      }
   9.405 -
   9.406 -    bool done = false;
   9.407 -    // this is the outermost loop
   9.408 -    while (!done) {
   9.409 -#ifdef TRACE_CALC_YOUNG_CONFIG
   9.410 -      // leave this in for debugging, just in case
   9.411 -      gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
   9.412 -                             ", incr " SIZE_FORMAT ", pass %s",
   9.413 -                             from_so_length, to_so_length, so_length_incr,
   9.414 -                             (pass == pass_type_coarse) ? "coarse" :
   9.415 -                             (pass == pass_type_fine) ? "fine" : "final");
   9.416 -#endif // TRACE_CALC_YOUNG_CONFIG
   9.417 -
   9.418 -      size_t so_length = from_so_length;
   9.419 -      size_t init_free_regions =
   9.420 -        MAX2((size_t)0,
   9.421 -             _free_regions_at_end_of_collection +
   9.422 -             _scan_only_regions_at_end_of_collection - reserve_regions);
   9.423 -
   9.424 -      // this determines whether a configuration was found
   9.425 -      bool gc_eff_set = false;
   9.426 -      // this is the middle loop
   9.427 -      while (so_length <= to_so_length) {
   9.428 -        // base time, which excludes region-related time; again we
   9.429 -        // calculate it once to avoid recalculating it in the
   9.430 -        // innermost loop
   9.431 -        double base_time_with_so_ms =
   9.432 -                           base_time_ms + predict_scan_only_time_ms(so_length);
   9.433 -        // it's already over the pause target, go around
   9.434 -        if (base_time_with_so_ms > target_pause_time_ms)
   9.435 -          break;
   9.436 -
   9.437 -        size_t starting_young_length = so_length+1;
   9.438 -
   9.439 -        // we make sure that the short young length that makes sense
   9.440 -        // (one more than the S-O length) is feasible
   9.441 -        size_t min_young_length = starting_young_length;
   9.442 -        double min_gc_eff;
   9.443 -        bool min_ok;
   9.444 -        ++calculations;
   9.445 -        min_ok = predict_gc_eff(min_young_length, so_length,
   9.446 -                                base_time_with_so_ms,
   9.447 -                                init_free_regions, target_pause_time_ms,
   9.448 -                                &min_gc_eff);
   9.449 -
   9.450 -        if (min_ok) {
   9.451 -          // the shortest young length is indeed feasible; we'll know
   9.452 -          // set up the max young length and we'll do a binary search
   9.453 -          // between min_young_length and max_young_length
   9.454 -          size_t max_young_length = _free_regions_at_end_of_collection - 1;
   9.455 -          double max_gc_eff = 0.0;
   9.456 -          bool max_ok = false;
   9.457 -
   9.458 -          // the innermost loop! (finally!)
   9.459 -          while (max_young_length > min_young_length) {
   9.460 -            // we'll make sure that min_young_length is always at a
   9.461 -            // feasible config
   9.462 -            guarantee( min_ok, "invariant" );
   9.463 -
   9.464 -            ++calculations;
   9.465 -            max_ok = predict_gc_eff(max_young_length, so_length,
   9.466 -                                    base_time_with_so_ms,
   9.467 -                                    init_free_regions, target_pause_time_ms,
   9.468 -                                    &max_gc_eff);
   9.469 -
   9.470 -            size_t diff = (max_young_length - min_young_length) / 2;
   9.471 -            if (max_ok) {
   9.472 -              min_young_length = max_young_length;
   9.473 -              min_gc_eff = max_gc_eff;
   9.474 -              min_ok = true;
   9.475 -            }
   9.476 -            max_young_length = min_young_length + diff;
   9.477 -          }
   9.478 -
   9.479 -          // the innermost loop found a config
   9.480 -          guarantee( min_ok, "invariant" );
   9.481 -          if (min_gc_eff > final_gc_eff) {
   9.482 -            // it's the best config so far, so we'll keep it
   9.483 -            final_gc_eff = min_gc_eff;
   9.484 -            final_young_length = min_young_length;
   9.485 -            final_so_length = so_length;
   9.486 -            gc_eff_set = true;
   9.487 -          }
   9.488 -        }
   9.489 -
   9.490 -        // incremental the fixed S-O length and go around
   9.491 -        so_length += so_length_incr;
   9.492 -      }
   9.493 -
   9.494 -      // this is the end of the outermost loop and we need to decide
   9.495 -      // what to do during the next iteration
   9.496 -      if (pass == pass_type_coarse) {
   9.497 -        // we just did the coarse pass (first iteration)
   9.498 -
   9.499 -        if (!gc_eff_set)
   9.500 -          // we didn't find a feasible config so we'll just bail out; of
   9.501 -          // course, it might be the case that we missed it; but I'd say
   9.502 -          // it's a bit unlikely
   9.503 -          done = true;
   9.504 -        else {
   9.505 -          // We did find a feasible config with optimal GC eff during
   9.506 -          // the first pass. So the second pass we'll only consider the
   9.507 -          // S-O lengths around that config with a fine increment.
   9.508 -
   9.509 -          guarantee( so_length_incr == so_coarse_increments, "invariant" );
   9.510 -          guarantee( final_so_length >= min_so_length, "invariant" );
   9.511 -
   9.512 -#ifdef TRACE_CALC_YOUNG_CONFIG
   9.513 -          // leave this in for debugging, just in case
   9.514 -          gclog_or_tty->print_cr("  coarse pass: SO length " SIZE_FORMAT,
   9.515 -                                 final_so_length);
   9.516 -#endif // TRACE_CALC_YOUNG_CONFIG
   9.517 -
   9.518 -          from_so_length =
   9.519 -            (final_so_length - min_so_length > so_coarse_increments) ?
   9.520 -            final_so_length - so_coarse_increments + 1 : min_so_length;
   9.521 -          to_so_length =
   9.522 -            (max_so_length - final_so_length > so_coarse_increments) ?
   9.523 -            final_so_length + so_coarse_increments - 1 : max_so_length;
   9.524 -
   9.525 -          pass = pass_type_fine;
   9.526 -          so_length_incr = 1;
   9.527 -        }
   9.528 -      } else if (pass == pass_type_fine) {
   9.529 -        // we just finished the second pass
   9.530 -
   9.531 -        if (!gc_eff_set) {
   9.532 -          // we didn't find a feasible config (yes, it's possible;
   9.533 -          // notice that, sometimes, we go directly into the fine
   9.534 -          // iteration and skip the coarse one) so we bail out
   9.535 -          done = true;
   9.536 -        } else {
   9.537 -          // We did find a feasible config with optimal GC eff
   9.538 -          guarantee( so_length_incr == 1, "invariant" );
   9.539 -
   9.540 -          if (final_so_length == 0) {
   9.541 -            // The config is of an empty S-O set, so we'll just bail out
   9.542 -            done = true;
   9.543 -          } else {
   9.544 -            // we'll go around once more, setting the S-O length to 95%
   9.545 -            // of the optimal
   9.546 -            size_t new_so_length = 950 * final_so_length / 1000;
   9.547 -
   9.548 -#ifdef TRACE_CALC_YOUNG_CONFIG
   9.549 -            // leave this in for debugging, just in case
   9.550 -            gclog_or_tty->print_cr("  fine pass: SO length " SIZE_FORMAT
   9.551 -                                   ", setting it to " SIZE_FORMAT,
   9.552 -                                    final_so_length, new_so_length);
   9.553 -#endif // TRACE_CALC_YOUNG_CONFIG
   9.554 -
   9.555 -            from_so_length = new_so_length;
   9.556 -            to_so_length = new_so_length;
   9.557 -            fine_so_length = final_so_length;
   9.558 -
   9.559 -            pass = pass_type_final;
   9.560 -          }
   9.561 -        }
   9.562 -      } else if (pass == pass_type_final) {
   9.563 -        // we just finished the final (third) pass
   9.564 -
   9.565 -        if (!gc_eff_set)
   9.566 -          // we didn't find a feasible config, so we'll just use the one
   9.567 -          // we found during the second pass, which we saved
   9.568 -          final_so_length = fine_so_length;
   9.569 -
   9.570 -        // and we're done!
   9.571 -        done = true;
   9.572 -      } else {
   9.573 -        guarantee( false, "should never reach here" );
   9.574 -      }
   9.575 -
   9.576 -      // we now go around the outermost loop
   9.577 -    }
   9.578 +    // and we're done!
   9.579  
   9.580      // we should have at least one region in the target young length
   9.581      _young_list_target_length =
   9.582          MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
   9.583 -    if (final_so_length >= final_young_length)
   9.584 -      // and we need to ensure that the S-O length is not greater than
   9.585 -      // the target young length (this is being a bit careful)
   9.586 -      final_so_length = 0;
   9.587 -    _young_list_so_prefix_length = final_so_length;
   9.588 -    guarantee( !_in_marking_window || !_last_full_young_gc ||
   9.589 -               _young_list_so_prefix_length == 0, "invariant" );
   9.590  
   9.591      // let's keep an eye of how long we spend on this calculation
   9.592      // right now, I assume that we'll print it when we need it; we
   9.593 @@ -781,142 +511,91 @@
   9.594      double end_time_sec = os::elapsedTime();
   9.595      double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
   9.596  
   9.597 -#ifdef TRACE_CALC_YOUNG_CONFIG
   9.598 +#ifdef TRACE_CALC_YOUNG_LENGTH
   9.599      // leave this in for debugging, just in case
   9.600 -    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
   9.601 -                           ", SO = " SIZE_FORMAT ", "
   9.602 -                           "elapsed %1.2lf ms, calcs: " SIZE_FORMAT " (%s%s) "
   9.603 -                           SIZE_FORMAT SIZE_FORMAT,
   9.604 +    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
   9.605 +                           "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
   9.606                             target_pause_time_ms,
   9.607 -                           _young_list_target_length - _young_list_so_prefix_length,
   9.608 -                           _young_list_so_prefix_length,
    9.609 +                           _young_list_target_length,
   9.610                             elapsed_time_ms,
   9.611 -                           calculations,
   9.612                             full_young_gcs() ? "full" : "partial",
   9.613                             during_initial_mark_pause() ? " i-m" : "",
   9.614                             _in_marking_window,
   9.615                             _in_marking_window_im);
   9.616 -#endif // TRACE_CALC_YOUNG_CONFIG
   9.617 +#endif // TRACE_CALC_YOUNG_LENGTH
   9.618  
   9.619      if (_young_list_target_length < _young_list_min_length) {
   9.620 -      // bummer; this means that, if we do a pause when the optimal
   9.621 -      // config dictates, we'll violate the pause spacing target (the
   9.622 +      // bummer; this means that, if we do a pause when the maximal
   9.623 +      // length dictates, we'll violate the pause spacing target (the
   9.624        // min length was calculate based on the application's current
   9.625        // alloc rate);
   9.626  
   9.627        // so, we have to bite the bullet, and allocate the minimum
   9.628        // number. We'll violate our target, but we just can't meet it.
   9.629  
   9.630 -      size_t so_length = 0;
   9.631 -      // a note further up explains why we do not want an S-O length
   9.632 -      // during marking
   9.633 -      if (!_in_marking_window && !_last_full_young_gc)
   9.634 -        // but we can still try to see whether we can find an optimal
   9.635 -        // S-O length
   9.636 -        so_length = calculate_optimal_so_length(_young_list_min_length);
   9.637 -
   9.638 -#ifdef TRACE_CALC_YOUNG_CONFIG
   9.639 +#ifdef TRACE_CALC_YOUNG_LENGTH
   9.640        // leave this in for debugging, just in case
   9.641        gclog_or_tty->print_cr("adjusted target length from "
   9.642 -                             SIZE_FORMAT " to " SIZE_FORMAT
   9.643 -                             ", SO " SIZE_FORMAT,
   9.644 -                             _young_list_target_length, _young_list_min_length,
   9.645 -                             so_length);
   9.646 -#endif // TRACE_CALC_YOUNG_CONFIG
   9.647 -
   9.648 -      _young_list_target_length =
   9.649 -        MAX2(_young_list_min_length, (size_t)1);
   9.650 -      _young_list_so_prefix_length = so_length;
   9.651 +                             SIZE_FORMAT " to " SIZE_FORMAT,
   9.652 +                             _young_list_target_length, _young_list_min_length);
   9.653 +#endif // TRACE_CALC_YOUNG_LENGTH
   9.654 +
   9.655 +      _young_list_target_length = _young_list_min_length;
   9.656      }
   9.657    } else {
   9.658      // we are in a partially-young mode or we've run out of regions (due
   9.659      // to evacuation failure)
   9.660  
   9.661 -#ifdef TRACE_CALC_YOUNG_CONFIG
   9.662 +#ifdef TRACE_CALC_YOUNG_LENGTH
   9.663      // leave this in for debugging, just in case
   9.664      gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
   9.665 -                           ", SO " SIZE_FORMAT,
   9.666 -                           _young_list_min_length, 0);
   9.667 -#endif // TRACE_CALC_YOUNG_CONFIG
   9.668 -
   9.669 -    // we'll do the pause as soon as possible and with no S-O prefix
   9.670 -    // (see above for the reasons behind the latter)
   9.671 +                           _young_list_min_length);
   9.672 +#endif // TRACE_CALC_YOUNG_LENGTH
   9.673 +    // we'll do the pause as soon as possible by choosing the minimum
   9.674      _young_list_target_length =
   9.675        MAX2(_young_list_min_length, (size_t) 1);
   9.676 -    _young_list_so_prefix_length = 0;
   9.677    }
   9.678  
   9.679    _rs_lengths_prediction = rs_lengths;
   9.680  }
   9.681  
   9.682 -// This is used by: calculate_optimal_so_length(length). It returns
   9.683 -// the GC eff and predicted pause time for a particular config
   9.684 -void
   9.685 -G1CollectorPolicy::predict_gc_eff(size_t young_length,
   9.686 -                                  size_t so_length,
   9.687 -                                  double base_time_ms,
   9.688 -                                  double* ret_gc_eff,
   9.689 -                                  double* ret_pause_time_ms) {
   9.690 -  double so_time_ms = predict_scan_only_time_ms(so_length);
   9.691 -  double accum_surv_rate_adj = 0.0;
   9.692 -  if (so_length > 0)
   9.693 -    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
   9.694 -  double accum_surv_rate =
   9.695 -    accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
   9.696 -  size_t bytes_to_copy =
   9.697 -    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   9.698 -  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
   9.699 -  double young_other_time_ms =
   9.700 -                       predict_young_other_time_ms(young_length - so_length);
   9.701 -  double pause_time_ms =
   9.702 -                base_time_ms + so_time_ms + copy_time_ms + young_other_time_ms;
   9.703 -  size_t reclaimed_bytes =
   9.704 -    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
   9.705 -  double gc_eff = (double) reclaimed_bytes / pause_time_ms;
   9.706 -
   9.707 -  *ret_gc_eff = gc_eff;
   9.708 -  *ret_pause_time_ms = pause_time_ms;
   9.709 -}
   9.710 -
   9.711 -// This is used by: calculate_young_list_target_config(rs_length). It
   9.712 -// returns the GC eff of a particular config. It returns false if that
   9.713 -// config violates any of the end conditions of the search in the
   9.714 -// calling method, or true upon success. The end conditions were put
   9.715 -// here since it's called twice and it was best not to replicate them
   9.716 -// in the caller. Also, passing the parameteres avoids having to
   9.717 -// recalculate them in the innermost loop.
   9.718 +// This is used by: calculate_young_list_target_length(rs_length). It
   9.719 +// returns true iff:
   9.720 +//   the predicted pause time for the given young list will not overflow
   9.721 +//   the target pause time
   9.722 +// and:
   9.723 +//   the predicted amount of surviving data will not overflow the
   9.724 +//   the amount of free space available for survivor regions.
   9.725 +//
   9.726  bool
   9.727 -G1CollectorPolicy::predict_gc_eff(size_t young_length,
   9.728 -                                  size_t so_length,
   9.729 -                                  double base_time_with_so_ms,
   9.730 -                                  size_t init_free_regions,
   9.731 -                                  double target_pause_time_ms,
   9.732 -                                  double* ret_gc_eff) {
   9.733 -  *ret_gc_eff = 0.0;
   9.734 +G1CollectorPolicy::predict_will_fit(size_t young_length,
   9.735 +                                    double base_time_ms,
   9.736 +                                    size_t init_free_regions,
   9.737 +                                    double target_pause_time_ms) {
   9.738  
   9.739    if (young_length >= init_free_regions)
   9.740      // end condition 1: not enough space for the young regions
   9.741      return false;
   9.742  
   9.743    double accum_surv_rate_adj = 0.0;
   9.744 -  if (so_length > 0)
   9.745 -    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
   9.746    double accum_surv_rate =
   9.747      accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
   9.748 +
   9.749    size_t bytes_to_copy =
   9.750      (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   9.751 +
   9.752    double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
   9.753 +
   9.754    double young_other_time_ms =
   9.755 -                       predict_young_other_time_ms(young_length - so_length);
   9.756 +                       predict_young_other_time_ms(young_length);
   9.757 +
   9.758    double pause_time_ms =
   9.759 -                   base_time_with_so_ms + copy_time_ms + young_other_time_ms;
   9.760 +                   base_time_ms + copy_time_ms + young_other_time_ms;
   9.761  
   9.762    if (pause_time_ms > target_pause_time_ms)
   9.763      // end condition 2: over the target pause time
   9.764      return false;
   9.765  
   9.766 -  size_t reclaimed_bytes =
   9.767 -    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
   9.768    size_t free_bytes =
   9.769                   (init_free_regions - young_length) * HeapRegion::GrainBytes;
   9.770  
   9.771 @@ -925,9 +604,6 @@
   9.772      return false;
   9.773  
   9.774    // success!
   9.775 -  double gc_eff = (double) reclaimed_bytes / pause_time_ms;
   9.776 -  *ret_gc_eff = gc_eff;
   9.777 -
   9.778    return true;
   9.779  }
   9.780  
   9.781 @@ -944,11 +620,11 @@
   9.782  void G1CollectorPolicy::check_prediction_validity() {
   9.783    guarantee( adaptive_young_list_length(), "should not call this otherwise" );
   9.784  
   9.785 -  size_t rs_lengths = _g1->young_list_sampled_rs_lengths();
   9.786 +  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   9.787    if (rs_lengths > _rs_lengths_prediction) {
   9.788      // add 10% to avoid having to recalculate often
   9.789      size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
   9.790 -    calculate_young_list_target_config(rs_lengths_prediction);
   9.791 +    calculate_young_list_target_length(rs_lengths_prediction);
   9.792    }
   9.793  }
   9.794  
   9.795 @@ -970,7 +646,7 @@
   9.796  
   9.797  #ifndef PRODUCT
   9.798  bool G1CollectorPolicy::verify_young_ages() {
   9.799 -  HeapRegion* head = _g1->young_list_first_region();
   9.800 +  HeapRegion* head = _g1->young_list()->first_region();
   9.801    return
   9.802      verify_young_ages(head, _short_lived_surv_rate_group);
   9.803    // also call verify_young_ages on any additional surv rate groups
   9.804 @@ -1047,7 +723,6 @@
   9.805    _in_marking_window = false;
   9.806    _in_marking_window_im = false;
   9.807  
   9.808 -  _short_lived_surv_rate_group->record_scan_only_prefix(0);
   9.809    _short_lived_surv_rate_group->start_adding_regions();
   9.810    // also call this on any additional surv rate groups
   9.811  
   9.812 @@ -1057,11 +732,10 @@
   9.813    _prev_region_num_tenured = _region_num_tenured;
   9.814  
   9.815    _free_regions_at_end_of_collection = _g1->free_regions();
   9.816 -  _scan_only_regions_at_end_of_collection = 0;
   9.817    // Reset survivors SurvRateGroup.
   9.818    _survivor_surv_rate_group->reset();
   9.819    calculate_young_list_min_length();
   9.820 -  calculate_young_list_target_config();
   9.821 +  calculate_young_list_target_length();
   9.822   }
   9.823  
   9.824  void G1CollectorPolicy::record_before_bytes(size_t bytes) {
   9.825 @@ -1110,8 +784,6 @@
   9.826    for (int i = 0; i < _parallel_gc_threads; ++i) {
   9.827      _par_last_ext_root_scan_times_ms[i] = -666.0;
   9.828      _par_last_mark_stack_scan_times_ms[i] = -666.0;
   9.829 -    _par_last_scan_only_times_ms[i] = -666.0;
   9.830 -    _par_last_scan_only_regions_scanned[i] = -666.0;
   9.831      _par_last_update_rs_start_times_ms[i] = -666.0;
   9.832      _par_last_update_rs_times_ms[i] = -666.0;
   9.833      _par_last_update_rs_processed_buffers[i] = -666.0;
   9.834 @@ -1134,47 +806,13 @@
   9.835    if (in_young_gc_mode())
   9.836      _last_young_gc_full = false;
   9.837  
   9.838 -
   9.839    // do that for any other surv rate groups
   9.840    _short_lived_surv_rate_group->stop_adding_regions();
   9.841 -  size_t short_lived_so_length = _young_list_so_prefix_length;
   9.842 -  _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
   9.843 -  tag_scan_only(short_lived_so_length);
   9.844    _survivors_age_table.clear();
   9.845  
   9.846    assert( verify_young_ages(), "region age verification" );
   9.847  }
   9.848  
   9.849 -void G1CollectorPolicy::tag_scan_only(size_t short_lived_scan_only_length) {
   9.850 -  // done in a way that it can be extended for other surv rate groups too...
   9.851 -
   9.852 -  HeapRegion* head = _g1->young_list_first_region();
   9.853 -  bool finished_short_lived = (short_lived_scan_only_length == 0);
   9.854 -
   9.855 -  if (finished_short_lived)
   9.856 -    return;
   9.857 -
   9.858 -  for (HeapRegion* curr = head;
   9.859 -       curr != NULL;
   9.860 -       curr = curr->get_next_young_region()) {
   9.861 -    SurvRateGroup* surv_rate_group = curr->surv_rate_group();
   9.862 -    int age = curr->age_in_surv_rate_group();
   9.863 -
   9.864 -    if (surv_rate_group == _short_lived_surv_rate_group) {
   9.865 -      if ((size_t)age < short_lived_scan_only_length)
   9.866 -        curr->set_scan_only();
   9.867 -      else
   9.868 -        finished_short_lived = true;
   9.869 -    }
   9.870 -
   9.871 -
   9.872 -    if (finished_short_lived)
   9.873 -      return;
   9.874 -  }
   9.875 -
   9.876 -  guarantee( false, "we should never reach here" );
   9.877 -}
   9.878 -
   9.879  void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
   9.880    _mark_closure_time_ms = mark_closure_time_ms;
   9.881  }
   9.882 @@ -1268,7 +906,7 @@
   9.883      _last_full_young_gc = true;
   9.884      _in_marking_window = false;
   9.885      if (adaptive_young_list_length())
   9.886 -      calculate_young_list_target_config();
   9.887 +      calculate_young_list_target_length();
   9.888    }
   9.889  }
   9.890  
   9.891 @@ -1503,6 +1141,7 @@
   9.892    size_t freed_bytes =
   9.893      _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
   9.894    size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
   9.895 +
   9.896    double survival_fraction =
   9.897      (double)surviving_bytes/
   9.898      (double)_collection_set_bytes_used_before;
   9.899 @@ -1590,9 +1229,6 @@
   9.900  
   9.901    double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
   9.902    double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
   9.903 -  double scan_only_time = avg_value(_par_last_scan_only_times_ms);
   9.904 -  double scan_only_regions_scanned =
   9.905 -    sum_of_values(_par_last_scan_only_regions_scanned);
   9.906    double update_rs_time = avg_value(_par_last_update_rs_times_ms);
   9.907    double update_rs_processed_buffers =
   9.908      sum_of_values(_par_last_update_rs_processed_buffers);
   9.909 @@ -1602,7 +1238,7 @@
   9.910  
   9.911    double parallel_other_time = _cur_collection_par_time_ms -
   9.912      (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
   9.913 -     scan_only_time + scan_rs_time + obj_copy_time + termination_time);
   9.914 +     scan_rs_time + obj_copy_time + termination_time);
   9.915    if (update_stats) {
   9.916      MainBodySummary* body_summary = summary->main_body_summary();
   9.917      guarantee(body_summary != NULL, "should not be null!");
   9.918 @@ -1613,7 +1249,6 @@
   9.919        body_summary->record_satb_drain_time_ms(0.0);
   9.920      body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
   9.921      body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
   9.922 -    body_summary->record_scan_only_time_ms(scan_only_time);
   9.923      body_summary->record_update_rs_time_ms(update_rs_time);
   9.924      body_summary->record_scan_rs_time_ms(scan_rs_time);
   9.925      body_summary->record_obj_copy_time_ms(obj_copy_time);
   9.926 @@ -1667,7 +1302,7 @@
   9.927      else
   9.928        other_time_ms -=
   9.929          update_rs_time +
   9.930 -        ext_root_scan_time + mark_stack_scan_time + scan_only_time +
   9.931 +        ext_root_scan_time + mark_stack_scan_time +
   9.932          scan_rs_time + obj_copy_time;
   9.933    }
   9.934  
   9.935 @@ -1692,9 +1327,6 @@
   9.936                            _par_last_update_rs_processed_buffers, true);
   9.937          print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
   9.938          print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
   9.939 -        print_par_stats(2, "Scan-Only Scanning", _par_last_scan_only_times_ms);
   9.940 -        print_par_buffers(3, "Scan-Only Regions",
   9.941 -                          _par_last_scan_only_regions_scanned, true);
   9.942          print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
   9.943          print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
   9.944          print_par_stats(2, "Termination", _par_last_termination_times_ms);
   9.945 @@ -1706,7 +1338,6 @@
   9.946                      (int)update_rs_processed_buffers);
   9.947          print_stats(1, "Ext Root Scanning", ext_root_scan_time);
   9.948          print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
   9.949 -        print_stats(1, "Scan-Only Scanning", scan_only_time);
   9.950          print_stats(1, "Scan RS", scan_rs_time);
   9.951          print_stats(1, "Object Copying", obj_copy_time);
   9.952        }
   9.953 @@ -1721,6 +1352,8 @@
   9.954      }
   9.955  #endif
   9.956      print_stats(1, "Other", other_time_ms);
   9.957 +    print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
   9.958 +
   9.959      for (int i = 0; i < _aux_num; ++i) {
   9.960        if (_cur_aux_times_set[i]) {
   9.961          char buffer[96];
   9.962 @@ -1806,16 +1439,6 @@
   9.963        _cost_per_card_ms_seq->add(cost_per_card_ms);
   9.964      }
   9.965  
   9.966 -    double cost_per_scan_only_region_ms = 0.0;
   9.967 -    if (scan_only_regions_scanned > 0.0) {
   9.968 -      cost_per_scan_only_region_ms =
   9.969 -        scan_only_time / scan_only_regions_scanned;
   9.970 -      if (_in_marking_window_im)
   9.971 -        _cost_per_scan_only_region_ms_during_cm_seq->add(cost_per_scan_only_region_ms);
   9.972 -      else
   9.973 -        _cost_per_scan_only_region_ms_seq->add(cost_per_scan_only_region_ms);
   9.974 -    }
   9.975 -
   9.976      size_t cards_scanned = _g1->cards_scanned();
   9.977  
   9.978      double cost_per_entry_ms = 0.0;
   9.979 @@ -1851,7 +1474,7 @@
   9.980      }
   9.981  
   9.982      double all_other_time_ms = pause_time_ms -
   9.983 -      (update_rs_time + scan_only_time + scan_rs_time + obj_copy_time +
   9.984 +      (update_rs_time + scan_rs_time + obj_copy_time +
   9.985         _mark_closure_time_ms + termination_time);
   9.986  
   9.987      double young_other_time_ms = 0.0;
   9.988 @@ -1898,11 +1521,10 @@
   9.989      if (PREDICTIONS_VERBOSE) {
   9.990        gclog_or_tty->print_cr("");
   9.991        gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
   9.992 -                    "REGIONS %d %d %d %d "
   9.993 +                    "REGIONS %d %d %d "
   9.994                      "PENDING_CARDS %d %d "
   9.995                      "CARDS_SCANNED %d %d "
   9.996                      "RS_LENGTHS %d %d "
   9.997 -                    "SCAN_ONLY_SCAN %1.6lf %1.6lf "
   9.998                      "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
   9.999                      "SURVIVAL_RATIO %1.6lf %1.6lf "
  9.1000                      "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
  9.1001 @@ -1915,12 +1537,10 @@
  9.1002                      (last_pause_included_initial_mark) ? 1 : 0,
  9.1003                      _recorded_region_num,
  9.1004                      _recorded_young_regions,
  9.1005 -                    _recorded_scan_only_regions,
  9.1006                      _recorded_non_young_regions,
  9.1007                      _predicted_pending_cards, _pending_cards,
  9.1008                      _predicted_cards_scanned, cards_scanned,
  9.1009                      _predicted_rs_lengths, _max_rs_lengths,
  9.1010 -                    _predicted_scan_only_scan_time_ms, scan_only_time,
  9.1011                      _predicted_rs_update_time_ms, update_rs_time,
  9.1012                      _predicted_rs_scan_time_ms, scan_rs_time,
  9.1013                      _predicted_survival_ratio, survival_ratio,
  9.1014 @@ -1945,14 +1565,12 @@
  9.1015    _in_marking_window = new_in_marking_window;
  9.1016    _in_marking_window_im = new_in_marking_window_im;
  9.1017    _free_regions_at_end_of_collection = _g1->free_regions();
  9.1018 -  _scan_only_regions_at_end_of_collection = _g1->young_list_length();
  9.1019    calculate_young_list_min_length();
  9.1020 -  calculate_young_list_target_config();
  9.1021 +  calculate_young_list_target_length();
  9.1022  
  9.1023    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  9.1024    double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  9.1025    adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  9.1026 -
  9.1027    // </NEW PREDICTION>
  9.1028  
  9.1029    _target_pause_time_ms = -1.0;
  9.1030 @@ -2007,13 +1625,13 @@
  9.1031    guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  9.1032  
  9.1033    G1CollectedHeap* g1h = G1CollectedHeap::heap();
  9.1034 -  size_t young_num = g1h->young_list_length();
  9.1035 +  size_t young_num = g1h->young_list()->length();
  9.1036    if (young_num == 0)
  9.1037      return 0.0;
  9.1038  
  9.1039    young_num += adjustment;
  9.1040    size_t pending_cards = predict_pending_cards();
  9.1041 -  size_t rs_lengths = g1h->young_list_sampled_rs_lengths() +
  9.1042 +  size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  9.1043                        predict_rs_length_diff();
  9.1044    size_t card_num;
  9.1045    if (full_young_gcs())
  9.1046 @@ -2097,31 +1715,22 @@
  9.1047  void
  9.1048  G1CollectorPolicy::start_recording_regions() {
  9.1049    _recorded_rs_lengths            = 0;
  9.1050 -  _recorded_scan_only_regions     = 0;
  9.1051    _recorded_young_regions         = 0;
  9.1052    _recorded_non_young_regions     = 0;
  9.1053  
  9.1054  #if PREDICTIONS_VERBOSE
  9.1055 -  _predicted_rs_lengths           = 0;
  9.1056 -  _predicted_cards_scanned        = 0;
  9.1057 -
  9.1058    _recorded_marked_bytes          = 0;
  9.1059    _recorded_young_bytes           = 0;
  9.1060    _predicted_bytes_to_copy        = 0;
  9.1061 +  _predicted_rs_lengths           = 0;
  9.1062 +  _predicted_cards_scanned        = 0;
  9.1063  #endif // PREDICTIONS_VERBOSE
  9.1064  }
  9.1065  
  9.1066  void
  9.1067 -G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) {
  9.1068 -  if (young) {
  9.1069 -    ++_recorded_young_regions;
  9.1070 -  } else {
  9.1071 -    ++_recorded_non_young_regions;
  9.1072 -  }
  9.1073 +G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
  9.1074  #if PREDICTIONS_VERBOSE
  9.1075 -  if (young) {
  9.1076 -    _recorded_young_bytes += hr->used();
  9.1077 -  } else {
  9.1078 +  if (!young) {
  9.1079      _recorded_marked_bytes += hr->max_live_bytes();
  9.1080    }
  9.1081    _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
  9.1082 @@ -2132,12 +1741,37 @@
  9.1083  }
  9.1084  
  9.1085  void
  9.1086 -G1CollectorPolicy::record_scan_only_regions(size_t scan_only_length) {
  9.1087 -  _recorded_scan_only_regions = scan_only_length;
  9.1088 +G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  9.1089 +  assert(!hr->is_young(), "should not call this");
  9.1090 +  ++_recorded_non_young_regions;
  9.1091 +  record_cset_region_info(hr, false);
  9.1092 +}
  9.1093 +
  9.1094 +void
  9.1095 +G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  9.1096 +  _recorded_young_regions = n_regions;
  9.1097 +}
  9.1098 +
  9.1099 +void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
  9.1100 +#if PREDICTIONS_VERBOSE
  9.1101 +  _recorded_young_bytes = bytes;
  9.1102 +#endif // PREDICTIONS_VERBOSE
  9.1103 +}
  9.1104 +
  9.1105 +void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  9.1106 +  _recorded_rs_lengths = rs_lengths;
  9.1107 +}
  9.1108 +
  9.1109 +void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  9.1110 +  _predicted_bytes_to_copy = bytes;
  9.1111  }
  9.1112  
  9.1113  void
  9.1114  G1CollectorPolicy::end_recording_regions() {
  9.1115 +  // The _predicted_pause_time_ms field is referenced in code
  9.1116 +  // not under PREDICTIONS_VERBOSE. Let's initialize it.
  9.1117 +  _predicted_pause_time_ms = -1.0;
  9.1118 +
  9.1119  #if PREDICTIONS_VERBOSE
  9.1120    _predicted_pending_cards = predict_pending_cards();
  9.1121    _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  9.1122 @@ -2148,8 +1782,6 @@
  9.1123        predict_non_young_card_num(_predicted_rs_lengths);
  9.1124    _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
  9.1125  
  9.1126 -  _predicted_scan_only_scan_time_ms =
  9.1127 -    predict_scan_only_time_ms(_recorded_scan_only_regions);
  9.1128    _predicted_rs_update_time_ms =
  9.1129      predict_rs_update_time_ms(_g1->pending_card_num());
  9.1130    _predicted_rs_scan_time_ms =
  9.1131 @@ -2164,7 +1796,6 @@
  9.1132      predict_non_young_other_time_ms(_recorded_non_young_regions);
  9.1133  
  9.1134    _predicted_pause_time_ms =
  9.1135 -    _predicted_scan_only_scan_time_ms +
  9.1136      _predicted_rs_update_time_ms +
  9.1137      _predicted_rs_scan_time_ms +
  9.1138      _predicted_object_copy_time_ms +
  9.1139 @@ -2454,8 +2085,6 @@
  9.1140                        body_summary->get_ext_root_scan_seq());
  9.1141          print_summary(2, "Mark Stack Scanning",
  9.1142                        body_summary->get_mark_stack_scan_seq());
  9.1143 -        print_summary(2, "Scan-Only Scanning",
  9.1144 -                      body_summary->get_scan_only_seq());
  9.1145          print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  9.1146          print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  9.1147          print_summary(2, "Termination", body_summary->get_termination_seq());
  9.1148 @@ -2465,7 +2094,6 @@
  9.1149              body_summary->get_update_rs_seq(),
  9.1150              body_summary->get_ext_root_scan_seq(),
  9.1151              body_summary->get_mark_stack_scan_seq(),
  9.1152 -            body_summary->get_scan_only_seq(),
  9.1153              body_summary->get_scan_rs_seq(),
  9.1154              body_summary->get_obj_copy_seq(),
  9.1155              body_summary->get_termination_seq()
  9.1156 @@ -2483,8 +2111,6 @@
  9.1157                        body_summary->get_ext_root_scan_seq());
  9.1158          print_summary(1, "Mark Stack Scanning",
  9.1159                        body_summary->get_mark_stack_scan_seq());
  9.1160 -        print_summary(1, "Scan-Only Scanning",
  9.1161 -                      body_summary->get_scan_only_seq());
  9.1162          print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  9.1163          print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  9.1164        }
  9.1165 @@ -2510,7 +2136,6 @@
  9.1166              body_summary->get_update_rs_seq(),
  9.1167              body_summary->get_ext_root_scan_seq(),
  9.1168              body_summary->get_mark_stack_scan_seq(),
  9.1169 -            body_summary->get_scan_only_seq(),
  9.1170              body_summary->get_scan_rs_seq(),
  9.1171              body_summary->get_obj_copy_seq()
  9.1172            };
  9.1173 @@ -2604,7 +2229,7 @@
  9.1174  G1CollectorPolicy::should_add_next_region_to_young_list() {
  9.1175    assert(in_young_gc_mode(), "should be in young GC mode");
  9.1176    bool ret;
  9.1177 -  size_t young_list_length = _g1->young_list_length();
  9.1178 +  size_t young_list_length = _g1->young_list()->length();
  9.1179    size_t young_list_max_length = _young_list_target_length;
  9.1180    if (G1FixedEdenSize) {
  9.1181      young_list_max_length -= _max_survivor_regions;
  9.1182 @@ -2667,7 +2292,7 @@
  9.1183    assert(_g1->regions_accounted_for(), "Region leakage!");
  9.1184    double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  9.1185  
  9.1186 -  size_t young_list_length = _g1->young_list_length();
  9.1187 +  size_t young_list_length = _g1->young_list()->length();
  9.1188    size_t young_list_max_length = _young_list_target_length;
  9.1189    if (G1FixedEdenSize) {
  9.1190      young_list_max_length -= _max_survivor_regions;
  9.1191 @@ -2676,7 +2301,7 @@
  9.1192  
  9.1193    if (in_young_gc_mode()) {
  9.1194      if (reached_target_length) {
  9.1195 -      assert( young_list_length > 0 && _g1->young_list_length() > 0,
  9.1196 +      assert( young_list_length > 0 && _g1->young_list()->length() > 0,
  9.1197                "invariant" );
  9.1198        _target_pause_time_ms = max_pause_time_ms;
  9.1199        return true;
  9.1200 @@ -2937,22 +2562,24 @@
  9.1201    }
  9.1202  }
  9.1203  
  9.1204 -// Add the heap region to the collection set and return the conservative
  9.1205 -// estimate of the number of live bytes.
  9.1206 +// Add the heap region at the head of the non-incremental collection set
  9.1207  void G1CollectorPolicy::
  9.1208  add_to_collection_set(HeapRegion* hr) {
  9.1209 +  assert(_inc_cset_build_state == Active, "Precondition");
  9.1210 +  assert(!hr->is_young(), "non-incremental add of young region");
  9.1211 +
  9.1212    if (G1PrintHeapRegions) {
  9.1213 -    gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
  9.1214 -                  "top "PTR_FORMAT", young %s",
  9.1215 -                  hr->hrs_index(), hr->bottom(), hr->end(),
  9.1216 -                  hr->top(), (hr->is_young()) ? "YES" : "NO");
  9.1217 +    gclog_or_tty->print_cr("added region to cset "
  9.1218 +                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
  9.1219 +                           "top "PTR_FORMAT", %s",
  9.1220 +                           hr->hrs_index(), hr->bottom(), hr->end(),
  9.1221 +                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
  9.1222    }
  9.1223  
  9.1224    if (_g1->mark_in_progress())
  9.1225      _g1->concurrent_mark()->registerCSetRegion(hr);
  9.1226  
  9.1227 -  assert(!hr->in_collection_set(),
  9.1228 -              "should not already be in the CSet");
  9.1229 +  assert(!hr->in_collection_set(), "should not already be in the CSet");
  9.1230    hr->set_in_collection_set(true);
  9.1231    hr->set_next_in_collection_set(_collection_set);
  9.1232    _collection_set = hr;
  9.1233 @@ -2961,10 +2588,230 @@
  9.1234    _g1->register_region_with_in_cset_fast_test(hr);
  9.1235  }
  9.1236  
  9.1237 -void
  9.1238 -G1CollectorPolicy_BestRegionsFirst::
  9.1239 -choose_collection_set() {
  9.1240 -  double non_young_start_time_sec;
  9.1241 +// Initialize the per-collection-set information
  9.1242 +void G1CollectorPolicy::start_incremental_cset_building() {
  9.1243 +  assert(_inc_cset_build_state == Inactive, "Precondition");
  9.1244 +
  9.1245 +  _inc_cset_head = NULL;
  9.1246 +  _inc_cset_tail = NULL;
  9.1247 +  _inc_cset_size = 0;
  9.1248 +  _inc_cset_bytes_used_before = 0;
  9.1249 +
  9.1250 +  if (in_young_gc_mode()) {
  9.1251 +    _inc_cset_young_index = 0;
  9.1252 +  }
  9.1253 +
  9.1254 +  _inc_cset_max_finger = 0;
  9.1255 +  _inc_cset_recorded_young_bytes = 0;
  9.1256 +  _inc_cset_recorded_rs_lengths = 0;
  9.1257 +  _inc_cset_predicted_elapsed_time_ms = 0;
  9.1258 +  _inc_cset_predicted_bytes_to_copy = 0;
  9.1259 +  _inc_cset_build_state = Active;
  9.1260 +}
  9.1261 +
  9.1262 +void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  9.1263 +  // This routine is used when:
  9.1264 +  // * adding survivor regions to the incremental cset at the end of an
  9.1265 +  //   evacuation pause,
  9.1266 +  // * adding the current allocation region to the incremental cset
  9.1267 +  //   when it is retired, and
  9.1268 +  // * updating existing policy information for a region in the
  9.1269 +  //   incremental cset via young list RSet sampling.
  9.1270 +  // Therefore this routine may be called at a safepoint by the
  9.1271 +  // VM thread, or in-between safepoints by mutator threads (when
  9.1272 +  // retiring the current allocation region) or a concurrent
  9.1273 +  // refine thread (RSet sampling).
  9.1274 +
  9.1275 +  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  9.1276 +  size_t used_bytes = hr->used();
  9.1277 +
  9.1278 +  _inc_cset_recorded_rs_lengths += rs_length;
  9.1279 +  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  9.1280 +
  9.1281 +  _inc_cset_bytes_used_before += used_bytes;
  9.1282 +
  9.1283 +  // Cache the values we have added to the aggregated informtion
  9.1284 +  // in the heap region in case we have to remove this region from
  9.1285 +  // the incremental collection set, or it is updated by the
  9.1286 +  // rset sampling code
  9.1287 +  hr->set_recorded_rs_length(rs_length);
  9.1288 +  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  9.1289 +
  9.1290 +#if PREDICTIONS_VERBOSE
  9.1291 +  size_t bytes_to_copy = predict_bytes_to_copy(hr);
  9.1292 +  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
  9.1293 +
  9.1294 +  // Record the number of bytes used in this region
  9.1295 +  _inc_cset_recorded_young_bytes += used_bytes;
  9.1296 +
  9.1297 +  // Cache the values we have added to the aggregated informtion
  9.1298 +  // in the heap region in case we have to remove this region from
  9.1299 +  // the incremental collection set, or it is updated by the
  9.1300 +  // rset sampling code
  9.1301 +  hr->set_predicted_bytes_to_copy(bytes_to_copy);
  9.1302 +#endif // PREDICTIONS_VERBOSE
  9.1303 +}
  9.1304 +
  9.1305 +void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  9.1306 +  // This routine is currently only called as part of the updating of
  9.1307 +  // existing policy information for regions in the incremental cset that
  9.1308 +  // is performed by the concurrent refine thread(s) as part of young list
  9.1309 +  // RSet sampling. Therefore we should not be at a safepoint.
  9.1310 +
  9.1311 +  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  9.1312 +  assert(hr->is_young(), "it should be");
  9.1313 +
  9.1314 +  size_t used_bytes = hr->used();
  9.1315 +  size_t old_rs_length = hr->recorded_rs_length();
  9.1316 +  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  9.1317 +
  9.1318 +  // Subtract the old recorded/predicted policy information for
  9.1319 +  // the given heap region from the collection set info.
  9.1320 +  _inc_cset_recorded_rs_lengths -= old_rs_length;
  9.1321 +  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
  9.1322 +
  9.1323 +  _inc_cset_bytes_used_before -= used_bytes;
  9.1324 +
  9.1325 +  // Clear the values cached in the heap region
  9.1326 +  hr->set_recorded_rs_length(0);
  9.1327 +  hr->set_predicted_elapsed_time_ms(0);
  9.1328 +
  9.1329 +#if PREDICTIONS_VERBOSE
  9.1330 +  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  9.1331 +  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
  9.1332 +
  9.1333 +  // Subtract the number of bytes used in this region
  9.1334 +  _inc_cset_recorded_young_bytes -= used_bytes;
  9.1335 +
  9.1336 +  // Clear the values cached in the heap region
  9.1337 +  hr->set_predicted_bytes_to_copy(0);
  9.1338 +#endif // PREDICTIONS_VERBOSE
  9.1339 +}
  9.1340 +
  9.1341 +void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  9.1342 +  // Update the collection set information that is dependent on the new RS length
  9.1343 +  assert(hr->is_young(), "Precondition");
  9.1344 +
  9.1345 +  remove_from_incremental_cset_info(hr);
  9.1346 +  add_to_incremental_cset_info(hr, new_rs_length);
  9.1347 +}
  9.1348 +
  9.1349 +void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  9.1350 +  assert( hr->is_young(), "invariant");
  9.1351 +  assert( hr->young_index_in_cset() == -1, "invariant" );
  9.1352 +  assert(_inc_cset_build_state == Active, "Precondition");
  9.1353 +
  9.1354 +  // We need to clear and set the cached recorded/cached collection set
  9.1355 +  // information in the heap region here (before the region gets added
  9.1356 +  // to the collection set). An individual heap region's cached values
  9.1357 +  // are calculated, aggregated with the policy collection set info,
  9.1358 +  // and cached in the heap region here (initially) and (subsequently)
  9.1359 +  // by the Young List sampling code.
  9.1360 +
  9.1361 +  size_t rs_length = hr->rem_set()->occupied();
  9.1362 +  add_to_incremental_cset_info(hr, rs_length);
  9.1363 +
  9.1364 +  HeapWord* hr_end = hr->end();
  9.1365 +  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  9.1366 +
  9.1367 +  assert(!hr->in_collection_set(), "invariant");
  9.1368 +  hr->set_in_collection_set(true);
  9.1369 +  assert( hr->next_in_collection_set() == NULL, "invariant");
  9.1370 +
  9.1371 +  _inc_cset_size++;
  9.1372 +  _g1->register_region_with_in_cset_fast_test(hr);
  9.1373 +
  9.1374 +  hr->set_young_index_in_cset((int) _inc_cset_young_index);
  9.1375 +  ++_inc_cset_young_index;
  9.1376 +}
  9.1377 +
  9.1378 +// Add the region at the RHS of the incremental cset
  9.1379 +void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  9.1380 +  // We should only ever be appending survivors at the end of a pause
  9.1381 +  assert( hr->is_survivor(), "Logic");
  9.1382 +
  9.1383 +  // Do the 'common' stuff
  9.1384 +  add_region_to_incremental_cset_common(hr);
  9.1385 +
  9.1386 +  // Now add the region at the right hand side
  9.1387 +  if (_inc_cset_tail == NULL) {
  9.1388 +    assert(_inc_cset_head == NULL, "invariant");
  9.1389 +    _inc_cset_head = hr;
  9.1390 +  } else {
  9.1391 +    _inc_cset_tail->set_next_in_collection_set(hr);
  9.1392 +  }
  9.1393 +  _inc_cset_tail = hr;
  9.1394 +
  9.1395 +  if (G1PrintHeapRegions) {
  9.1396 +    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
  9.1397 +                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
  9.1398 +                  "top "PTR_FORMAT", young %s",
  9.1399 +                  hr->hrs_index(), hr->bottom(), hr->end(),
  9.1400 +                  hr->top(), (hr->is_young()) ? "YES" : "NO");
  9.1401 +  }
  9.1402 +}
  9.1403 +
  9.1404 +// Add the region to the LHS of the incremental cset
  9.1405 +void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  9.1406 +  // Survivors should be added to the RHS at the end of a pause
  9.1407 +  assert(!hr->is_survivor(), "Logic");
  9.1408 +
  9.1409 +  // Do the 'common' stuff
  9.1410 +  add_region_to_incremental_cset_common(hr);
  9.1411 +
  9.1412 +  // Add the region at the left hand side
  9.1413 +  hr->set_next_in_collection_set(_inc_cset_head);
  9.1414 +  if (_inc_cset_head == NULL) {
  9.1415 +    assert(_inc_cset_tail == NULL, "Invariant");
  9.1416 +    _inc_cset_tail = hr;
  9.1417 +  }
  9.1418 +  _inc_cset_head = hr;
  9.1419 +
  9.1420 +  if (G1PrintHeapRegions) {
  9.1421 +    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
  9.1422 +                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
  9.1423 +                  "top "PTR_FORMAT", young %s",
  9.1424 +                  hr->hrs_index(), hr->bottom(), hr->end(),
  9.1425 +                  hr->top(), (hr->is_young()) ? "YES" : "NO");
  9.1426 +  }
  9.1427 +}
  9.1428 +
  9.1429 +#ifndef PRODUCT
  9.1430 +void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  9.1431 +  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
  9.1432 +
  9.1433 +  st->print_cr("\nCollection_set:");
  9.1434 +  HeapRegion* csr = list_head;
  9.1435 +  while (csr != NULL) {
  9.1436 +    HeapRegion* next = csr->next_in_collection_set();
  9.1437 +    assert(csr->in_collection_set(), "bad CS");
  9.1438 +    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
  9.1439 +                 "age: %4d, y: %d, surv: %d",
  9.1440 +                        csr->bottom(), csr->end(),
  9.1441 +                        csr->top(),
  9.1442 +                        csr->prev_top_at_mark_start(),
  9.1443 +                        csr->next_top_at_mark_start(),
  9.1444 +                        csr->top_at_conc_mark_count(),
  9.1445 +                        csr->age_in_surv_rate_group_cond(),
  9.1446 +                        csr->is_young(),
  9.1447 +                        csr->is_survivor());
  9.1448 +    csr = next;
  9.1449 +  }
  9.1450 +}
  9.1451 +#endif // !PRODUCT
  9.1452 +
  9.1453 +bool
  9.1454 +G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
  9.1455 +  // Set this here - in case we're not doing young collections.
  9.1456 +  double non_young_start_time_sec = os::elapsedTime();
  9.1457 +
  9.1458 +  // The result that this routine will return. This will be set to
  9.1459 +  // false if:
  9.1460 +  // * we're doing a young or partially young collection and we
  9.1461 +  //   have added the youg regions to collection set, or
  9.1462 +  // * we add old regions to the collection set.
  9.1463 +  bool abandon_collection = true;
  9.1464 +
  9.1465    start_recording_regions();
  9.1466  
  9.1467    guarantee(_target_pause_time_ms > -1.0
  9.1468 @@ -3017,47 +2864,79 @@
  9.1469  
  9.1470      if (G1PolicyVerbose > 0) {
  9.1471        gclog_or_tty->print_cr("Adding %d young regions to the CSet",
  9.1472 -                    _g1->young_list_length());
  9.1473 +                    _g1->young_list()->length());
  9.1474      }
  9.1475 +
  9.1476      _young_cset_length  = 0;
  9.1477      _last_young_gc_full = full_young_gcs() ? true : false;
  9.1478 +
  9.1479      if (_last_young_gc_full)
  9.1480        ++_full_young_pause_num;
  9.1481      else
  9.1482        ++_partial_young_pause_num;
  9.1483 -    hr = _g1->pop_region_from_young_list();
  9.1484 +
  9.1485 +    // The young list is laid with the survivor regions from the previous
  9.1486 +    // pause are appended to the RHS of the young list, i.e.
  9.1487 +    //   [Newly Young Regions ++ Survivors from last pause].
  9.1488 +
  9.1489 +    hr = _g1->young_list()->first_survivor_region();
  9.1490      while (hr != NULL) {
  9.1491 -
  9.1492 -      assert( hr->young_index_in_cset() == -1, "invariant" );
  9.1493 -      assert( hr->age_in_surv_rate_group() != -1, "invariant" );
  9.1494 -      hr->set_young_index_in_cset((int) _young_cset_length);
  9.1495 -
  9.1496 -      ++_young_cset_length;
  9.1497 -      double predicted_time_ms = predict_region_elapsed_time_ms(hr, true);
  9.1498 -      time_remaining_ms -= predicted_time_ms;
  9.1499 -      predicted_pause_time_ms += predicted_time_ms;
  9.1500 -      assert(!hr->in_collection_set(), "invariant");
  9.1501 -      add_to_collection_set(hr);
  9.1502 -      record_cset_region(hr, true);
  9.1503 -      max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
  9.1504 -      if (G1PolicyVerbose > 0) {
  9.1505 -        gclog_or_tty->print_cr("  Added [" PTR_FORMAT ", " PTR_FORMAT") to CS.",
  9.1506 -                      hr->bottom(), hr->end());
  9.1507 -        gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
  9.1508 -                      max_live_bytes/K);
  9.1509 -      }
  9.1510 -      hr = _g1->pop_region_from_young_list();
  9.1511 +      assert(hr->is_survivor(), "badly formed young list");
  9.1512 +      hr->set_young();
  9.1513 +      hr = hr->get_next_young_region();
  9.1514      }
  9.1515  
  9.1516 -    record_scan_only_regions(_g1->young_list_scan_only_length());
  9.1517 +    // Clear the fields that point to the survivor list - they are
  9.1518 +    // all young now.
  9.1519 +    _g1->young_list()->clear_survivors();
  9.1520 +
  9.1521 +    if (_g1->mark_in_progress())
  9.1522 +      _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
  9.1523 +
  9.1524 +    _young_cset_length = _inc_cset_young_index;
  9.1525 +    _collection_set = _inc_cset_head;
  9.1526 +    _collection_set_size = _inc_cset_size;
  9.1527 +    _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  9.1528 +
  9.1529 +    // For young regions in the collection set, we assume the worst
  9.1530 +    // case of complete survival
  9.1531 +    max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
  9.1532 +
  9.1533 +    time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  9.1534 +    predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
  9.1535 +
  9.1536 +    // The number of recorded young regions is the incremental
  9.1537 +    // collection set's current size
  9.1538 +    set_recorded_young_regions(_inc_cset_size);
  9.1539 +    set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  9.1540 +    set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
  9.1541 +#if PREDICTIONS_VERBOSE
  9.1542 +    set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
  9.1543 +#endif // PREDICTIONS_VERBOSE
  9.1544 +
  9.1545 +    if (G1PolicyVerbose > 0) {
  9.1546 +      gclog_or_tty->print_cr("  Added " PTR_FORMAT " Young Regions to CS.",
  9.1547 +                             _inc_cset_size);
  9.1548 +      gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
  9.1549 +                            max_live_bytes/K);
  9.1550 +    }
  9.1551 +
  9.1552 +    assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
  9.1553 +    if (_inc_cset_size > 0) {
  9.1554 +      assert(_collection_set != NULL, "Invariant");
  9.1555 +      abandon_collection = false;
  9.1556 +    }
  9.1557  
  9.1558      double young_end_time_sec = os::elapsedTime();
  9.1559      _recorded_young_cset_choice_time_ms =
  9.1560        (young_end_time_sec - young_start_time_sec) * 1000.0;
  9.1561  
  9.1562 -    non_young_start_time_sec = os::elapsedTime();
  9.1563 -
  9.1564 -    if (_young_cset_length > 0 && _last_young_gc_full) {
  9.1565 +    // We are doing young collections so reset this.
  9.1566 +    non_young_start_time_sec = young_end_time_sec;
  9.1567 +
  9.1568 +    // Note we can use either _collection_set_size or
  9.1569 +    // _young_cset_length here
  9.1570 +    if (_collection_set_size > 0 && _last_young_gc_full) {
  9.1571        // don't bother adding more regions...
  9.1572        goto choose_collection_set_end;
  9.1573      }
  9.1574 @@ -3067,6 +2946,11 @@
  9.1575      bool should_continue = true;
  9.1576      NumberSeq seq;
  9.1577      double avg_prediction = 100000000000000000.0; // something very large
  9.1578 +
  9.1579 +    // Save the current size of the collection set to detect
  9.1580 +    // if we actually added any old regions.
  9.1581 +    size_t n_young_regions = _collection_set_size;
  9.1582 +
  9.1583      do {
  9.1584        hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
  9.1585                                                        avg_prediction);
  9.1586 @@ -3075,7 +2959,7 @@
  9.1587          time_remaining_ms -= predicted_time_ms;
  9.1588          predicted_pause_time_ms += predicted_time_ms;
  9.1589          add_to_collection_set(hr);
  9.1590 -        record_cset_region(hr, false);
  9.1591 +        record_non_young_cset_region(hr);
  9.1592          max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
  9.1593          if (G1PolicyVerbose > 0) {
  9.1594            gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
  9.1595 @@ -3093,9 +2977,17 @@
  9.1596      if (!adaptive_young_list_length() &&
  9.1597          _collection_set_size < _young_list_fixed_length)
  9.1598        _should_revert_to_full_young_gcs  = true;
  9.1599 +
  9.1600 +    if (_collection_set_size > n_young_regions) {
  9.1601 +      // We actually added old regions to the collection set
  9.1602 +      // so we are not abandoning this collection.
  9.1603 +      abandon_collection = false;
  9.1604 +    }
  9.1605    }
  9.1606  
  9.1607  choose_collection_set_end:
  9.1608 +  stop_incremental_cset_building();
  9.1609 +
  9.1610    count_CS_bytes_used();
  9.1611  
  9.1612    end_recording_regions();
  9.1613 @@ -3103,6 +2995,8 @@
  9.1614    double non_young_end_time_sec = os::elapsedTime();
  9.1615    _recorded_non_young_cset_choice_time_ms =
  9.1616      (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
  9.1617 +
  9.1618 +  return abandon_collection;
  9.1619  }
  9.1620  
  9.1621  void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
    10.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Apr 21 01:13:15 2010 -0700
    10.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Apr 26 18:01:55 2010 -0400
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    10.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -61,7 +61,6 @@
   10.11    define_num_seq(parallel) // parallel only
   10.12      define_num_seq(ext_root_scan)
   10.13      define_num_seq(mark_stack_scan)
   10.14 -    define_num_seq(scan_only)
   10.15      define_num_seq(update_rs)
   10.16      define_num_seq(scan_rs)
   10.17      define_num_seq(scan_new_refs) // Only for temp use; added to
   10.18 @@ -174,8 +173,6 @@
   10.19  
   10.20    double* _par_last_ext_root_scan_times_ms;
   10.21    double* _par_last_mark_stack_scan_times_ms;
   10.22 -  double* _par_last_scan_only_times_ms;
   10.23 -  double* _par_last_scan_only_regions_scanned;
   10.24    double* _par_last_update_rs_start_times_ms;
   10.25    double* _par_last_update_rs_times_ms;
   10.26    double* _par_last_update_rs_processed_buffers;
   10.27 @@ -196,7 +193,6 @@
   10.28    bool _adaptive_young_list_length;
   10.29    size_t _young_list_min_length;
   10.30    size_t _young_list_target_length;
   10.31 -  size_t _young_list_so_prefix_length;
   10.32    size_t _young_list_fixed_length;
   10.33  
   10.34    size_t _young_cset_length;
   10.35 @@ -234,7 +230,6 @@
   10.36    TruncatedSeq* _pending_card_diff_seq;
   10.37    TruncatedSeq* _rs_length_diff_seq;
   10.38    TruncatedSeq* _cost_per_card_ms_seq;
   10.39 -  TruncatedSeq* _cost_per_scan_only_region_ms_seq;
   10.40    TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
   10.41    TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
   10.42    TruncatedSeq* _cost_per_entry_ms_seq;
   10.43 @@ -249,19 +244,16 @@
   10.44    TruncatedSeq* _rs_lengths_seq;
   10.45  
   10.46    TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
   10.47 -  TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq;
   10.48  
   10.49    TruncatedSeq* _young_gc_eff_seq;
   10.50  
   10.51    TruncatedSeq* _max_conc_overhead_seq;
   10.52  
   10.53    size_t _recorded_young_regions;
   10.54 -  size_t _recorded_scan_only_regions;
   10.55    size_t _recorded_non_young_regions;
   10.56    size_t _recorded_region_num;
   10.57  
   10.58    size_t _free_regions_at_end_of_collection;
   10.59 -  size_t _scan_only_regions_at_end_of_collection;
   10.60  
   10.61    size_t _recorded_rs_lengths;
   10.62    size_t _max_rs_lengths;
   10.63 @@ -277,7 +269,6 @@
   10.64    double _predicted_survival_ratio;
   10.65    double _predicted_rs_update_time_ms;
   10.66    double _predicted_rs_scan_time_ms;
   10.67 -  double _predicted_scan_only_scan_time_ms;
   10.68    double _predicted_object_copy_time_ms;
   10.69    double _predicted_constant_other_time_ms;
   10.70    double _predicted_young_other_time_ms;
   10.71 @@ -344,8 +335,6 @@
   10.72    bool verify_young_ages();
   10.73  #endif // PRODUCT
   10.74  
   10.75 -  void tag_scan_only(size_t short_lived_scan_only_length);
   10.76 -
   10.77    double get_new_prediction(TruncatedSeq* seq) {
   10.78      return MAX2(seq->davg() + sigma() * seq->dsd(),
   10.79                  seq->davg() * confidence_factor(seq->num()));
   10.80 @@ -431,23 +420,6 @@
   10.81          get_new_prediction(_partially_young_cost_per_entry_ms_seq);
   10.82    }
   10.83  
   10.84 -  double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) {
   10.85 -    if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3)
   10.86 -      return 1.5 * (double) scan_only_region_num *
   10.87 -        get_new_prediction(_cost_per_scan_only_region_ms_seq);
   10.88 -    else
   10.89 -      return (double) scan_only_region_num *
   10.90 -        get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq);
   10.91 -  }
   10.92 -
   10.93 -  double predict_scan_only_time_ms(size_t scan_only_region_num) {
   10.94 -    if (_in_marking_window_im)
   10.95 -      return predict_scan_only_time_ms_during_cm(scan_only_region_num);
   10.96 -    else
   10.97 -      return (double) scan_only_region_num *
   10.98 -        get_new_prediction(_cost_per_scan_only_region_ms_seq);
   10.99 -  }
  10.100 -
  10.101    double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
  10.102      if (_cost_per_byte_ms_during_cm_seq->num() < 3)
  10.103        return 1.1 * (double) bytes_to_copy *
  10.104 @@ -490,24 +462,21 @@
  10.105    size_t predict_bytes_to_copy(HeapRegion* hr);
  10.106    double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
  10.107  
  10.108 -  // for use by: calculate_optimal_so_length(length)
  10.109 -  void predict_gc_eff(size_t young_region_num,
  10.110 -                      size_t so_length,
  10.111 -                      double base_time_ms,
  10.112 -                      double *gc_eff,
  10.113 -                      double *pause_time_ms);
  10.114 -
  10.115 -  // for use by: calculate_young_list_target_config(rs_length)
  10.116 -  bool predict_gc_eff(size_t young_region_num,
  10.117 -                      size_t so_length,
  10.118 -                      double base_time_with_so_ms,
  10.119 -                      size_t init_free_regions,
  10.120 -                      double target_pause_time_ms,
  10.121 -                      double* gc_eff);
  10.122 +    // for use by: calculate_young_list_target_length(rs_length)
  10.123 +  bool predict_will_fit(size_t young_region_num,
  10.124 +                        double base_time_ms,
  10.125 +                        size_t init_free_regions,
  10.126 +                        double target_pause_time_ms);
  10.127  
  10.128    void start_recording_regions();
  10.129 -  void record_cset_region(HeapRegion* hr, bool young);
  10.130 -  void record_scan_only_regions(size_t scan_only_length);
  10.131 +  void record_cset_region_info(HeapRegion* hr, bool young);
  10.132 +  void record_non_young_cset_region(HeapRegion* hr);
  10.133 +
  10.134 +  void set_recorded_young_regions(size_t n_regions);
  10.135 +  void set_recorded_young_bytes(size_t bytes);
  10.136 +  void set_recorded_rs_lengths(size_t rs_lengths);
  10.137 +  void set_predicted_bytes_to_copy(size_t bytes);
  10.138 +
  10.139    void end_recording_regions();
  10.140  
  10.141    void record_vtime_diff_ms(double vtime_diff_ms) {
  10.142 @@ -638,11 +607,74 @@
  10.143    void update_recent_gc_times(double end_time_sec, double elapsed_ms);
  10.144  
  10.145    // The head of the list (via "next_in_collection_set()") representing the
  10.146 -  // current collection set.
  10.147 +  // current collection set. Set from the incrementally built collection
  10.148 +  // set at the start of the pause.
  10.149    HeapRegion* _collection_set;
  10.150 +
  10.151 +  // The number of regions in the collection set. Set from the incrementally
  10.152 +  // built collection set at the start of an evacuation pause.
  10.153    size_t _collection_set_size;
  10.154 +
  10.155 +  // The number of bytes in the collection set before the pause. Set from
  10.156 +  // the incrementally built collection set at the start of an evacuation
  10.157 +  // pause.
  10.158    size_t _collection_set_bytes_used_before;
  10.159  
  10.160 +  // The associated information that is maintained while the incremental
  10.161 +  // collection set is being built with young regions. Used to populate
  10.162 +  // the recorded info for the evacuation pause.
  10.163 +
  10.164 +  enum CSetBuildType {
  10.165 +    Active,             // We are actively building the collection set
  10.166 +    Inactive            // We are not actively building the collection set
  10.167 +  };
  10.168 +
  10.169 +  CSetBuildType _inc_cset_build_state;
  10.170 +
  10.171 +  // The head of the incrementally built collection set.
  10.172 +  HeapRegion* _inc_cset_head;
  10.173 +
  10.174 +  // The tail of the incrementally built collection set.
  10.175 +  HeapRegion* _inc_cset_tail;
  10.176 +
  10.177 +  // The number of regions in the incrementally built collection set.
  10.178 +  // Used to set _collection_set_size at the start of an evacuation
  10.179 +  // pause.
  10.180 +  size_t _inc_cset_size;
  10.181 +
  10.182 +  // Used as the index in the surving young words structure
  10.183 +  // which tracks the amount of space, for each young region,
  10.184 +  // that survives the pause.
  10.185 +  size_t _inc_cset_young_index;
  10.186 +
  10.187 +  // The number of bytes in the incrementally built collection set.
  10.188 +  // Used to set _collection_set_bytes_used_before at the start of
  10.189 +  // an evacuation pause.
  10.190 +  size_t _inc_cset_bytes_used_before;
  10.191 +
  10.192 +  // Used to record the highest end of heap region in collection set
  10.193 +  HeapWord* _inc_cset_max_finger;
  10.194 +
  10.195 +  // The number of recorded used bytes in the young regions
  10.196 +  // of the collection set. This is the sum of the used() bytes
  10.197 +  // of retired young regions in the collection set.
  10.198 +  size_t _inc_cset_recorded_young_bytes;
  10.199 +
  10.200 +  // The RSet lengths recorded for regions in the collection set
  10.201 +  // (updated by the periodic sampling of the regions in the
  10.202 +  // young list/collection set).
  10.203 +  size_t _inc_cset_recorded_rs_lengths;
  10.204 +
  10.205 +  // The predicted elapsed time it will take to collect the regions
  10.206 +  // in the collection set (updated by the periodic sampling of the
  10.207 +  // regions in the young list/collection set).
  10.208 +  double _inc_cset_predicted_elapsed_time_ms;
  10.209 +
  10.210 +  // The predicted bytes to copy for the regions in the collection
  10.211 +  // set (updated by the periodic sampling of the regions in the
  10.212 +  // young list/collection set).
  10.213 +  size_t _inc_cset_predicted_bytes_to_copy;
  10.214 +
  10.215    // Info about marking.
  10.216    int _n_marks; // Sticky at 2, so we know when we've done at least 2.
  10.217  
  10.218 @@ -761,9 +793,8 @@
  10.219    double _mark_closure_time_ms;
  10.220  
  10.221    void   calculate_young_list_min_length();
  10.222 -  void   calculate_young_list_target_config();
  10.223 -  void   calculate_young_list_target_config(size_t rs_lengths);
  10.224 -  size_t calculate_optimal_so_length(size_t young_list_length);
  10.225 +  void   calculate_young_list_target_length();
  10.226 +  void   calculate_young_list_target_length(size_t rs_lengths);
  10.227  
  10.228  public:
  10.229  
  10.230 @@ -868,11 +899,6 @@
  10.231      _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  10.232    }
  10.233  
  10.234 -  void record_scan_only_time(int worker_i, double ms, int n) {
  10.235 -    _par_last_scan_only_times_ms[worker_i] = ms;
  10.236 -    _par_last_scan_only_regions_scanned[worker_i] = (double) n;
  10.237 -  }
  10.238 -
  10.239    void record_satb_drain_time(double ms) {
  10.240      _cur_satb_drain_time_ms = ms;
  10.241      _satb_drain_time_set    = true;
  10.242 @@ -987,20 +1013,67 @@
  10.243    // Choose a new collection set.  Marks the chosen regions as being
  10.244    // "in_collection_set", and links them together.  The head and number of
  10.245    // the collection set are available via access methods.
  10.246 -  virtual void choose_collection_set() = 0;
  10.247 -
  10.248 -  void clear_collection_set() { _collection_set = NULL; }
  10.249 +  virtual bool choose_collection_set() = 0;
  10.250  
  10.251    // The head of the list (via "next_in_collection_set()") representing the
  10.252    // current collection set.
  10.253    HeapRegion* collection_set() { return _collection_set; }
  10.254  
  10.255 +  void clear_collection_set() { _collection_set = NULL; }
  10.256 +
  10.257    // The number of elements in the current collection set.
  10.258    size_t collection_set_size() { return _collection_set_size; }
  10.259  
  10.260    // Add "hr" to the CS.
  10.261    void add_to_collection_set(HeapRegion* hr);
  10.262  
  10.263 +  // Incremental CSet Support
  10.264 +
  10.265 +  // The head of the incrementally built collection set.
  10.266 +  HeapRegion* inc_cset_head() { return _inc_cset_head; }
  10.267 +
  10.268 +  // The tail of the incrementally built collection set.
  10.269 +  HeapRegion* inc_set_tail() { return _inc_cset_tail; }
  10.270 +
  10.271 +  // The number of elements in the incrementally built collection set.
  10.272 +  size_t inc_cset_size() { return _inc_cset_size; }
  10.273 +
  10.274 +  // Initialize incremental collection set info.
  10.275 +  void start_incremental_cset_building();
  10.276 +
  10.277 +  void clear_incremental_cset() {
  10.278 +    _inc_cset_head = NULL;
  10.279 +    _inc_cset_tail = NULL;
  10.280 +  }
  10.281 +
  10.282 +  // Stop adding regions to the incremental collection set
  10.283 +  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
  10.284 +
  10.285 +  // Add/remove information about hr to the aggregated information
  10.286 +  // for the incrementally built collection set.
  10.287 +  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  10.288 +  void remove_from_incremental_cset_info(HeapRegion* hr);
  10.289 +
  10.290 +  // Update information about hr in the aggregated information for
  10.291 +  // the incrementally built collection set.
  10.292 +  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
  10.293 +
  10.294 +private:
  10.295 +  // Update the incremental cset information when adding a region
  10.296 +  // (should not be called directly).
  10.297 +  void add_region_to_incremental_cset_common(HeapRegion* hr);
  10.298 +
  10.299 +public:
  10.300 +  // Add hr to the LHS of the incremental collection set.
  10.301 +  void add_region_to_incremental_cset_lhs(HeapRegion* hr);
  10.302 +
  10.303 +  // Add hr to the RHS of the incremental collection set.
  10.304 +  void add_region_to_incremental_cset_rhs(HeapRegion* hr);
  10.305 +
  10.306 +#ifndef PRODUCT
  10.307 +  void print_collection_set(HeapRegion* list_head, outputStream* st);
  10.308 +#endif // !PRODUCT
  10.309 +
  10.310    bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  10.311    void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  10.312    void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
  10.313 @@ -1191,7 +1264,7 @@
  10.314    // If the estimated is less then desirable, resize if possible.
  10.315    void expand_if_possible(size_t numRegions);
  10.316  
  10.317 -  virtual void choose_collection_set();
  10.318 +  virtual bool choose_collection_set();
  10.319    virtual void record_collection_pause_start(double start_time_sec,
  10.320                                               size_t start_used);
  10.321    virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
    11.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Apr 21 01:13:15 2010 -0700
    11.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Apr 26 18:01:55 2010 -0400
    11.3 @@ -1,5 +1,5 @@
    11.4  /*
    11.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    11.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    11.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.8   *
    11.9   * This code is free software; you can redistribute it and/or modify it
   11.10 @@ -31,6 +31,12 @@
   11.11                                        bool clear_all_softrefs) {
   11.12    assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   11.13  
   11.14 +  SharedHeap* sh = SharedHeap::heap();
   11.15 +#ifdef ASSERT
   11.16 +  if (sh->collector_policy()->should_clear_all_soft_refs()) {
   11.17 +    assert(clear_all_softrefs, "Policy should have been checked earler");
   11.18 +  }
   11.19 +#endif
   11.20    // hook up weak ref data so it can be used during Mark-Sweep
   11.21    assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
   11.22    assert(rp != NULL, "should be non-NULL");
   11.23 @@ -44,7 +50,6 @@
   11.24  
   11.25    // Increment the invocation count for the permanent generation, since it is
   11.26    // implicitly collected whenever we do a full mark sweep collection.
   11.27 -  SharedHeap* sh = SharedHeap::heap();
   11.28    sh->perm_gen()->stat_record()->invocations++;
   11.29  
   11.30    bool marked_for_unloading = false;
    12.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Apr 21 01:13:15 2010 -0700
    12.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Apr 26 18:01:55 2010 -0400
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    12.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -28,9 +28,6 @@
   12.11  
   12.12  #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
   12.13                                                                              \
   12.14 -  product(intx, G1ParallelGCAllocBufferSize, 8*K,                           \
   12.15 -          "Size of parallel G1 allocation buffers in to-space.")            \
   12.16 -                                                                            \
   12.17    product(intx, G1ConfidencePercent, 50,                                    \
   12.18            "Confidence level for MMU/pause predictions")                     \
   12.19                                                                              \
   12.20 @@ -229,10 +226,6 @@
   12.21            "the number of regions for which we'll print a surv rate "        \
   12.22            "summary.")                                                       \
   12.23                                                                              \
   12.24 -  develop(bool, G1UseScanOnlyPrefix, false,                                 \
   12.25 -          "It determines whether the system will calculate an optimum "     \
   12.26 -          "scan-only set.")                                                 \
   12.27 -                                                                            \
   12.28    product(intx, G1ReservePercent, 10,                                       \
   12.29            "It determines the minimum reserve we should have in the heap "   \
   12.30            "to minimize the probability of promotion failure.")              \
    13.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Apr 21 01:13:15 2010 -0700
    13.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Apr 26 18:01:55 2010 -0400
    13.3 @@ -1,5 +1,5 @@
    13.4  /*
    13.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    13.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    13.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8   *
    13.9   * This code is free software; you can redistribute it and/or modify it
   13.10 @@ -75,6 +75,16 @@
   13.11    virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   13.12    virtual void do_oop(      oop* p) { do_oop_work(p); }
   13.13  
   13.14 +  void print_object(outputStream* out, oop obj) {
   13.15 +#ifdef PRODUCT
   13.16 +    klassOop k = obj->klass();
   13.17 +    const char* class_name = instanceKlass::cast(k)->external_name();
   13.18 +    out->print_cr("class name %s", class_name);
   13.19 +#else // PRODUCT
   13.20 +    obj->print_on(out);
   13.21 +#endif // PRODUCT
   13.22 +  }
   13.23 +
   13.24    template <class T> void do_oop_work(T* p) {
   13.25      assert(_containing_obj != NULL, "Precondition");
   13.26      assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
   13.27 @@ -90,21 +100,29 @@
   13.28            gclog_or_tty->print_cr("----------");
   13.29          }
   13.30          if (!_g1h->is_in_closed_subset(obj)) {
   13.31 +          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
   13.32            gclog_or_tty->print_cr("Field "PTR_FORMAT
   13.33 -                        " of live obj "PTR_FORMAT
   13.34 -                        " points to obj "PTR_FORMAT
   13.35 -                        " not in the heap.",
   13.36 -                        p, (void*) _containing_obj, (void*) obj);
   13.37 +                                 " of live obj "PTR_FORMAT" in region "
   13.38 +                                 "["PTR_FORMAT", "PTR_FORMAT")",
   13.39 +                                 p, (void*) _containing_obj,
   13.40 +                                 from->bottom(), from->end());
   13.41 +          print_object(gclog_or_tty, _containing_obj);
   13.42 +          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
   13.43 +                                 (void*) obj);
   13.44          } else {
   13.45 +          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
   13.46 +          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
   13.47            gclog_or_tty->print_cr("Field "PTR_FORMAT
   13.48 -                        " of live obj "PTR_FORMAT
   13.49 -                        " points to dead obj "PTR_FORMAT".",
   13.50 -                        p, (void*) _containing_obj, (void*) obj);
   13.51 +                                 " of live obj "PTR_FORMAT" in region "
   13.52 +                                 "["PTR_FORMAT", "PTR_FORMAT")",
   13.53 +                                 p, (void*) _containing_obj,
   13.54 +                                 from->bottom(), from->end());
   13.55 +          print_object(gclog_or_tty, _containing_obj);
   13.56 +          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
   13.57 +                                 "["PTR_FORMAT", "PTR_FORMAT")",
   13.58 +                                 (void*) obj, to->bottom(), to->end());
   13.59 +          print_object(gclog_or_tty, obj);
   13.60          }
   13.61 -        gclog_or_tty->print_cr("Live obj:");
   13.62 -        _containing_obj->print_on(gclog_or_tty);
   13.63 -        gclog_or_tty->print_cr("Bad referent:");
   13.64 -        obj->print_on(gclog_or_tty);
   13.65          gclog_or_tty->print_cr("----------");
   13.66          _failures = true;
   13.67          failed = true;
   13.68 @@ -432,7 +450,9 @@
   13.69      _young_type(NotYoung), _next_young_region(NULL),
   13.70      _next_dirty_cards_region(NULL),
   13.71      _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
   13.72 -    _rem_set(NULL), _zfs(NotZeroFilled)
   13.73 +    _rem_set(NULL), _zfs(NotZeroFilled),
   13.74 +    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
   13.75 +    _predicted_bytes_to_copy(0)
   13.76  {
   13.77    _orig_end = mr.end();
   13.78    // Note that initialize() will set the start of the unmarked area of the
   13.79 @@ -715,7 +735,7 @@
   13.80    else
   13.81      st->print("   ");
   13.82    if (is_young())
   13.83 -    st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
   13.84 +    st->print(is_survivor() ? " SU" : " Y ");
   13.85    else
   13.86      st->print("   ");
   13.87    if (is_empty())
   13.88 @@ -723,6 +743,8 @@
   13.89    else
   13.90      st->print("  ");
   13.91    st->print(" %5d", _gc_time_stamp);
   13.92 +  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
   13.93 +            prev_top_at_mark_start(), next_top_at_mark_start());
   13.94    G1OffsetTableContigSpace::print_on(st);
   13.95  }
   13.96  
    14.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Apr 21 01:13:15 2010 -0700
    14.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Apr 26 18:01:55 2010 -0400
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    14.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -247,7 +247,6 @@
   14.11  
   14.12    enum YoungType {
   14.13      NotYoung,                   // a region is not young
   14.14 -    ScanOnly,                   // a region is young and scan-only
   14.15      Young,                      // a region is young
   14.16      Survivor                    // a region is young and it contains
   14.17                                  // survivor
   14.18 @@ -292,6 +291,20 @@
   14.19      _young_type = new_type;
   14.20    }
   14.21  
   14.22 +  // Cached attributes used in the collection set policy information
   14.23 +
   14.24 +  // The RSet length that was added to the total value
   14.25 +  // for the collection set.
   14.26 +  size_t _recorded_rs_length;
   14.27 +
   14.28 +  // The predicted elapsed time that was added to total value
   14.29 +  // for the collection set.
   14.30 +  double _predicted_elapsed_time_ms;
   14.31 +
   14.32 +  // The predicted number of bytes to copy that was added to
   14.33 +  // the total value for the collection set.
   14.34 +  size_t _predicted_bytes_to_copy;
   14.35 +
   14.36   public:
   14.37    // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
   14.38    HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
   14.39 @@ -614,7 +627,6 @@
   14.40    // </PREDICTION>
   14.41  
   14.42    bool is_young() const     { return _young_type != NotYoung; }
   14.43 -  bool is_scan_only() const { return _young_type == ScanOnly; }
   14.44    bool is_survivor() const  { return _young_type == Survivor; }
   14.45  
   14.46    int  young_index_in_cset() const { return _young_index_in_cset; }
   14.47 @@ -629,12 +641,6 @@
   14.48      return _surv_rate_group->age_in_group(_age_index);
   14.49    }
   14.50  
   14.51 -  void recalculate_age_in_surv_rate_group() {
   14.52 -    assert( _surv_rate_group != NULL, "pre-condition" );
   14.53 -    assert( _age_index > -1, "pre-condition" );
   14.54 -    _age_index = _surv_rate_group->recalculate_age_index(_age_index);
   14.55 -  }
   14.56 -
   14.57    void record_surv_words_in_group(size_t words_survived) {
   14.58      assert( _surv_rate_group != NULL, "pre-condition" );
   14.59      assert( _age_index > -1, "pre-condition" );
   14.60 @@ -676,8 +682,6 @@
   14.61  
   14.62    void set_young() { set_young_type(Young); }
   14.63  
   14.64 -  void set_scan_only() { set_young_type(ScanOnly); }
   14.65 -
   14.66    void set_survivor() { set_young_type(Survivor); }
   14.67  
   14.68    void set_not_young() { set_young_type(NotYoung); }
   14.69 @@ -775,6 +779,22 @@
   14.70      _zero_filler = NULL;
   14.71    }
   14.72  
   14.73 +  size_t recorded_rs_length() const        { return _recorded_rs_length; }
   14.74 +  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
   14.75 +  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
   14.76 +
   14.77 +  void set_recorded_rs_length(size_t rs_length) {
   14.78 +    _recorded_rs_length = rs_length;
   14.79 +  }
   14.80 +
   14.81 +  void set_predicted_elapsed_time_ms(double ms) {
   14.82 +    _predicted_elapsed_time_ms = ms;
   14.83 +  }
   14.84 +
   14.85 +  void set_predicted_bytes_to_copy(size_t bytes) {
   14.86 +    _predicted_bytes_to_copy = bytes;
   14.87 +  }
   14.88 +
   14.89  #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
   14.90    virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
   14.91    SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
    15.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Apr 21 01:13:15 2010 -0700
    15.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Apr 26 18:01:55 2010 -0400
    15.3 @@ -662,8 +662,6 @@
    15.4          prt = PosParPRT::alloc(from_hr);
    15.5        }
    15.6        prt->init(from_hr);
    15.7 -      // Record the outgoing pointer in the from_region's outgoing bitmap.
    15.8 -      from_hr->rem_set()->add_outgoing_reference(hr());
    15.9  
   15.10        PosParPRT* first_prt = _fine_grain_regions[ind];
   15.11        prt->set_next(first_prt);  // XXX Maybe move to init?
   15.12 @@ -1073,11 +1071,7 @@
   15.13  
   15.14  HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
   15.15                                     HeapRegion* hr)
   15.16 -    : _bosa(bosa), _other_regions(hr),
   15.17 -      _outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
   15.18 -                           false /* in-resource-area */),
   15.19 -      _iter_state(Unclaimed)
   15.20 -{}
   15.21 +  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
   15.22  
   15.23  
   15.24  void HeapRegionRemSet::setup_remset_size() {
   15.25 @@ -1148,30 +1142,11 @@
   15.26    PosParPRT::par_contract_all();
   15.27  }
   15.28  
   15.29 -void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) {
   15.30 -  _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1);
   15.31 -}
   15.32 -
   15.33  void HeapRegionRemSet::clear() {
   15.34 -  clear_outgoing_entries();
   15.35 -  _outgoing_region_map.clear();
   15.36    _other_regions.clear();
   15.37    assert(occupied() == 0, "Should be clear.");
   15.38  }
   15.39  
   15.40 -void HeapRegionRemSet::clear_outgoing_entries() {
   15.41 -  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   15.42 -  size_t i = _outgoing_region_map.get_next_one_offset(0);
   15.43 -  while (i < _outgoing_region_map.size()) {
   15.44 -    HeapRegion* to_region = g1h->region_at(i);
   15.45 -    if (!to_region->in_collection_set()) {
   15.46 -      to_region->rem_set()->clear_incoming_entry(hr());
   15.47 -    }
   15.48 -    i = _outgoing_region_map.get_next_one_offset(i+1);
   15.49 -  }
   15.50 -}
   15.51 -
   15.52 -
   15.53  void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
   15.54                               BitMap* region_bm, BitMap* card_bm) {
   15.55    _other_regions.scrub(ctbs, region_bm, card_bm);
    16.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Apr 21 01:13:15 2010 -0700
    16.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Mon Apr 26 18:01:55 2010 -0400
    16.3 @@ -179,13 +179,6 @@
    16.4  
    16.5    OtherRegionsTable _other_regions;
    16.6  
    16.7 -  // One set bit for every region that has an entry for this one.
    16.8 -  BitMap _outgoing_region_map;
    16.9 -
   16.10 -  // Clear entries for the current region in any rem sets named in
   16.11 -  // the _outgoing_region_map.
   16.12 -  void clear_outgoing_entries();
   16.13 -
   16.14    enum ParIterState { Unclaimed, Claimed, Complete };
   16.15    volatile ParIterState _iter_state;
   16.16    volatile jlong _iter_claimed;
   16.17 @@ -243,10 +236,6 @@
   16.18      _other_regions.add_reference(from, tid);
   16.19    }
   16.20  
   16.21 -  // Records the fact that the current region contains an outgoing
   16.22 -  // reference into "to_hr".
   16.23 -  void add_outgoing_reference(HeapRegion* to_hr);
   16.24 -
   16.25    // Removes any entries shown by the given bitmaps to contain only dead
   16.26    // objects.
   16.27    void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
    17.1 --- a/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Wed Apr 21 01:13:15 2010 -0700
    17.2 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Mon Apr 26 18:01:55 2010 -0400
    17.3 @@ -1,5 +1,5 @@
    17.4  /*
    17.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    17.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    17.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    17.8   *
    17.9   * This code is free software; you can redistribute it and/or modify it
   17.10 @@ -55,7 +55,6 @@
   17.11  void SurvRateGroup::reset()
   17.12  {
   17.13    _all_regions_allocated = 0;
   17.14 -  _scan_only_prefix      = 0;
   17.15    _setup_seq_num         = 0;
   17.16    _stats_arrays_length   = 0;
   17.17    _accum_surv_rate       = 0.0;
   17.18 @@ -74,7 +73,7 @@
   17.19  void
   17.20  SurvRateGroup::start_adding_regions() {
   17.21    _setup_seq_num   = _stats_arrays_length;
   17.22 -  _region_num      = _scan_only_prefix;
   17.23 +  _region_num      = 0;
   17.24    _accum_surv_rate = 0.0;
   17.25  
   17.26  #if 0
   17.27 @@ -164,12 +163,6 @@
   17.28  }
   17.29  
   17.30  void
   17.31 -SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
   17.32 -  guarantee( scan_only_prefix <= _region_num, "pre-condition" );
   17.33 -  _scan_only_prefix = scan_only_prefix;
   17.34 -}
   17.35 -
   17.36 -void
   17.37  SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
   17.38    guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
   17.39               "pre-condition" );
   17.40 @@ -218,13 +211,12 @@
   17.41  #ifndef PRODUCT
   17.42  void
   17.43  SurvRateGroup::print() {
   17.44 -  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
   17.45 -                _name, _region_num, _scan_only_prefix);
   17.46 +  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)",
   17.47 +                _name, _region_num);
   17.48    for (size_t i = 0; i < _region_num; ++i) {
   17.49 -    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%%s",
   17.50 +    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%",
   17.51                    i, _surv_rate[i] * 100.0,
   17.52 -                  _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,
   17.53 -                  (i < _scan_only_prefix) ? " S-O" : "    ");
   17.54 +                  _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
   17.55    }
   17.56  }
   17.57  
    18.1 --- a/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Wed Apr 21 01:13:15 2010 -0700
    18.2 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Mon Apr 26 18:01:55 2010 -0400
    18.3 @@ -1,5 +1,5 @@
    18.4  /*
    18.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    18.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    18.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    18.8   *
    18.9   * This code is free software; you can redistribute it and/or modify it
   18.10 @@ -41,7 +41,6 @@
   18.11  
   18.12    int _all_regions_allocated;
   18.13    size_t _region_num;
   18.14 -  size_t _scan_only_prefix;
   18.15    size_t _setup_seq_num;
   18.16  
   18.17  public:
   18.18 @@ -51,13 +50,11 @@
   18.19    void reset();
   18.20    void start_adding_regions();
   18.21    void stop_adding_regions();
   18.22 -  void record_scan_only_prefix(size_t scan_only_prefix);
   18.23    void record_surviving_words(int age_in_group, size_t surv_words);
   18.24    void all_surviving_words_recorded(bool propagate);
   18.25    const char* name() { return _name; }
   18.26  
   18.27    size_t region_num() { return _region_num; }
   18.28 -  size_t scan_only_length() { return _scan_only_prefix; }
   18.29    double accum_surv_rate_pred(int age) {
   18.30      assert(age >= 0, "must be");
   18.31      if ((size_t)age < _stats_arrays_length)
   18.32 @@ -82,17 +79,12 @@
   18.33  
   18.34    int next_age_index();
   18.35    int age_in_group(int age_index) {
   18.36 -    int ret = (int) (_all_regions_allocated -  age_index);
   18.37 +    int ret = (int) (_all_regions_allocated - age_index);
   18.38      assert( ret >= 0, "invariant" );
   18.39      return ret;
   18.40    }
   18.41 -  int recalculate_age_index(int age_index) {
   18.42 -    int new_age_index = (int) _scan_only_prefix - age_in_group(age_index);
   18.43 -    guarantee( new_age_index >= 0, "invariant" );
   18.44 -    return new_age_index;
   18.45 -  }
   18.46    void finished_recalculating_age_indexes() {
   18.47 -    _all_regions_allocated = (int) _scan_only_prefix;
   18.48 +    _all_regions_allocated = 0;
   18.49    }
   18.50  
   18.51  #ifndef PRODUCT
    19.1 --- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Wed Apr 21 01:13:15 2010 -0700
    19.2 +++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Mon Apr 26 18:01:55 2010 -0400
    19.3 @@ -1,5 +1,5 @@
    19.4  //
    19.5 -// Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    19.6 +// Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    19.7  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    19.8  //
    19.9  // This code is free software; you can redistribute it and/or modify it
   19.10 @@ -161,8 +161,10 @@
   19.11  parMarkBitMap.hpp			bitMap.inline.hpp
   19.12  parMarkBitMap.hpp			psVirtualspace.hpp
   19.13  
   19.14 +psAdaptiveSizePolicy.cpp                collectorPolicy.hpp
   19.15  psAdaptiveSizePolicy.cpp                gcPolicyCounters.hpp
   19.16  psAdaptiveSizePolicy.cpp		gcCause.hpp
   19.17 +psAdaptiveSizePolicy.cpp                generationSizer.hpp
   19.18  psAdaptiveSizePolicy.cpp                psAdaptiveSizePolicy.hpp
   19.19  psAdaptiveSizePolicy.cpp                psGCAdaptivePolicyCounters.hpp
   19.20  psAdaptiveSizePolicy.cpp                psScavenge.hpp
   19.21 @@ -215,6 +217,7 @@
   19.22  psMarkSweep.cpp                         fprofiler.hpp
   19.23  psMarkSweep.cpp                         gcCause.hpp
   19.24  psMarkSweep.cpp                         gcLocker.inline.hpp
   19.25 +psMarkSweep.cpp                         generationSizer.hpp
   19.26  psMarkSweep.cpp                         isGCActiveMark.hpp
   19.27  psMarkSweep.cpp                         oop.inline.hpp
   19.28  psMarkSweep.cpp                         memoryService.hpp
   19.29 @@ -256,6 +259,7 @@
   19.30  psParallelCompact.cpp			gcCause.hpp
   19.31  psParallelCompact.cpp			gcLocker.inline.hpp
   19.32  psParallelCompact.cpp                   gcTaskManager.hpp
   19.33 +psParallelCompact.cpp                   generationSizer.hpp
   19.34  psParallelCompact.cpp			isGCActiveMark.hpp
   19.35  psParallelCompact.cpp			management.hpp
   19.36  psParallelCompact.cpp			memoryService.hpp
   19.37 @@ -344,10 +348,12 @@
   19.38  psScavenge.cpp                          psAdaptiveSizePolicy.hpp
   19.39  psScavenge.cpp                          biasedLocking.hpp
   19.40  psScavenge.cpp                          cardTableExtension.hpp
   19.41 +psScavenge.cpp                          collectorPolicy.hpp
   19.42  psScavenge.cpp                          fprofiler.hpp
   19.43  psScavenge.cpp                          gcCause.hpp
   19.44  psScavenge.cpp                          gcLocker.inline.hpp
   19.45  psScavenge.cpp                          gcTaskManager.hpp
   19.46 +psScavenge.cpp                          generationSizer.hpp
   19.47  psScavenge.cpp                          handles.inline.hpp
   19.48  psScavenge.cpp                          isGCActiveMark.hpp
   19.49  psScavenge.cpp                          oop.inline.hpp
    20.1 --- a/src/share/vm/gc_implementation/includeDB_gc_serial	Wed Apr 21 01:13:15 2010 -0700
    20.2 +++ b/src/share/vm/gc_implementation/includeDB_gc_serial	Mon Apr 26 18:01:55 2010 -0400
    20.3 @@ -1,5 +1,5 @@
    20.4  //
    20.5 -// Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
    20.6 +// Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
    20.7  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.8  //   
    20.9  // This code is free software; you can redistribute it and/or modify it
   20.10 @@ -29,6 +29,7 @@
   20.11  adaptiveSizePolicy.hpp			universe.hpp
   20.12  
   20.13  adaptiveSizePolicy.cpp			adaptiveSizePolicy.hpp
   20.14 +adaptiveSizePolicy.cpp			collectorPolicy.hpp
   20.15  adaptiveSizePolicy.cpp			gcCause.hpp
   20.16  adaptiveSizePolicy.cpp			ostream.hpp
   20.17  adaptiveSizePolicy.cpp			timer.hpp
    21.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Apr 21 01:13:15 2010 -0700
    21.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Apr 26 18:01:55 2010 -0400
    21.3 @@ -1,5 +1,5 @@
    21.4  /*
    21.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    21.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    21.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    21.8   *
    21.9   * This code is free software; you can redistribute it and/or modify it
   21.10 @@ -892,6 +892,10 @@
   21.11      }
   21.12      swap_spaces();
   21.13  
   21.14 +    // A successful scavenge should restart the GC time limit count which is
   21.15 +    // for full GC's.
   21.16 +    size_policy->reset_gc_overhead_limit_count();
   21.17 +
   21.18      assert(to()->is_empty(), "to space should be empty now");
   21.19    } else {
   21.20      assert(HandlePromotionFailure,
    22.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Apr 21 01:13:15 2010 -0700
    22.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Apr 26 18:01:55 2010 -0400
    22.3 @@ -1,5 +1,5 @@
    22.4  /*
    22.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    22.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    22.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    22.8   *
    22.9   * This code is free software; you can redistribute it and/or modify it
   22.10 @@ -54,15 +54,16 @@
   22.11    CollectedHeap::pre_initialize();
   22.12  
   22.13    // Cannot be initialized until after the flags are parsed
   22.14 -  GenerationSizer flag_parser;
   22.15 +  // GenerationSizer flag_parser;
   22.16 +  _collector_policy = new GenerationSizer();
   22.17  
   22.18 -  size_t yg_min_size = flag_parser.min_young_gen_size();
   22.19 -  size_t yg_max_size = flag_parser.max_young_gen_size();
   22.20 -  size_t og_min_size = flag_parser.min_old_gen_size();
   22.21 -  size_t og_max_size = flag_parser.max_old_gen_size();
   22.22 +  size_t yg_min_size = _collector_policy->min_young_gen_size();
   22.23 +  size_t yg_max_size = _collector_policy->max_young_gen_size();
   22.24 +  size_t og_min_size = _collector_policy->min_old_gen_size();
   22.25 +  size_t og_max_size = _collector_policy->max_old_gen_size();
   22.26    // Why isn't there a min_perm_gen_size()?
   22.27 -  size_t pg_min_size = flag_parser.perm_gen_size();
   22.28 -  size_t pg_max_size = flag_parser.max_perm_gen_size();
   22.29 +  size_t pg_min_size = _collector_policy->perm_gen_size();
   22.30 +  size_t pg_max_size = _collector_policy->max_perm_gen_size();
   22.31  
   22.32    trace_gen_sizes("ps heap raw",
   22.33                    pg_min_size, pg_max_size,
   22.34 @@ -89,12 +90,14 @@
   22.35    // move to the common code.
   22.36    yg_min_size = align_size_up(yg_min_size, yg_align);
   22.37    yg_max_size = align_size_up(yg_max_size, yg_align);
   22.38 -  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
   22.39 +  size_t yg_cur_size =
   22.40 +    align_size_up(_collector_policy->young_gen_size(), yg_align);
   22.41    yg_cur_size = MAX2(yg_cur_size, yg_min_size);
   22.42  
   22.43    og_min_size = align_size_up(og_min_size, og_align);
   22.44    og_max_size = align_size_up(og_max_size, og_align);
   22.45 -  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
   22.46 +  size_t og_cur_size =
   22.47 +    align_size_up(_collector_policy->old_gen_size(), og_align);
   22.48    og_cur_size = MAX2(og_cur_size, og_min_size);
   22.49  
   22.50    pg_min_size = align_size_up(pg_min_size, pg_align);
   22.51 @@ -355,6 +358,11 @@
   22.52    assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
   22.53    assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
   22.54  
   22.55 +  // In general gc_overhead_limit_was_exceeded should be false so
   22.56 +  // set it so here and reset it to true only if the gc time
   22.57 +  // limit is being exceeded as checked below.
   22.58 +  *gc_overhead_limit_was_exceeded = false;
   22.59 +
   22.60    HeapWord* result = young_gen()->allocate(size, is_tlab);
   22.61  
   22.62    uint loop_count = 0;
   22.63 @@ -428,24 +436,6 @@
   22.64  
   22.65      if (result == NULL) {
   22.66  
   22.67 -      // Exit the loop if if the gc time limit has been exceeded.
   22.68 -      // The allocation must have failed above (result must be NULL),
   22.69 -      // and the most recent collection must have exceeded the
   22.70 -      // gc time limit.  Exit the loop so that an out-of-memory
   22.71 -      // will be thrown (returning a NULL will do that), but
   22.72 -      // clear gc_time_limit_exceeded so that the next collection
   22.73 -      // will succeeded if the applications decides to handle the
   22.74 -      // out-of-memory and tries to go on.
   22.75 -      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
   22.76 -      if (size_policy()->gc_time_limit_exceeded()) {
   22.77 -        size_policy()->set_gc_time_limit_exceeded(false);
   22.78 -        if (PrintGCDetails && Verbose) {
   22.79 -        gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
   22.80 -          "return NULL because gc_time_limit_exceeded is set");
   22.81 -        }
   22.82 -        return NULL;
   22.83 -      }
   22.84 -
   22.85        // Generate a VM operation
   22.86        VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
   22.87        VMThread::execute(&op);
   22.88 @@ -463,16 +453,34 @@
   22.89            assert(op.result() == NULL, "must be NULL if gc_locked() is true");
   22.90            continue;  // retry and/or stall as necessary
   22.91          }
   22.92 -        // If a NULL result is being returned, an out-of-memory
   22.93 -        // will be thrown now.  Clear the gc_time_limit_exceeded
   22.94 -        // flag to avoid the following situation.
   22.95 -        //      gc_time_limit_exceeded is set during a collection
   22.96 -        //      the collection fails to return enough space and an OOM is thrown
   22.97 -        //      the next GC is skipped because the gc_time_limit_exceeded
   22.98 -        //        flag is set and another OOM is thrown
   22.99 -        if (op.result() == NULL) {
  22.100 -          size_policy()->set_gc_time_limit_exceeded(false);
  22.101 +
  22.102 +        // Exit the loop if the gc time limit has been exceeded.
  22.103 +        // The allocation must have failed above ("result" guarding
  22.104 +        // this path is NULL) and the most recent collection has exceeded the
  22.105 +        // gc overhead limit (although enough may have been collected to
  22.106 +        // satisfy the allocation).  Exit the loop so that an out-of-memory
  22.107 +        // will be thrown (return a NULL ignoring the contents of
  22.108 +        // op.result()),
  22.109 +        // but clear gc_overhead_limit_exceeded so that the next collection
  22.110 +        // starts with a clean slate (i.e., forgets about previous overhead
  22.111 +        // excesses).  Fill op.result() with a filler object so that the
  22.112 +        // heap remains parsable.
  22.113 +        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
  22.114 +        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
  22.115 +        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
  22.116 +        if (limit_exceeded && softrefs_clear) {
  22.117 +          *gc_overhead_limit_was_exceeded = true;
  22.118 +          size_policy()->set_gc_overhead_limit_exceeded(false);
  22.119 +          if (PrintGCDetails && Verbose) {
  22.120 +            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
  22.121 +              "return NULL because gc_overhead_limit_exceeded is set");
  22.122 +          }
  22.123 +          if (op.result() != NULL) {
  22.124 +            CollectedHeap::fill_with_object(op.result(), size);
  22.125 +          }
  22.126 +          return NULL;
  22.127          }
  22.128 +
  22.129          return op.result();
  22.130        }
  22.131      }
  22.132 @@ -613,14 +621,15 @@
  22.133        // and the most recent collection must have exceeded the
  22.134        // gc time limit.  Exit the loop so that an out-of-memory
  22.135        // will be thrown (returning a NULL will do that), but
  22.136 -      // clear gc_time_limit_exceeded so that the next collection
  22.137 +      // clear gc_overhead_limit_exceeded so that the next collection
  22.138        // will succeeded if the applications decides to handle the
  22.139        // out-of-memory and tries to go on.
  22.140 -      if (size_policy()->gc_time_limit_exceeded()) {
  22.141 -        size_policy()->set_gc_time_limit_exceeded(false);
  22.142 +      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
  22.143 +      if (limit_exceeded) {
  22.144 +        size_policy()->set_gc_overhead_limit_exceeded(false);
  22.145          if (PrintGCDetails && Verbose) {
  22.146 -        gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
  22.147 -          "return NULL because gc_time_limit_exceeded is set");
  22.148 +          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
  22.149 +            " return NULL because gc_overhead_limit_exceeded is set");
  22.150          }
  22.151          assert(result == NULL, "Allocation did not fail");
  22.152          return NULL;
  22.153 @@ -643,14 +652,15 @@
  22.154            continue;  // retry and/or stall as necessary
  22.155          }
  22.156          // If a NULL results is being returned, an out-of-memory
  22.157 -        // will be thrown now.  Clear the gc_time_limit_exceeded
  22.158 +        // will be thrown now.  Clear the gc_overhead_limit_exceeded
  22.159          // flag to avoid the following situation.
  22.160 -        //      gc_time_limit_exceeded is set during a collection
  22.161 +        //      gc_overhead_limit_exceeded is set during a collection
  22.162          //      the collection fails to return enough space and an OOM is thrown
  22.163 -        //      the next GC is skipped because the gc_time_limit_exceeded
  22.164 -        //        flag is set and another OOM is thrown
  22.165 +        //      a subsequent GC prematurely throws an out-of-memory because
  22.166 +        //        the gc_overhead_limit_exceeded counts did not start
  22.167 +        //        again from 0.
  22.168          if (op.result() == NULL) {
  22.169 -          size_policy()->set_gc_time_limit_exceeded(false);
  22.170 +          size_policy()->reset_gc_overhead_limit_count();
  22.171          }
  22.172          return op.result();
  22.173        }
    23.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Apr 21 01:13:15 2010 -0700
    23.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Mon Apr 26 18:01:55 2010 -0400
    23.3 @@ -1,5 +1,5 @@
    23.4  /*
    23.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    23.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    23.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    23.8   *
    23.9   * This code is free software; you can redistribute it and/or modify it
   23.10 @@ -25,6 +25,8 @@
   23.11  class AdjoiningGenerations;
   23.12  class GCTaskManager;
   23.13  class PSAdaptiveSizePolicy;
   23.14 +class GenerationSizer;
   23.15 +class CollectorPolicy;
   23.16  
   23.17  class ParallelScavengeHeap : public CollectedHeap {
   23.18    friend class VMStructs;
   23.19 @@ -43,6 +45,8 @@
   23.20    size_t _young_gen_alignment;
   23.21    size_t _old_gen_alignment;
   23.22  
   23.23 +  GenerationSizer* _collector_policy;
   23.24 +
   23.25    inline size_t set_alignment(size_t& var, size_t val);
   23.26  
   23.27    // Collection of generations that are adjacent in the
   23.28 @@ -72,6 +76,9 @@
   23.29      return CollectedHeap::ParallelScavengeHeap;
   23.30    }
   23.31  
   23.32 +CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
   23.33 +  // GenerationSizer* collector_policy() const { return _collector_policy; }
   23.34 +
   23.35    static PSYoungGen* young_gen()     { return _young_gen; }
   23.36    static PSOldGen* old_gen()         { return _old_gen; }
   23.37    static PSPermGen* perm_gen()       { return _perm_gen; }
    24.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Wed Apr 21 01:13:15 2010 -0700
    24.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Mon Apr 26 18:01:55 2010 -0400
    24.3 @@ -1,5 +1,5 @@
    24.4  /*
    24.5 - * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
    24.6 + * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
    24.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    24.8   *
    24.9   * This code is free software; you can redistribute it and/or modify it
   24.10 @@ -184,18 +184,19 @@
   24.11    set_change_young_gen_for_maj_pauses(0);
   24.12  }
   24.13  
   24.14 -
   24.15  // If this is not a full GC, only test and modify the young generation.
   24.16  
   24.17 -void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live,
   24.18 -                                               size_t eden_live,
   24.19 -                                               size_t old_live,
   24.20 -                                               size_t perm_live,
   24.21 -                                               size_t cur_eden,
   24.22 -                                               size_t max_old_gen_size,
   24.23 -                                               size_t max_eden_size,
   24.24 -                                               bool   is_full_gc,
   24.25 -                                               GCCause::Cause gc_cause) {
   24.26 +void PSAdaptiveSizePolicy::compute_generation_free_space(
   24.27 +                                           size_t young_live,
   24.28 +                                           size_t eden_live,
   24.29 +                                           size_t old_live,
   24.30 +                                           size_t perm_live,
   24.31 +                                           size_t cur_eden,
   24.32 +                                           size_t max_old_gen_size,
   24.33 +                                           size_t max_eden_size,
   24.34 +                                           bool   is_full_gc,
   24.35 +                                           GCCause::Cause gc_cause,
   24.36 +                                           CollectorPolicy* collector_policy) {
   24.37  
   24.38    // Update statistics
   24.39    // Time statistics are updated as we go, update footprint stats here
   24.40 @@ -380,91 +381,16 @@
   24.41    // Is too much time being spent in GC?
   24.42    //   Is the heap trying to grow beyond it's limits?
   24.43  
   24.44 -  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
   24.45 +  const size_t free_in_old_gen =
   24.46 +    (size_t)(max_old_gen_size - avg_old_live()->average());
   24.47    if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
   24.48 -
   24.49 -    // eden_limit is the upper limit on the size of eden based on
   24.50 -    // the maximum size of the young generation and the sizes
   24.51 -    // of the survivor space.
   24.52 -    // The question being asked is whether the gc costs are high
   24.53 -    // and the space being recovered by a collection is low.
   24.54 -    // free_in_young_gen is the free space in the young generation
   24.55 -    // after a collection and promo_live is the free space in the old
   24.56 -    // generation after a collection.
   24.57 -    //
   24.58 -    // Use the minimum of the current value of the live in the
   24.59 -    // young gen or the average of the live in the young gen.
   24.60 -    // If the current value drops quickly, that should be taken
   24.61 -    // into account (i.e., don't trigger if the amount of free
   24.62 -    // space has suddenly jumped up).  If the current is much
   24.63 -    // higher than the average, use the average since it represents
   24.64 -    // the longer term behavor.
   24.65 -    const size_t live_in_eden = MIN2(eden_live, (size_t) avg_eden_live()->average());
   24.66 -    const size_t free_in_eden = eden_limit > live_in_eden ?
   24.67 -      eden_limit - live_in_eden : 0;
   24.68 -    const size_t total_free_limit = free_in_old_gen + free_in_eden;
   24.69 -    const size_t total_mem = max_old_gen_size + max_eden_size;
   24.70 -    const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
   24.71 -    if (PrintAdaptiveSizePolicy && (Verbose ||
   24.72 -        (total_free_limit < (size_t) mem_free_limit))) {
   24.73 -      gclog_or_tty->print_cr(
   24.74 -            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
   24.75 -            " promo_limit: " SIZE_FORMAT
   24.76 -            " eden_limit: " SIZE_FORMAT
   24.77 -            " total_free_limit: " SIZE_FORMAT
   24.78 -            " max_old_gen_size: " SIZE_FORMAT
   24.79 -            " max_eden_size: " SIZE_FORMAT
   24.80 -            " mem_free_limit: " SIZE_FORMAT,
   24.81 -            promo_limit, eden_limit, total_free_limit,
   24.82 -            max_old_gen_size, max_eden_size,
   24.83 -            (size_t) mem_free_limit);
   24.84 -    }
   24.85 -
   24.86 -    if (is_full_gc) {
   24.87 -      if (gc_cost() > gc_cost_limit &&
   24.88 -        total_free_limit < (size_t) mem_free_limit) {
   24.89 -        // Collections, on average, are taking too much time, and
   24.90 -        //      gc_cost() > gc_cost_limit
   24.91 -        // we have too little space available after a full gc.
   24.92 -        //      total_free_limit < mem_free_limit
   24.93 -        // where
   24.94 -        //   total_free_limit is the free space available in
   24.95 -        //     both generations
   24.96 -        //   total_mem is the total space available for allocation
   24.97 -        //     in both generations (survivor spaces are not included
   24.98 -        //     just as they are not included in eden_limit).
   24.99 -        //   mem_free_limit is a fraction of total_mem judged to be an
  24.100 -        //     acceptable amount that is still unused.
  24.101 -        // The heap can ask for the value of this variable when deciding
  24.102 -        // whether to thrown an OutOfMemory error.
  24.103 -        // Note that the gc time limit test only works for the collections
  24.104 -        // of the young gen + tenured gen and not for collections of the
  24.105 -        // permanent gen.  That is because the calculation of the space
  24.106 -        // freed by the collection is the free space in the young gen +
  24.107 -        // tenured gen.
  24.108 -        // Ignore explicit GC's. Ignoring explicit GC's at this level
  24.109 -        // is the equivalent of the GC did not happen as far as the
  24.110 -        // overhead calculation is concerted (i.e., the flag is not set
  24.111 -        // and the count is not affected).  Also the average will not
  24.112 -        // have been updated unless UseAdaptiveSizePolicyWithSystemGC is on.
  24.113 -        if (!GCCause::is_user_requested_gc(gc_cause) &&
  24.114 -            !GCCause::is_serviceability_requested_gc(gc_cause)) {
  24.115 -          inc_gc_time_limit_count();
  24.116 -          if (UseGCOverheadLimit &&
  24.117 -              (gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){
  24.118 -            // All conditions have been met for throwing an out-of-memory
  24.119 -            _gc_time_limit_exceeded = true;
  24.120 -            // Avoid consecutive OOM due to the gc time limit by resetting
  24.121 -            // the counter.
  24.122 -            reset_gc_time_limit_count();
  24.123 -          }
  24.124 -          _print_gc_time_limit_would_be_exceeded = true;
  24.125 -        }
  24.126 -      } else {
  24.127 -        // Did not exceed overhead limits
  24.128 -        reset_gc_time_limit_count();
  24.129 -      }
  24.130 -    }
  24.131 +    check_gc_overhead_limit(young_live,
  24.132 +                            eden_live,
  24.133 +                            max_old_gen_size,
  24.134 +                            max_eden_size,
  24.135 +                            is_full_gc,
  24.136 +                            gc_cause,
  24.137 +                            collector_policy);
  24.138    }
  24.139  
  24.140  
    25.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Wed Apr 21 01:13:15 2010 -0700
    25.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Mon Apr 26 18:01:55 2010 -0400
    25.3 @@ -1,5 +1,5 @@
    25.4  /*
    25.5 - * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
    25.6 + * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
    25.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.8   *
    25.9   * This code is free software; you can redistribute it and/or modify it
   25.10 @@ -45,6 +45,7 @@
   25.11  
   25.12  // Forward decls
   25.13  class elapsedTimer;
   25.14 +class GenerationSizer;
   25.15  
   25.16  class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
   25.17   friend class PSGCAdaptivePolicyCounters;
   25.18 @@ -340,7 +341,8 @@
   25.19                                       size_t max_old_gen_size,
   25.20                                       size_t max_eden_size,
   25.21                                       bool   is_full_gc,
   25.22 -                                     GCCause::Cause gc_cause);
   25.23 +                                     GCCause::Cause gc_cause,
   25.24 +                                     CollectorPolicy* collector_policy);
   25.25  
   25.26    // Calculates new survivor space size;  returns a new tenuring threshold
   25.27    // value. Stores new survivor size in _survivor_size.
    26.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Wed Apr 21 01:13:15 2010 -0700
    26.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Mon Apr 26 18:01:55 2010 -0400
    26.3 @@ -1,5 +1,5 @@
    26.4  /*
    26.5 - * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
    26.6 + * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
    26.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.8   *
    26.9   * This code is free software; you can redistribute it and/or modify it
   26.10 @@ -117,11 +117,13 @@
   26.11        PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK);
   26.12  
   26.13      cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded");
   26.14 -    _gc_time_limit_exceeded = PerfDataManager::create_variable(SUN_GC, cname,
   26.15 -      PerfData::U_Events, ps_size_policy()->gc_time_limit_exceeded(), CHECK);
   26.16 +    _gc_overhead_limit_exceeded_counter =
   26.17 +      PerfDataManager::create_variable(SUN_GC, cname,
   26.18 +      PerfData::U_Events, ps_size_policy()->gc_overhead_limit_exceeded(), CHECK);
   26.19  
   26.20      cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc");
   26.21 -    _live_at_last_full_gc = PerfDataManager::create_variable(SUN_GC, cname,
   26.22 +    _live_at_last_full_gc_counter =
   26.23 +      PerfDataManager::create_variable(SUN_GC, cname,
   26.24        PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK);
   26.25  
   26.26      cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope");
   26.27 @@ -189,6 +191,8 @@
   26.28      update_minor_pause_old_slope();
   26.29      update_major_pause_young_slope();
   26.30      update_minor_collection_slope_counter();
   26.31 +    update_gc_overhead_limit_exceeded_counter();
   26.32 +    update_live_at_last_full_gc_counter();
   26.33    }
   26.34  }
   26.35  
    27.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Wed Apr 21 01:13:15 2010 -0700
    27.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Mon Apr 26 18:01:55 2010 -0400
    27.3 @@ -1,5 +1,5 @@
    27.4  /*
    27.5 - * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
    27.6 + * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
    27.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    27.8   *
    27.9   * This code is free software; you can redistribute it and/or modify it
   27.10 @@ -44,8 +44,8 @@
   27.11    PerfVariable* _live_space;
   27.12    PerfVariable* _free_space;
   27.13    PerfVariable* _avg_base_footprint;
   27.14 -  PerfVariable* _gc_time_limit_exceeded;
   27.15 -  PerfVariable* _live_at_last_full_gc;
   27.16 +  PerfVariable* _gc_overhead_limit_exceeded_counter;
   27.17 +  PerfVariable* _live_at_last_full_gc_counter;
   27.18    PerfVariable* _old_capacity;
   27.19    PerfVariable* _boundary_moved;
   27.20  
   27.21 @@ -169,6 +169,14 @@
   27.22        (jlong)(ps_size_policy()->major_pause_young_slope() * 1000)
   27.23      );
   27.24    }
   27.25 +  inline void update_gc_overhead_limit_exceeded_counter() {
   27.26 +    _gc_overhead_limit_exceeded_counter->set_value(
   27.27 +      (jlong) ps_size_policy()->gc_overhead_limit_exceeded());
   27.28 +  }
   27.29 +  inline void update_live_at_last_full_gc_counter() {
   27.30 +    _live_at_last_full_gc_counter->set_value(
   27.31 +      (jlong)(ps_size_policy()->live_at_last_full_gc()));
   27.32 +  }
   27.33  
   27.34    inline void update_scavenge_skipped(int cause) {
   27.35      _scavenge_skipped->set_value(cause);
    28.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Apr 21 01:13:15 2010 -0700
    28.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Apr 26 18:01:55 2010 -0400
    28.3 @@ -1,5 +1,5 @@
    28.4  /*
    28.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    28.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    28.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.8   *
    28.9   * This code is free software; you can redistribute it and/or modify it
   28.10 @@ -46,6 +46,12 @@
   28.11  //
   28.12  // Note that this method should only be called from the vm_thread while
   28.13  // at a safepoint!
   28.14 +//
   28.15 +// Note that the all_soft_refs_clear flag in the collector policy
   28.16 +// may be true because this method can be called without intervening
    28.17 +// activity.  For example when the heap space is tight and full measures
   28.18 +// are being taken to free space.
   28.19 +
   28.20  void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   28.21    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   28.22    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   28.23 @@ -54,24 +60,18 @@
   28.24    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   28.25    GCCause::Cause gc_cause = heap->gc_cause();
   28.26    PSAdaptiveSizePolicy* policy = heap->size_policy();
   28.27 +  IsGCActiveMark mark;
   28.28  
   28.29 -  // Before each allocation/collection attempt, find out from the
   28.30 -  // policy object if GCs are, on the whole, taking too long. If so,
   28.31 -  // bail out without attempting a collection.  The exceptions are
   28.32 -  // for explicitly requested GC's.
   28.33 -  if (!policy->gc_time_limit_exceeded() ||
   28.34 -      GCCause::is_user_requested_gc(gc_cause) ||
   28.35 -      GCCause::is_serviceability_requested_gc(gc_cause)) {
   28.36 -    IsGCActiveMark mark;
   28.37 +  if (ScavengeBeforeFullGC) {
   28.38 +    PSScavenge::invoke_no_policy();
   28.39 +  }
   28.40  
   28.41 -    if (ScavengeBeforeFullGC) {
   28.42 -      PSScavenge::invoke_no_policy();
   28.43 -    }
   28.44 +  const bool clear_all_soft_refs =
   28.45 +    heap->collector_policy()->should_clear_all_soft_refs();
   28.46  
   28.47 -    int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
   28.48 -    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
   28.49 -    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
   28.50 -  }
   28.51 +  int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
   28.52 +  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
   28.53 +  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
   28.54  }
   28.55  
   28.56  // This method contains no policy. You should probably
   28.57 @@ -89,6 +89,10 @@
   28.58    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   28.59    PSAdaptiveSizePolicy* size_policy = heap->size_policy();
   28.60  
   28.61 +  // The scope of casr should end after code that can change
   28.62 +  // CollectorPolicy::_should_clear_all_soft_refs.
   28.63 +  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
   28.64 +
   28.65    PSYoungGen* young_gen = heap->young_gen();
   28.66    PSOldGen* old_gen = heap->old_gen();
   28.67    PSPermGen* perm_gen = heap->perm_gen();
   28.68 @@ -275,7 +279,8 @@
   28.69                                   old_gen->max_gen_size(),
   28.70                                   max_eden_size,
   28.71                                   true /* full gc*/,
   28.72 -                                 gc_cause);
   28.73 +                                 gc_cause,
   28.74 +                                 heap->collector_policy());
   28.75  
   28.76          heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
   28.77  
   28.78 @@ -326,19 +331,6 @@
   28.79      // Track memory usage and detect low memory
   28.80      MemoryService::track_memory_usage();
   28.81      heap->update_counters();
   28.82 -
   28.83 -    if (PrintGCDetails) {
   28.84 -      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
   28.85 -        if (size_policy->gc_time_limit_exceeded()) {
   28.86 -          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
   28.87 -            "of %d%%", GCTimeLimit);
   28.88 -        } else {
   28.89 -          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
   28.90 -            "of %d%%", GCTimeLimit);
   28.91 -        }
   28.92 -      }
   28.93 -      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
   28.94 -    }
   28.95    }
   28.96  
   28.97    if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    29.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Apr 21 01:13:15 2010 -0700
    29.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Apr 26 18:01:55 2010 -0400
    29.3 @@ -1,5 +1,5 @@
    29.4  /*
    29.5 - * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
    29.6 + * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
    29.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.8   *
    29.9   * This code is free software; you can redistribute it and/or modify it
   29.10 @@ -1923,31 +1923,32 @@
   29.11  //
   29.12  // Note that this method should only be called from the vm_thread while at a
   29.13  // safepoint.
   29.14 +//
   29.15 +// Note that the all_soft_refs_clear flag in the collector policy
   29.16 +// may be true because this method can be called without intervening
    29.17 +// activity.  For example when the heap space is tight and full measures
   29.18 +// are being taken to free space.
   29.19  void PSParallelCompact::invoke(bool maximum_heap_compaction) {
   29.20    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   29.21    assert(Thread::current() == (Thread*)VMThread::vm_thread(),
   29.22           "should be in vm thread");
   29.23 +
   29.24    ParallelScavengeHeap* heap = gc_heap();
   29.25    GCCause::Cause gc_cause = heap->gc_cause();
   29.26    assert(!heap->is_gc_active(), "not reentrant");
   29.27  
   29.28    PSAdaptiveSizePolicy* policy = heap->size_policy();
   29.29 -
   29.30 -  // Before each allocation/collection attempt, find out from the
   29.31 -  // policy object if GCs are, on the whole, taking too long. If so,
   29.32 -  // bail out without attempting a collection.  The exceptions are
   29.33 -  // for explicitly requested GC's.
   29.34 -  if (!policy->gc_time_limit_exceeded() ||
   29.35 -      GCCause::is_user_requested_gc(gc_cause) ||
   29.36 -      GCCause::is_serviceability_requested_gc(gc_cause)) {
   29.37 -    IsGCActiveMark mark;
   29.38 -
   29.39 -    if (ScavengeBeforeFullGC) {
   29.40 -      PSScavenge::invoke_no_policy();
   29.41 -    }
   29.42 -
   29.43 -    PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
   29.44 +  IsGCActiveMark mark;
   29.45 +
   29.46 +  if (ScavengeBeforeFullGC) {
   29.47 +    PSScavenge::invoke_no_policy();
   29.48    }
   29.49 +
   29.50 +  const bool clear_all_soft_refs =
   29.51 +    heap->collector_policy()->should_clear_all_soft_refs();
   29.52 +
   29.53 +  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
   29.54 +                                      maximum_heap_compaction);
   29.55  }
   29.56  
   29.57  bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
   29.58 @@ -1976,6 +1977,11 @@
   29.59    PSPermGen* perm_gen = heap->perm_gen();
   29.60    PSAdaptiveSizePolicy* size_policy = heap->size_policy();
   29.61  
   29.62 +  // The scope of casr should end after code that can change
   29.63 +  // CollectorPolicy::_should_clear_all_soft_refs.
   29.64 +  ClearedAllSoftRefs casr(maximum_heap_compaction,
   29.65 +                          heap->collector_policy());
   29.66 +
   29.67    if (ZapUnusedHeapArea) {
   29.68      // Save information needed to minimize mangling
   29.69      heap->record_gen_tops_before_GC();
   29.70 @@ -2109,7 +2115,8 @@
   29.71                                old_gen->max_gen_size(),
   29.72                                max_eden_size,
   29.73                                true /* full gc*/,
   29.74 -                              gc_cause);
   29.75 +                              gc_cause,
   29.76 +                              heap->collector_policy());
   29.77  
   29.78          heap->resize_old_gen(
   29.79            size_policy->calculated_old_free_size_in_bytes());
   29.80 @@ -2157,19 +2164,6 @@
   29.81      // Track memory usage and detect low memory
   29.82      MemoryService::track_memory_usage();
   29.83      heap->update_counters();
   29.84 -
   29.85 -    if (PrintGCDetails) {
   29.86 -      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
   29.87 -        if (size_policy->gc_time_limit_exceeded()) {
   29.88 -          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
   29.89 -            "of %d%%", GCTimeLimit);
   29.90 -        } else {
   29.91 -          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
   29.92 -            "of %d%%", GCTimeLimit);
   29.93 -        }
   29.94 -      }
   29.95 -      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
   29.96 -    }
   29.97    }
   29.98  
   29.99    if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    30.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Apr 21 01:13:15 2010 -0700
    30.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Apr 26 18:01:55 2010 -0400
    30.3 @@ -1,5 +1,5 @@
    30.4  /*
    30.5 - * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
    30.6 + * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
    30.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.8   *
    30.9   * This code is free software; you can redistribute it and/or modify it
   30.10 @@ -187,8 +187,7 @@
   30.11  //
   30.12  // Note that this method should only be called from the vm_thread while
   30.13  // at a safepoint!
   30.14 -void PSScavenge::invoke()
   30.15 -{
   30.16 +void PSScavenge::invoke() {
   30.17    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   30.18    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   30.19    assert(!Universe::heap()->is_gc_active(), "not reentrant");
   30.20 @@ -197,29 +196,25 @@
   30.21    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   30.22  
   30.23    PSAdaptiveSizePolicy* policy = heap->size_policy();
   30.24 +  IsGCActiveMark mark;
   30.25  
   30.26 -  // Before each allocation/collection attempt, find out from the
   30.27 -  // policy object if GCs are, on the whole, taking too long. If so,
   30.28 -  // bail out without attempting a collection.
   30.29 -  if (!policy->gc_time_limit_exceeded()) {
   30.30 -    IsGCActiveMark mark;
   30.31 +  bool scavenge_was_done = PSScavenge::invoke_no_policy();
   30.32  
   30.33 -    bool scavenge_was_done = PSScavenge::invoke_no_policy();
   30.34 +  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   30.35 +  if (UsePerfData)
   30.36 +    counters->update_full_follows_scavenge(0);
   30.37 +  if (!scavenge_was_done ||
   30.38 +      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
   30.39 +    if (UsePerfData)
   30.40 +      counters->update_full_follows_scavenge(full_follows_scavenge);
   30.41 +    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
   30.42 +    CollectorPolicy* cp = heap->collector_policy();
   30.43 +    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
   30.44  
   30.45 -    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   30.46 -    if (UsePerfData)
   30.47 -      counters->update_full_follows_scavenge(0);
   30.48 -    if (!scavenge_was_done ||
   30.49 -        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
   30.50 -      if (UsePerfData)
   30.51 -        counters->update_full_follows_scavenge(full_follows_scavenge);
   30.52 -
   30.53 -      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
   30.54 -      if (UseParallelOldGC) {
   30.55 -        PSParallelCompact::invoke_no_policy(false);
   30.56 -      } else {
   30.57 -        PSMarkSweep::invoke_no_policy(false);
   30.58 -      }
   30.59 +    if (UseParallelOldGC) {
   30.60 +      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
   30.61 +    } else {
   30.62 +      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
   30.63      }
   30.64    }
   30.65  }
   30.66 @@ -447,6 +442,9 @@
   30.67        size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
   30.68        size_policy->update_averages(_survivor_overflow, survived, promoted);
   30.69  
   30.70 +      // A successful scavenge should restart the GC time limit count which is
   30.71 +      // for full GC's.
   30.72 +      size_policy->reset_gc_overhead_limit_count();
   30.73        if (UseAdaptiveSizePolicy) {
   30.74          // Calculate the new survivor size and tenuring threshold
   30.75  
   30.76 @@ -523,7 +521,8 @@
   30.77                                     old_gen->max_gen_size(),
   30.78                                     max_eden_size,
   30.79                                     false  /* full gc*/,
   30.80 -                                   gc_cause);
   30.81 +                                   gc_cause,
   30.82 +                                   heap->collector_policy());
   30.83  
   30.84          }
   30.85          // Resize the young generation at every collection
    31.1 --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Wed Apr 21 01:13:15 2010 -0700
    31.2 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Mon Apr 26 18:01:55 2010 -0400
    31.3 @@ -1,5 +1,5 @@
    31.4  /*
    31.5 - * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
    31.6 + * Copyright 2004-2010 Sun Microsystems, Inc.  All Rights Reserved.
    31.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    31.8   *
    31.9   * This code is free software; you can redistribute it and/or modify it
   31.10 @@ -44,13 +44,15 @@
   31.11      _survivor_size(init_survivor_size),
   31.12      _gc_pause_goal_sec(gc_pause_goal_sec),
   31.13      _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
   31.14 -    _gc_time_limit_exceeded(false),
   31.15 -    _print_gc_time_limit_would_be_exceeded(false),
   31.16 -    _gc_time_limit_count(0),
   31.17 +    _gc_overhead_limit_exceeded(false),
   31.18 +    _print_gc_overhead_limit_would_be_exceeded(false),
   31.19 +    _gc_overhead_limit_count(0),
   31.20      _latest_minor_mutator_interval_seconds(0),
   31.21      _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
   31.22      _young_gen_change_for_minor_throughput(0),
   31.23      _old_gen_change_for_major_throughput(0) {
   31.24 +  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
   31.25 +    "No opportunity to clear SoftReferences before GC overhead limit");
   31.26    _avg_minor_pause    =
   31.27      new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
   31.28    _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
   31.29 @@ -278,6 +280,147 @@
   31.30    set_decide_at_full_gc(0);
   31.31  }
   31.32  
   31.33 +void AdaptiveSizePolicy::check_gc_overhead_limit(
   31.34 +                                          size_t young_live,
   31.35 +                                          size_t eden_live,
   31.36 +                                          size_t max_old_gen_size,
   31.37 +                                          size_t max_eden_size,
   31.38 +                                          bool   is_full_gc,
   31.39 +                                          GCCause::Cause gc_cause,
   31.40 +                                          CollectorPolicy* collector_policy) {
   31.41 +
   31.42 +  // Ignore explicit GC's.  Exiting here does not set the flag and
   31.43 +  // does not reset the count.  Updating of the averages for system
   31.44 +  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
   31.45 +  if (GCCause::is_user_requested_gc(gc_cause) ||
   31.46 +      GCCause::is_serviceability_requested_gc(gc_cause)) {
   31.47 +    return;
   31.48 +  }
   31.49 +  // eden_limit is the upper limit on the size of eden based on
   31.50 +  // the maximum size of the young generation and the sizes
   31.51 +  // of the survivor space.
   31.52 +  // The question being asked is whether the gc costs are high
   31.53 +  // and the space being recovered by a collection is low.
   31.54 +  // free_in_young_gen is the free space in the young generation
   31.55 +  // after a collection and promo_live is the free space in the old
   31.56 +  // generation after a collection.
   31.57 +  //
   31.58 +  // Use the minimum of the current value of the live in the
   31.59 +  // young gen or the average of the live in the young gen.
   31.60 +  // If the current value drops quickly, that should be taken
   31.61 +  // into account (i.e., don't trigger if the amount of free
   31.62 +  // space has suddenly jumped up).  If the current is much
   31.63 +  // higher than the average, use the average since it represents
    31.64 +  // the longer term behavior.
   31.65 +  const size_t live_in_eden =
   31.66 +    MIN2(eden_live, (size_t) avg_eden_live()->average());
   31.67 +  const size_t free_in_eden = max_eden_size > live_in_eden ?
   31.68 +    max_eden_size - live_in_eden : 0;
   31.69 +  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
   31.70 +  const size_t total_free_limit = free_in_old_gen + free_in_eden;
   31.71 +  const size_t total_mem = max_old_gen_size + max_eden_size;
   31.72 +  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
   31.73 +  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
   31.74 +  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
   31.75 +  const double gc_cost_limit = GCTimeLimit/100.0;
   31.76 +  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
   31.77 +  // But don't force a promo size below the current promo size. Otherwise,
   31.78 +  // the promo size will shrink for no good reason.
   31.79 +  promo_limit = MAX2(promo_limit, _promo_size);
   31.80 +
   31.81 +
   31.82 +  if (PrintAdaptiveSizePolicy && (Verbose ||
   31.83 +      (free_in_old_gen < (size_t) mem_free_old_limit &&
   31.84 +       free_in_eden < (size_t) mem_free_eden_limit))) {
   31.85 +    gclog_or_tty->print_cr(
   31.86 +          "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
   31.87 +          " promo_limit: " SIZE_FORMAT
   31.88 +          " max_eden_size: " SIZE_FORMAT
   31.89 +          " total_free_limit: " SIZE_FORMAT
   31.90 +          " max_old_gen_size: " SIZE_FORMAT
   31.91 +          " max_eden_size: " SIZE_FORMAT
   31.92 +          " mem_free_limit: " SIZE_FORMAT,
   31.93 +          promo_limit, max_eden_size, total_free_limit,
   31.94 +          max_old_gen_size, max_eden_size,
   31.95 +          (size_t) mem_free_limit);
   31.96 +  }
   31.97 +
   31.98 +  bool print_gc_overhead_limit_would_be_exceeded = false;
   31.99 +  if (is_full_gc) {
  31.100 +    if (gc_cost() > gc_cost_limit &&
  31.101 +      free_in_old_gen < (size_t) mem_free_old_limit &&
  31.102 +      free_in_eden < (size_t) mem_free_eden_limit) {
  31.103 +      // Collections, on average, are taking too much time, and
  31.104 +      //      gc_cost() > gc_cost_limit
  31.105 +      // we have too little space available after a full gc.
  31.106 +      //      total_free_limit < mem_free_limit
  31.107 +      // where
  31.108 +      //   total_free_limit is the free space available in
  31.109 +      //     both generations
  31.110 +      //   total_mem is the total space available for allocation
  31.111 +      //     in both generations (survivor spaces are not included
  31.112 +      //     just as they are not included in eden_limit).
  31.113 +      //   mem_free_limit is a fraction of total_mem judged to be an
  31.114 +      //     acceptable amount that is still unused.
  31.115 +      // The heap can ask for the value of this variable when deciding
   31.116 +      // whether to throw an OutOfMemory error.
  31.117 +      // Note that the gc time limit test only works for the collections
  31.118 +      // of the young gen + tenured gen and not for collections of the
  31.119 +      // permanent gen.  That is because the calculation of the space
  31.120 +      // freed by the collection is the free space in the young gen +
  31.121 +      // tenured gen.
  31.122 +      // At this point the GC overhead limit is being exceeded.
  31.123 +      inc_gc_overhead_limit_count();
  31.124 +      if (UseGCOverheadLimit) {
  31.125 +        if (gc_overhead_limit_count() >=
  31.126 +            AdaptiveSizePolicyGCTimeLimitThreshold){
  31.127 +          // All conditions have been met for throwing an out-of-memory
  31.128 +          set_gc_overhead_limit_exceeded(true);
  31.129 +          // Avoid consecutive OOM due to the gc time limit by resetting
  31.130 +          // the counter.
  31.131 +          reset_gc_overhead_limit_count();
  31.132 +        } else {
  31.133 +          // The required consecutive collections which exceed the
  31.134 +          // GC time limit may or may not have been reached. We
  31.135 +          // are approaching that condition and so as not to
  31.136 +          // throw an out-of-memory before all SoftRef's have been
  31.137 +          // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
  31.138 +          // The clearing will be done on the next GC.
  31.139 +          bool near_limit = gc_overhead_limit_near();
  31.140 +          if (near_limit) {
  31.141 +            collector_policy->set_should_clear_all_soft_refs(true);
  31.142 +            if (PrintGCDetails && Verbose) {
  31.143 +              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
  31.144 +                "will be clearing all SoftReference");
  31.145 +            }
  31.146 +          }
  31.147 +        }
  31.148 +      }
  31.149 +      // Set this even when the overhead limit will not
  31.150 +      // cause an out-of-memory.  Diagnostic message indicating
  31.151 +      // that the overhead limit is being exceeded is sometimes
  31.152 +      // printed.
  31.153 +      print_gc_overhead_limit_would_be_exceeded = true;
  31.154 +
  31.155 +    } else {
  31.156 +      // Did not exceed overhead limits
  31.157 +      reset_gc_overhead_limit_count();
  31.158 +    }
  31.159 +  }
  31.160 +
  31.161 +  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
  31.162 +    if (gc_overhead_limit_exceeded()) {
  31.163 +      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
  31.164 +        "of %d%%", GCTimeLimit);
  31.165 +      reset_gc_overhead_limit_count();
  31.166 +    } else if (print_gc_overhead_limit_would_be_exceeded) {
  31.167 +      assert(gc_overhead_limit_count() > 0, "Should not be printing");
  31.168 +      gclog_or_tty->print_cr("      GC would exceed overhead limit "
  31.169 +        "of %d%% %d consecutive time(s)",
  31.170 +        GCTimeLimit, gc_overhead_limit_count());
  31.171 +    }
  31.172 +  }
  31.173 +}
  31.174  // Printing
  31.175  
  31.176  bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {
    32.1 --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Apr 21 01:13:15 2010 -0700
    32.2 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Mon Apr 26 18:01:55 2010 -0400
    32.3 @@ -1,5 +1,5 @@
    32.4  /*
    32.5 - * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
    32.6 + * Copyright 2004-2010 Sun Microsystems, Inc.  All Rights Reserved.
    32.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    32.8   *
    32.9   * This code is free software; you can redistribute it and/or modify it
   32.10 @@ -27,6 +27,7 @@
   32.11  
   32.12  // Forward decls
   32.13  class elapsedTimer;
   32.14 +class CollectorPolicy;
   32.15  
   32.16  class AdaptiveSizePolicy : public CHeapObj {
   32.17   friend class GCAdaptivePolicyCounters;
   32.18 @@ -75,13 +76,16 @@
   32.19  
   32.20    // This is a hint for the heap:  we've detected that gc times
   32.21    // are taking longer than GCTimeLimit allows.
   32.22 -  bool _gc_time_limit_exceeded;
   32.23 -  // Use for diagnostics only.  If UseGCTimeLimit is false,
   32.24 +  bool _gc_overhead_limit_exceeded;
   32.25 +  // Use for diagnostics only.  If UseGCOverheadLimit is false,
   32.26    // this variable is still set.
   32.27 -  bool _print_gc_time_limit_would_be_exceeded;
   32.28 +  bool _print_gc_overhead_limit_would_be_exceeded;
   32.29    // Count of consecutive GC that have exceeded the
   32.30    // GC time limit criterion.
   32.31 -  uint _gc_time_limit_count;
   32.32 +  uint _gc_overhead_limit_count;
   32.33 +  // This flag signals that GCTimeLimit is being exceeded
    32.34 +  // but may not have done so for the required number of consecutive
   32.35 +  // collections.
   32.36  
   32.37    // Minor collection timers used to determine both
   32.38    // pause and interval times for collections.
   32.39 @@ -406,22 +410,21 @@
   32.40    // Most heaps will choose to throw an OutOfMemoryError when
   32.41    // this occurs but it is up to the heap to request this information
   32.42    // of the policy
   32.43 -  bool gc_time_limit_exceeded() {
   32.44 -    return _gc_time_limit_exceeded;
   32.45 +  bool gc_overhead_limit_exceeded() {
   32.46 +    return _gc_overhead_limit_exceeded;
   32.47    }
   32.48 -  void set_gc_time_limit_exceeded(bool v) {
   32.49 -    _gc_time_limit_exceeded = v;
   32.50 -  }
   32.51 -  bool print_gc_time_limit_would_be_exceeded() {
   32.52 -    return _print_gc_time_limit_would_be_exceeded;
   32.53 -  }
   32.54 -  void set_print_gc_time_limit_would_be_exceeded(bool v) {
   32.55 -    _print_gc_time_limit_would_be_exceeded = v;
   32.56 +  void set_gc_overhead_limit_exceeded(bool v) {
   32.57 +    _gc_overhead_limit_exceeded = v;
   32.58    }
   32.59  
   32.60 -  uint gc_time_limit_count() { return _gc_time_limit_count; }
   32.61 -  void reset_gc_time_limit_count() { _gc_time_limit_count = 0; }
   32.62 -  void inc_gc_time_limit_count() { _gc_time_limit_count++; }
   32.63 +  // Tests conditions indicate the GC overhead limit is being approached.
   32.64 +  bool gc_overhead_limit_near() {
   32.65 +    return gc_overhead_limit_count() >=
   32.66 +        (AdaptiveSizePolicyGCTimeLimitThreshold - 1);
   32.67 +  }
   32.68 +  uint gc_overhead_limit_count() { return _gc_overhead_limit_count; }
   32.69 +  void reset_gc_overhead_limit_count() { _gc_overhead_limit_count = 0; }
   32.70 +  void inc_gc_overhead_limit_count() { _gc_overhead_limit_count++; }
   32.71    // accessors for flags recording the decisions to resize the
   32.72    // generations to meet the pause goal.
   32.73  
   32.74 @@ -436,6 +439,16 @@
   32.75    int decide_at_full_gc() { return _decide_at_full_gc; }
   32.76    void set_decide_at_full_gc(int v) { _decide_at_full_gc = v; }
   32.77  
   32.78 +  // Check the conditions for an out-of-memory due to excessive GC time.
   32.79 +  // Set _gc_overhead_limit_exceeded if all the conditions have been met.
   32.80 +  void check_gc_overhead_limit(size_t young_live,
   32.81 +                               size_t eden_live,
   32.82 +                               size_t max_old_gen_size,
   32.83 +                               size_t max_eden_size,
   32.84 +                               bool   is_full_gc,
   32.85 +                               GCCause::Cause gc_cause,
   32.86 +                               CollectorPolicy* collector_policy);
   32.87 +
   32.88    // Printing support
   32.89    virtual bool print_adaptive_size_policy_on(outputStream* st) const;
   32.90    bool print_adaptive_size_policy_on(outputStream* st, int
    33.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed Apr 21 01:13:15 2010 -0700
    33.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Mon Apr 26 18:01:55 2010 -0400
    33.3 @@ -115,11 +115,25 @@
    33.4  void VM_GC_HeapInspection::doit() {
    33.5    HandleMark hm;
    33.6    CollectedHeap* ch = Universe::heap();
    33.7 +  ch->ensure_parsability(false); // must happen, even if collection does
    33.8 +                                 // not happen (e.g. due to GC_locker)
    33.9    if (_full_gc) {
   33.10 -    ch->collect_as_vm_thread(GCCause::_heap_inspection);
   33.11 -  } else {
   33.12 -    // make the heap parsable (no need to retire TLABs)
   33.13 -    ch->ensure_parsability(false);
   33.14 +    // The collection attempt below would be skipped anyway if
   33.15 +    // the gc locker is held. The following dump may then be a tad
   33.16 +    // misleading to someone expecting only live objects to show
   33.17 +    // up in the dump (see CR 6944195). Just issue a suitable warning
   33.18 +    // in that case and do not attempt to do a collection.
   33.19 +    // The latter is a subtle point, because even a failed attempt
   33.20 +    // to GC will, in fact, induce one in the future, which we
   33.21 +    // probably want to avoid in this case because the GC that we may
   33.22 +    // be about to attempt holds value for us only
   33.23 +    // if it happens now and not if it happens in the eventual
   33.24 +    // future.
   33.25 +    if (GC_locker::is_active()) {
   33.26 +      warning("GC locker is held; pre-dump GC was skipped");
   33.27 +    } else {
   33.28 +      ch->collect_as_vm_thread(GCCause::_heap_inspection);
   33.29 +    }
   33.30    }
   33.31    HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
   33.32  }
    34.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed Apr 21 01:13:15 2010 -0700
    34.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Apr 26 18:01:55 2010 -0400
    34.3 @@ -1,5 +1,5 @@
    34.4  /*
    34.5 - * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
    34.6 + * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
    34.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    34.8   *
    34.9   * This code is free software; you can redistribute it and/or modify it
   34.10 @@ -89,8 +89,19 @@
   34.11      if (full) {
   34.12        _full_gc_count_before = full_gc_count_before;
   34.13      }
   34.14 +    // In ParallelScavengeHeap::mem_allocate() collections can be
   34.15 +    // executed within a loop and _all_soft_refs_clear can be set
   34.16 +    // true after they have been cleared by a collection and another
   34.17 +    // collection started so that _all_soft_refs_clear can be true
    34.18 +    // when this collection is started.  Don't assert that
    34.19 +    // _all_soft_refs_clear has to be false here even though
   34.20 +    // mutators have run.  Soft refs will be cleared again in this
   34.21 +    // collection.
   34.22    }
   34.23 -  ~VM_GC_Operation() {}
   34.24 +  ~VM_GC_Operation() {
   34.25 +    CollectedHeap* ch = Universe::heap();
   34.26 +    ch->collector_policy()->set_all_soft_refs_clear(false);
   34.27 +  }
   34.28  
   34.29    // Acquire the reference synchronization lock
   34.30    virtual bool doit_prologue();
    35.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Apr 21 01:13:15 2010 -0700
    35.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Mon Apr 26 18:01:55 2010 -0400
    35.3 @@ -1,5 +1,5 @@
    35.4  /*
    35.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    35.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    35.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.8   *
    35.9   * This code is free software; you can redistribute it and/or modify it
   35.10 @@ -31,6 +31,7 @@
   35.11  class ThreadClosure;
   35.12  class AdaptiveSizePolicy;
   35.13  class Thread;
   35.14 +class CollectorPolicy;
   35.15  
   35.16  //
   35.17  // CollectedHeap
   35.18 @@ -506,6 +507,9 @@
   35.19    // Return the AdaptiveSizePolicy for the heap.
   35.20    virtual AdaptiveSizePolicy* size_policy() = 0;
   35.21  
   35.22 +  // Return the CollectorPolicy for the heap
   35.23 +  virtual CollectorPolicy* collector_policy() const = 0;
   35.24 +
   35.25    // Iterate over all the ref-containing fields of all objects, calling
   35.26    // "cl.do_oop" on each. This includes objects in permanent memory.
   35.27    virtual void oop_iterate(OopClosure* cl) = 0;
    36.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Wed Apr 21 01:13:15 2010 -0700
    36.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Mon Apr 26 18:01:55 2010 -0400
    36.3 @@ -1,5 +1,5 @@
    36.4  /*
    36.5 - * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
    36.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    36.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    36.8   *
    36.9   * This code is free software; you can redistribute it and/or modify it
   36.10 @@ -112,6 +112,11 @@
   36.11    }
   36.12  }
   36.13  
   36.14 +bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
   36.15 +  bool result = _should_clear_all_soft_refs;
   36.16 +  set_should_clear_all_soft_refs(false);
   36.17 +  return result;
   36.18 +}
   36.19  
   36.20  GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
   36.21                                             int max_covered_regions) {
   36.22 @@ -126,6 +131,17 @@
   36.23    }
   36.24  }
   36.25  
   36.26 +void CollectorPolicy::cleared_all_soft_refs() {
    36.27 +  // If near gc overhead limit, continue to clear SoftRefs.  SoftRefs may
    36.28 +  // have been cleared in the last collection but if the gc overhead
   36.29 +  // limit continues to be near, SoftRefs should still be cleared.
   36.30 +  if (size_policy() != NULL) {
   36.31 +    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
   36.32 +  }
   36.33 +  _all_soft_refs_clear = true;
   36.34 +}
   36.35 +
   36.36 +
   36.37  // GenCollectorPolicy methods.
   36.38  
   36.39  size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
   36.40 @@ -489,6 +505,12 @@
   36.41  
   36.42    debug_only(gch->check_for_valid_allocation_state());
   36.43    assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
   36.44 +
   36.45 +  // In general gc_overhead_limit_was_exceeded should be false so
   36.46 +  // set it so here and reset it to true only if the gc time
   36.47 +  // limit is being exceeded as checked below.
   36.48 +  *gc_overhead_limit_was_exceeded = false;
   36.49 +
   36.50    HeapWord* result = NULL;
   36.51  
   36.52    // Loop until the allocation is satisified,
   36.53 @@ -524,12 +546,6 @@
   36.54          return result;
   36.55        }
   36.56  
   36.57 -      // There are NULL's returned for different circumstances below.
   36.58 -      // In general gc_overhead_limit_was_exceeded should be false so
   36.59 -      // set it so here and reset it to true only if the gc time
   36.60 -      // limit is being exceeded as checked below.
   36.61 -      *gc_overhead_limit_was_exceeded = false;
   36.62 -
   36.63        if (GC_locker::is_active_and_needs_gc()) {
   36.64          if (is_tlab) {
   36.65            return NULL;  // Caller will retry allocating individual object
   36.66 @@ -568,18 +584,6 @@
   36.67        gc_count_before = Universe::heap()->total_collections();
   36.68      }
   36.69  
   36.70 -    // Allocation has failed and a collection is about
   36.71 -    // to be done.  If the gc time limit was exceeded the
   36.72 -    // last time a collection was done, return NULL so
   36.73 -    // that an out-of-memory will be thrown.  Clear
   36.74 -    // gc_time_limit_exceeded so that subsequent attempts
   36.75 -    // at a collection will be made.
   36.76 -    if (size_policy()->gc_time_limit_exceeded()) {
   36.77 -      *gc_overhead_limit_was_exceeded = true;
   36.78 -      size_policy()->set_gc_time_limit_exceeded(false);
   36.79 -      return NULL;
   36.80 -    }
   36.81 -
   36.82      VM_GenCollectForAllocation op(size,
   36.83                                    is_tlab,
   36.84                                    gc_count_before);
   36.85 @@ -590,6 +594,24 @@
   36.86           assert(result == NULL, "must be NULL if gc_locked() is true");
   36.87           continue;  // retry and/or stall as necessary
   36.88        }
   36.89 +
   36.90 +      // Allocation has failed and a collection
    36.91 +      // has been done.  If the gc time limit was exceeded
    36.92 +      // this time, return NULL so that an out-of-memory
   36.93 +      // will be thrown.  Clear gc_overhead_limit_exceeded
   36.94 +      // so that the overhead exceeded does not persist.
   36.95 +
   36.96 +      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
   36.97 +      const bool softrefs_clear = all_soft_refs_clear();
   36.98 +      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
   36.99 +      if (limit_exceeded && softrefs_clear) {
  36.100 +        *gc_overhead_limit_was_exceeded = true;
  36.101 +        size_policy()->set_gc_overhead_limit_exceeded(false);
  36.102 +        if (op.result() != NULL) {
  36.103 +          CollectedHeap::fill_with_object(op.result(), size);
  36.104 +        }
  36.105 +        return NULL;
  36.106 +      }
  36.107        assert(result == NULL || gch->is_in_reserved(result),
  36.108               "result not in heap");
  36.109        return result;
  36.110 @@ -688,6 +710,9 @@
  36.111      return result;
  36.112    }
  36.113  
  36.114 +  assert(!should_clear_all_soft_refs(),
  36.115 +    "Flag should have been handled and cleared prior to this point");
  36.116 +
  36.117    // What else?  We might try synchronous finalization later.  If the total
  36.118    // space available is large enough for the allocation, then a more
  36.119    // complete compaction phase than we've tried so far might be
    37.1 --- a/src/share/vm/memory/collectorPolicy.hpp	Wed Apr 21 01:13:15 2010 -0700
    37.2 +++ b/src/share/vm/memory/collectorPolicy.hpp	Mon Apr 26 18:01:55 2010 -0400
    37.3 @@ -1,5 +1,5 @@
    37.4  /*
    37.5 - * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
    37.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    37.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    37.8   *
    37.9   * This code is free software; you can redistribute it and/or modify it
   37.10 @@ -69,12 +69,28 @@
   37.11    size_t _min_alignment;
   37.12    size_t _max_alignment;
   37.13  
    37.14 +  // The sizing of the heap is controlled by a sizing policy.
   37.15 +  AdaptiveSizePolicy* _size_policy;
   37.16 +
   37.17 +  // Set to true when policy wants soft refs cleared.
   37.18 +  // Reset to false by gc after it clears all soft refs.
   37.19 +  bool _should_clear_all_soft_refs;
   37.20 +  // Set to true by the GC if the just-completed gc cleared all
   37.21 +  // softrefs.  This is set to true whenever a gc clears all softrefs, and
   37.22 +  // set to false each time gc returns to the mutator.  For example, in the
   37.23 +  // ParallelScavengeHeap case the latter would be done toward the end of
   37.24 +  // mem_allocate() where it returns op.result()
   37.25 +  bool _all_soft_refs_clear;
   37.26 +
   37.27    CollectorPolicy() :
   37.28      _min_alignment(1),
   37.29      _max_alignment(1),
   37.30      _initial_heap_byte_size(0),
   37.31      _max_heap_byte_size(0),
   37.32 -    _min_heap_byte_size(0)
   37.33 +    _min_heap_byte_size(0),
   37.34 +    _size_policy(NULL),
   37.35 +    _should_clear_all_soft_refs(false),
   37.36 +    _all_soft_refs_clear(false)
   37.37    {}
   37.38  
   37.39   public:
   37.40 @@ -98,6 +114,19 @@
   37.41      G1CollectorPolicyKind
   37.42    };
   37.43  
   37.44 +  AdaptiveSizePolicy* size_policy() { return _size_policy; }
   37.45 +  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
   37.46 +  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
   37.47 +  // Returns the current value of _should_clear_all_soft_refs.
   37.48 +  // _should_clear_all_soft_refs is set to false as a side effect.
   37.49 +  bool use_should_clear_all_soft_refs(bool v);
   37.50 +  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
   37.51 +  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
   37.52 +
   37.53 +  // Called by the GC after Soft Refs have been cleared to indicate
   37.54 +  // that the request in _should_clear_all_soft_refs has been fulfilled.
   37.55 +  void cleared_all_soft_refs();
   37.56 +
   37.57    // Identification methods.
   37.58    virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
   37.59    virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
   37.60 @@ -165,6 +194,22 @@
   37.61  
   37.62  };
   37.63  
   37.64 +class ClearedAllSoftRefs : public StackObj {
   37.65 +  bool _clear_all_soft_refs;
   37.66 +  CollectorPolicy* _collector_policy;
   37.67 + public:
   37.68 +  ClearedAllSoftRefs(bool clear_all_soft_refs,
   37.69 +                     CollectorPolicy* collector_policy) :
   37.70 +    _clear_all_soft_refs(clear_all_soft_refs),
   37.71 +    _collector_policy(collector_policy) {}
   37.72 +
   37.73 +  ~ClearedAllSoftRefs() {
   37.74 +    if (_clear_all_soft_refs) {
   37.75 +      _collector_policy->cleared_all_soft_refs();
   37.76 +    }
   37.77 +  }
   37.78 +};
   37.79 +
   37.80  class GenCollectorPolicy : public CollectorPolicy {
   37.81   protected:
   37.82    size_t _min_gen0_size;
   37.83 @@ -173,10 +218,6 @@
   37.84  
   37.85    GenerationSpec **_generations;
   37.86  
   37.87 -  // The sizing of the different generations in the heap are controlled
   37.88 -  // by a sizing policy.
   37.89 -  AdaptiveSizePolicy* _size_policy;
   37.90 -
   37.91    // Return true if an allocation should be attempted in the older
   37.92    // generation if it fails in the younger generation.  Return
   37.93    // false, otherwise.
   37.94 @@ -236,14 +277,11 @@
   37.95    virtual size_t large_typearray_limit();
   37.96  
   37.97    // Adaptive size policy
   37.98 -  AdaptiveSizePolicy* size_policy() { return _size_policy; }
   37.99    virtual void initialize_size_policy(size_t init_eden_size,
  37.100                                        size_t init_promo_size,
  37.101                                        size_t init_survivor_size);
  37.102 -
  37.103  };
  37.104  
  37.105 -
  37.106  // All of hotspot's current collectors are subtypes of this
  37.107  // class. Currently, these collectors all use the same gen[0],
  37.108  // but have different gen[1] types. If we add another subtype
    38.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Wed Apr 21 01:13:15 2010 -0700
    38.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Mon Apr 26 18:01:55 2010 -0400
    38.3 @@ -1,5 +1,5 @@
    38.4  /*
    38.5 - * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
    38.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    38.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    38.8   *
    38.9   * This code is free software; you can redistribute it and/or modify it
   38.10 @@ -594,6 +594,10 @@
   38.11      _tenuring_threshold =
   38.12        age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
   38.13  
   38.14 +    // A successful scavenge should restart the GC time limit count which is
   38.15 +    // for full GC's.
   38.16 +    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
   38.17 +    size_policy->reset_gc_overhead_limit_count();
   38.18      if (PrintGC && !PrintGCDetails) {
   38.19        gch->print_heap_change(gch_prev_used);
   38.20      }
    39.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Apr 21 01:13:15 2010 -0700
    39.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Apr 26 18:01:55 2010 -0400
    39.3 @@ -1,5 +1,5 @@
    39.4  /*
    39.5 - * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
    39.6 + * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
    39.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.8   *
    39.9   * This code is free software; you can redistribute it and/or modify it
   39.10 @@ -428,7 +428,8 @@
   39.11    assert(my_thread->is_VM_thread() ||
   39.12           my_thread->is_ConcurrentGC_thread(),
   39.13           "incorrect thread type capability");
   39.14 -  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
   39.15 +  assert(Heap_lock->is_locked(),
   39.16 +         "the requesting thread should have the Heap_lock");
   39.17    guarantee(!is_gc_active(), "collection is not reentrant");
   39.18    assert(max_level < n_gens(), "sanity check");
   39.19  
   39.20 @@ -436,6 +437,11 @@
   39.21      return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   39.22    }
   39.23  
   39.24 +  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
   39.25 +                          collector_policy()->should_clear_all_soft_refs();
   39.26 +
   39.27 +  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
   39.28 +
   39.29    const size_t perm_prev_used = perm_gen()->used();
   39.30  
   39.31    if (PrintHeapAtGC) {
   39.32 @@ -560,11 +566,11 @@
   39.33            if (rp->discovery_is_atomic()) {
   39.34              rp->verify_no_references_recorded();
   39.35              rp->enable_discovery();
   39.36 -            rp->setup_policy(clear_all_soft_refs);
   39.37 +            rp->setup_policy(do_clear_all_soft_refs);
   39.38            } else {
   39.39              // collect() below will enable discovery as appropriate
   39.40            }
   39.41 -          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
   39.42 +          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
   39.43            if (!rp->enqueuing_is_done()) {
   39.44              rp->enqueue_discovered_references();
   39.45            } else {
    40.1 --- a/src/share/vm/memory/genMarkSweep.cpp	Wed Apr 21 01:13:15 2010 -0700
    40.2 +++ b/src/share/vm/memory/genMarkSweep.cpp	Mon Apr 26 18:01:55 2010 -0400
    40.3 @@ -1,5 +1,5 @@
    40.4  /*
    40.5 - * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
    40.6 + * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
    40.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    40.8   *
    40.9   * This code is free software; you can redistribute it and/or modify it
   40.10 @@ -29,6 +29,13 @@
   40.11    bool clear_all_softrefs) {
   40.12    assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   40.13  
   40.14 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
   40.15 +#ifdef ASSERT
   40.16 +  if (gch->collector_policy()->should_clear_all_soft_refs()) {
   40.17 +    assert(clear_all_softrefs, "Policy should have been checked earlier");
   40.18 +  }
   40.19 +#endif
   40.20 +
   40.21    // hook up weak ref data so it can be used during Mark-Sweep
   40.22    assert(ref_processor() == NULL, "no stomping");
   40.23    assert(rp != NULL, "should be non-NULL");
   40.24 @@ -44,7 +51,6 @@
   40.25  
   40.26    // Increment the invocation count for the permanent generation, since it is
   40.27    // implicitly collected whenever we do a full mark sweep collection.
   40.28 -  GenCollectedHeap* gch = GenCollectedHeap::heap();
   40.29    gch->perm_gen()->stat_record()->invocations++;
   40.30  
   40.31    // Capture heap size before collection for printing.
    41.1 --- a/src/share/vm/services/g1MemoryPool.cpp	Wed Apr 21 01:13:15 2010 -0700
    41.2 +++ b/src/share/vm/services/g1MemoryPool.cpp	Mon Apr 26 18:01:55 2010 -0400
    41.3 @@ -1,5 +1,5 @@
    41.4  /*
    41.5 - * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
    41.6 + * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
    41.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    41.8   *
    41.9   * This code is free software; you can redistribute it and/or modify it
   41.10 @@ -45,7 +45,7 @@
   41.11  
   41.12  // See the comment at the top of g1MemoryPool.hpp
   41.13  size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
   41.14 -  size_t young_list_length = g1h->young_list_length();
   41.15 +  size_t young_list_length = g1h->young_list()->length();
   41.16    size_t eden_used = young_list_length * HeapRegion::GrainBytes;
   41.17    size_t survivor_used = survivor_space_used(g1h);
   41.18    eden_used = subtract_up_to_zero(eden_used, survivor_used);

mercurial