src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp

author        johnc
date          Mon, 02 Aug 2010 12:51:43 -0700
changeset     2060   2d160770d2e5
parent        1907   c18cbe5936b8
child         2314   f95d63e2154a
permissions   -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

/*
 * Copyright (c) 2001, 2007, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// A BufferingOopClosure tries to separate the cost of finding roots
// from the cost of applying closures to them.  It maintains an array of
// ref-containing locations.  Until the array is full, applying the closure
// to an oop* merely records that location in the array.  Since the cost of
// applying this closure is small, an elapsed timer can approximately
// attribute all of this cost to the cost of finding the roots.  When the
// array fills up, the wrapped closure is applied to all elements, keeping
// track of the elapsed time of this process, and leaving the array empty.
// The caller must be sure to call "done" to process any unprocessed
// buffered entries.

class Generation;
class HeapRegion;

class BufferingOopClosure: public OopClosure {
protected:
  enum PrivateConstants {
    BufferLength = 1024
  };

  StarTask  _buffer[BufferLength];
  StarTask* _buffer_top;
  StarTask* _buffer_curr;

  OopClosure* _oc;
  double      _closure_app_seconds;

  void process_buffer () {
    double start = os::elapsedTime();
    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
      if (curr->is_narrow()) {
        assert(UseCompressedOops, "Error");
        _oc->do_oop((narrowOop*)(*curr));
      } else {
        _oc->do_oop((oop*)(*curr));
      }
    }
    _buffer_curr = _buffer;
    _closure_app_seconds += (os::elapsedTime() - start);
  }

  template <class T> inline void do_oop_work(T* p) {
    if (_buffer_curr == _buffer_top) {
      process_buffer();
    }
    StarTask new_ref(p);
    *_buffer_curr = new_ref;
    ++_buffer_curr;
  }

public:
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_work(p); }

  void done () {
    if (_buffer_curr > _buffer) {
      process_buffer();
    }
  }
  double closure_app_seconds () {
    return _closure_app_seconds;
  }
  BufferingOopClosure (OopClosure *oc) :
    _oc(oc),
    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
    _closure_app_seconds(0.0) { }
};
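
// Illustrative usage sketch (not part of the original header).  The names
// "scan_roots" and "example_process_roots" below are hypothetical; only the
// BufferingOopClosure interface defined above is assumed:
//
//   void example_process_roots(OopClosure* oc) {
//     BufferingOopClosure boc(oc);   // wrap the real closure
//     scan_roots(&boc);              // each do_oop() call merely buffers the location
//     boc.done();                    // apply the wrapped closure to any leftover entries
//     double secs = boc.closure_app_seconds();
//     // "secs" approximates the time spent applying the wrapped closure,
//     // separate from the time spent finding the roots.
//   }

// A BufferingOopClosure adapted to the OopsInGenClosure interface.  Oops are
// buffered as above; generation changes are forwarded to the wrapped closure,
// and reset_generation() flushes the buffer first so that buffered entries
// are processed while the current generation is still set.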
class BufferingOopsInGenClosure: public OopsInGenClosure {
  BufferingOopClosure _boc;
  OopsInGenClosure* _oc;
 protected:
  template <class T> inline void do_oop_work(T* p) {
    assert(generation()->is_in_reserved((void*)p), "Must be in!");
    _boc.do_oop(p);
  }
 public:
  BufferingOopsInGenClosure(OopsInGenClosure *oc) :
    _boc(oc), _oc(oc) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_work(p); }

  void done() {
    _boc.done();
  }

  double closure_app_seconds () {
    return _boc.closure_app_seconds();
  }

  void set_generation(Generation* gen) {
    OopsInGenClosure::set_generation(gen);
    _oc->set_generation(gen);
  }

  void reset_generation() {
    // Make sure we finish the current work with the current generation.
    _boc.done();
    OopsInGenClosure::reset_generation();
    _oc->reset_generation();
  }

};
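
// A buffering closure specialized for OopsInHeapRegionClosures.  Alongside
// each buffered location it records the heap region the reference came from
// (the closure's current "_from" region), so that process_buffer() can call
// set_region() on the wrapped closure before applying it to each entry.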
class BufferingOopsInHeapRegionClosure: public OopsInHeapRegionClosure {
private:
  enum PrivateConstants {
    BufferLength = 1024
  };

  StarTask     _buffer[BufferLength];
  StarTask*    _buffer_top;
  StarTask*    _buffer_curr;

  HeapRegion*  _hr_buffer[BufferLength];
  HeapRegion** _hr_curr;

  OopsInHeapRegionClosure*  _oc;
  double                    _closure_app_seconds;

  void process_buffer () {

    assert((_hr_curr - _hr_buffer) == (_buffer_curr - _buffer),
           "the two lengths should be the same");

    double start = os::elapsedTime();
    HeapRegion** hr_curr = _hr_buffer;
    HeapRegion*  hr_prev = NULL;
    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
      HeapRegion* region = *hr_curr;
      if (region != hr_prev) {
        _oc->set_region(region);
        hr_prev = region;
      }
      if (curr->is_narrow()) {
        assert(UseCompressedOops, "Error");
        _oc->do_oop((narrowOop*)(*curr));
      } else {
        _oc->do_oop((oop*)(*curr));
      }
      ++hr_curr;
    }
    _buffer_curr = _buffer;
    _hr_curr = _hr_buffer;
    _closure_app_seconds += (os::elapsedTime() - start);
  }

public:
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (_buffer_curr == _buffer_top) {
      assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
      process_buffer();
    }
    StarTask new_ref(p);
    *_buffer_curr = new_ref;
    ++_buffer_curr;
    *_hr_curr = _from;
    ++_hr_curr;
  }
  void done () {
    if (_buffer_curr > _buffer) {
      assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
      process_buffer();
    }
  }
  double closure_app_seconds () {
    return _closure_app_seconds;
  }
  BufferingOopsInHeapRegionClosure (OopsInHeapRegionClosure *oc) :
    _oc(oc),
    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
    _hr_curr(_hr_buffer),
    _closure_app_seconds(0.0) { }
};
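
// Illustrative usage sketch (not part of the original header).  "regions",
// "n" and "iterate_oops_in" are hypothetical; only the interface defined
// above, plus OopsInHeapRegionClosure::set_region(), is assumed:
//
//   void example_scan_regions(OopsInHeapRegionClosure* oc,
//                             HeapRegion** regions, int n) {
//     BufferingOopsInHeapRegionClosure boc(oc);
//     for (int i = 0; i < n; ++i) {
//       boc.set_region(regions[i]);        // subsequent oops are tagged with this region
//       iterate_oops_in(regions[i], &boc);
//     }
//     boc.done();                          // flush the buffer; set_region() is replayed
//                                          //   on the wrapped closure as the region changes
//     double secs = boc.closure_app_seconds();
//   }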
