src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

author       johnc
date         Thu, 22 Apr 2010 10:02:38 -0700
changeset    1829:1316cec51b4d
parent       1395:eb058abd0562
child        1907:c18cbe5936b8
permissions  -rw-r--r--

6819061: G1: eliminate serial Other times that are proportional to the collection set length
6871109: G1: remove the concept of the scan only prefix
Summary: Removed scan only regions and associated code. The young portion of the collection set is now constructed incrementally - when a young region is retired as the current allocation region it is added to the collection set.
Reviewed-by: apetrusenko, iveresov, tonyp
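A minimal sketch of the incremental collection-set idea described in the summary, using hypothetical names (Region, IncrementalCSet, retire_alloc_region) rather than the actual HotSpot types: when the current allocation region is retired and is young, it is linked into the collection set immediately, so building the young part of the collection set costs a constant amount of work per retired region instead of a pass over all young regions at the start of a pause.

    // Illustrative sketch only -- hypothetical names, not HotSpot code.
    #include <cstddef>

    struct Region {
      bool    is_young;
      Region* next_in_collection_set;
    };

    struct IncrementalCSet {
      Region* head;
      int     length;

      IncrementalCSet() : head(NULL), length(0) {}

      void add(Region* r) {
        // O(1): link the retired region into the collection set now,
        // instead of gathering all young regions at pause start.
        r->next_in_collection_set = head;
        head = r;
        length++;
      }
    };

    // Hypothetical hook called when "cur" stops being the current
    // allocation region.
    void retire_alloc_region(IncrementalCSet& cset, Region* cur) {
      if (cur->is_young) {
        cset.add(cur);
      }
    }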

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_heapRegionSeq.cpp.incl"

// Local to this file.
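// Comparison function used to keep _regions sorted by address; distinct
// regions are assumed never to overlap.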
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}

HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.  The first
  // argument, however, is actually a comma expression (new-expr, 100).
  // The purpose of the new_expr is to inform the growable array that it
  // is *already* allocated on the C heap: it uses the placement syntax to
  // keep it from actually doing any allocation.
  _regions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
                                       (void*)&_regions,
                                       ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}

// Private methods.
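// Tries to satisfy a humongous allocation of "word_size" words by finding a
// run of contiguous empty regions whose combined capacity is large enough,
// starting the search at index "ind"; returns NULL if no such run exists.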
HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //  For all i in [first, cur):
    //       _regions.at(i)->is_empty()
    //    && _regions.at(i) is contiguous with its predecessor, if any
    //  && sumSizes is the sum of the sizes of the regions in the interval
    //       [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;
    // Mark the allocated regions as allocated.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    HeapRegion* first_hr = _regions.at(first);
    for (int i = first; i < cur; i++) {
      HeapRegion* hr = _regions.at(i);
      if (zf)
        hr->ensure_zero_filled();
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      size_t sz = hr->capacity() / HeapWordSize;
      HeapWord* tmp = hr->allocate(sz);
      assert(tmp != NULL, "Humongous allocation failure");
      MemRegion mr = MemRegion(tmp, sz);
      CollectedHeap::fill_with_object(mr);
      hr->declare_filled_region_to_BOT(mr);
      if (i == first) {
        first_hr->set_startsHumongous();
      } else {
        assert(i > first, "sanity");
        hr->set_continuesHumongous(first_hr);
      }
    }
    HeapWord* first_hr_bot = first_hr->bottom();
    HeapWord* obj_end = first_hr_bot + word_size;
    first_hr->set_top(obj_end);
    return first_hr_bot;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}

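// Prints each maximal run of contiguous empty regions as a
// "start-index:length" pair, followed by the total number of empty regions.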
void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start;
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
        empty_run = 0;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}

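// Returns the index of "hr" in the sequence, or -1 if it is not present.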
int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}

// Public methods.
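// Adds "hr" to the sequence, keeping _regions sorted by address and keeping
// each region's hrs_index consistent with its position in the array.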
void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}

size_t HeapRegionSeq::length() {
  return _regions.length();
}

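// Returns the number of contiguous empty regions at the end of the sequence.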
size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
      res++;
      cur--;
  }
  return res;
}

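// Attempts a humongous allocation, first searching from the cached search
// start index and, if that fails, from the beginning of the sequence.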
HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence.  In all
// other cases a linear search is required to find the index of r.

void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
#endif
      // Even with the caching code def'd out, the index of r must still be
      // found by a search, as described in the comment above.
      j = find(r);
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}

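// As above, but iteration starts at the region with index "idx" and wraps
// around to the beginning of the sequence if necessary.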
void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

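// Removes empty regions from the end of the sequence to shrink the heap by
// up to "shrink_bytes", stopping at the first humongous or non-empty region;
// returns the MemRegion spanning the removed regions and counts them in
// "num_regions_deleted".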
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    cur->reset_zero_fill();
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region.  We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}

class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}
