src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

author       stefank
date         Tue, 23 Nov 2010 13:22:55 -0800
changeset    2314:f95d63e2154a
parent       2241:72a161e62cc4
child        2453:2250ee17e258
permissions  -rw-r--r--

6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "memory/allocation.hpp"

// Local to this file.
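//
// Address-order comparator passed to GrowableArray::sort() from
// HeapRegionSeq::insert() below: a region that lies entirely below
// another sorts first; only the very same region compares equal.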
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}

HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. This
  // lets the assert in GenericGrowableArray() pass; that assert checks
  // that a growable array object is on the C heap if its elements are.
  //
  // Note: the containing object is allocated on the C heap since it is
  // a CHeapObj.
  //
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}

// Private methods.
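
// Attempt to find a run of contiguous, empty regions, starting at index
// ind, whose combined capacity is at least word_size words. On success
// the run is set up as a single humongous object (a "starts humongous"
// first region followed by "continues humongous" regions) and the start
// of the new object is returned; otherwise NULL is returned.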
HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    //  For all i in [first, cur):
    //       _regions.at(i)->is_empty()
    //    && _regions.at(i) is contiguous with its predecessor, if any
    //  && sumSizes is the sum of the sizes of the regions in the interval
    //       [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;

    // We need to initialize the region(s) we just discovered. This is
    // a bit tricky given that it can happen concurrently with
    // refinement threads refining cards on these regions and
    // potentially wanting to refine the BOT as they are scanning
    // those cards (this can happen shortly after a cleanup; see CR
    // 6991377). So we have to set up the region(s) carefully and in
    // a specific order.

    // Currently, allocs_are_zero_filled() returns false. The zero
    // filling infrastructure will be going away soon (see CR 6977804).
    // So no need to do anything else here.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    assert(!zf, "not supported");

    // This will be the "starts humongous" region.
    HeapRegion* first_hr = _regions.at(first);
    {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      first_hr->set_zero_fill_allocated();
    }
    // The header of the new object will be placed at the bottom of
    // the first region.
    HeapWord* new_obj = first_hr->bottom();
    // This will be the new end of the first region in the series that
    // should also match the end of the last region in the series.
    // (Note: sumSizes = "region size" x "number of regions we found").
    HeapWord* new_end = new_obj + sumSizes;
    // This will be the new top of the first region that will reflect
    // this allocation.
    HeapWord* new_top = new_obj + word_size;

    // First, we need to zero the header of the space that we will be
    // allocating. When we update top further down, some refinement
    // threads might try to scan the region. By zeroing the header we
    // ensure that any thread that will try to scan the region will
    // come across the zero klass word and bail out.
    //
    // NOTE: It would not have been correct to have used
    // CollectedHeap::fill_with_object() and make the space look like
    // an int array. The thread that is doing the allocation will
    // later update the object header to a potentially different array
    // type and, for a very short period of time, the klass and length
    // fields will be inconsistent. This could cause a refinement
    // thread to calculate the object size incorrectly.
    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

    // We will set up the first region as "starts humongous". This
    // will also update the BOT covering all the regions to reflect
    // that there is a single object that starts at the bottom of the
    // first region.
    first_hr->set_startsHumongous(new_end);

    // Then, if there are any, we will set up the "continues
    // humongous" regions.
    HeapRegion* hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      hr->set_continuesHumongous(first_hr);
    }
    // If we have "continues humongous" regions (hr != NULL), then the
    // end of the last one should match new_end.
    assert(hr == NULL || hr->end() == new_end, "sanity");

    // Up to this point no concurrent thread would have been able to
    // do any scanning on any region in this series. All the top
    // fields still point to bottom, so the intersection between
    // [bottom,top] and [card_start,card_end] will be empty. Before we
    // update the top fields, we'll do a storestore to make sure that
    // no thread sees the update to top before the zeroing of the
    // object header and the BOT initialization.
    OrderAccess::storestore();

    // Now that the BOT and the object header have been initialized,
    // we can update top of the "starts humongous" region.
    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
           "new_top should be in this region");
    first_hr->set_top(new_top);

    // Now, we will update the top fields of the "continues humongous"
    // regions. The reason we need to do this is that, otherwise,
    // these regions would look empty and this would confuse parts of
    // G1. For example, the code that looks for a consecutive number
    // of empty regions will consider them empty and try to
    // re-allocate them. We could extend is_empty() to also include
    // !continuesHumongous(), but it is easier to just update the top
    // fields here.
    hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      if ((i + 1) == cur) {
        // last continues humongous region
        assert(hr->bottom() < new_top && new_top <= hr->end(),
               "new_top should fall on this region");
        hr->set_top(new_top);
      } else {
        // not last one
        assert(new_top > hr->end(), "new_top should be above this region");
        hr->set_top(hr->end());
      }
    }
    // If we have continues humongous regions (hr != NULL), then the
    // end of the last one should match new_end and its top should
    // match new_top.
    assert(hr == NULL ||
           (hr->end() == new_end && hr->top() == new_top), "sanity");

    return new_obj;
  } else {
    // If we started from the beginning, we want to know why we can't alloc.
    return NULL;
  }
}
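
// Print each run of consecutive empty regions as a "start_index:length"
// pair, followed by the total number of empty regions.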
void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start = 0;
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
        empty_run = 0;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
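
// Return hr's hrs_index (its position within _regions), or -1 if it has
// not been assigned one.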
int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}

// Public methods.
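
// Add hr to the sequence, keeping _regions sorted by address and the
// regions' hrs_index values consistent with their positions; _seq_bottom
// tracks the lowest region bottom seen so far.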
void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}

size_t HeapRegionSeq::length() {
  return _regions.length();
}
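
// Return the number of contiguous, empty regions at the end (the top)
// of the sequence.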
size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
      res++;
      cur--;
  }
  return res;
}
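
// Attempt a humongous allocation: search for space starting at the
// cached _alloc_search_start index and, if that fails, retry once from
// the beginning of the sequence.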
HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence.  In all
// other cases a linear search is required to find the index of r.

void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}
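
// Apply blk to every region, starting at index idx and wrapping around
// to the beginning; stop early (and mark the closure incomplete) as soon
// as doHeapRegion() returns true.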
void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}
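
// Pop empty regions off the end of the sequence until either shrink_bytes
// worth of capacity has been removed or a non-empty or humongous region is
// reached. num_regions_deleted is incremented for each region removed (the
// caller is expected to initialize it); the returned MemRegion covers the
// space that was given up.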
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    cur->reset_zero_fill();
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region.  We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}
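
// Closure used by HeapRegionSeq::print() below: prints each region's
// address followed by the region itself.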
class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}
