src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

author      johnc
date        Thu, 07 Apr 2011 09:53:20 -0700
changeset   2781:e1162778c1c8
parent      2643:1216415d8e35
child       2963:c3f1170908be
permissions -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
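
The fix, in outline: G1's snapshot-at-the-beginning (SATB) marking assumes that
every object live at the start of marking remains visible to the marker, but
Reference.get() can hand out the only remaining pointer to a weakly reachable
referent. Logging each non-null referent that is read into an SATB buffer keeps
the marker informed. The following minimal C++ sketch models only that control
flow; every name in it (oop_t, satb_buffer, marking_active,
referent_read_barrier) is a hypothetical stand-in, not code from this changeset.

#include <cstddef>
#include <cstdio>
#include <vector>

typedef void* oop_t;                      // stand-in for a HotSpot oop

static std::vector<oop_t> satb_buffer;    // stand-in for a per-thread SATB queue
static bool marking_active = true;        // true while concurrent marking runs

// Read barrier applied on loads of java.lang.ref.Reference::referent
// (via the Reference.get() intrinsic, JNI, reflection, and Unsafe):
// a non-null referent read during marking is logged for the marker.
static oop_t referent_read_barrier(oop_t referent) {
  if (referent != NULL && marking_active) {
    satb_buffer.push_back(referent);
  }
  return referent;
}

int main() {
  int referent_object = 0;  // dummy object standing in for a referent
  oop_t r = referent_read_barrier(&referent_object);  // simulated Reference.get()
  std::printf("referent %p logged; SATB entries: %zu\n", r, satb_buffer.size());
  return 0;
}

In HotSpot itself the logging goes through the per-thread SATB queues that G1's
concurrent marker drains; the sketch only illustrates when the enqueue happens.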

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "memory/allocation.hpp"

// Local to this file.

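// Comparator passed to GrowableArray::sort(): orders regions by start
// address; distinct regions in the sequence must never overlap.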
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
  else if (*hr1p == *hr2p) return 0;
  else {
    assert(false, "We should never compare distinct overlapping regions.");
  }
  return 0;
}

HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
  _alloc_search_start(0),
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23).  You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. It lets
  // us pass the assert in GenericGrowableArray(), which checks that a
  // growable array object must be on the C heap if its elements are.
  //
  // Note: the containing object is allocated on the C heap since it is a CHeapObj.
  //
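  // Spelled out, the effect is:
  //   ResourceObj::set_allocation_type((address)&_regions, ResourceObj::C_HEAP);
  //   _regions((int)max_size, true);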
  _regions((ResourceObj::set_allocation_type((address)&_regions,
                                             ResourceObj::C_HEAP),
            (int)max_size),
           true),
  _next_rr_candidate(0),
  _seq_bottom(NULL)
{}

// Private methods.

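// Print each maximal run of empty regions as "<start index>:<length>",
// followed by the total number of empty regions in the sequence.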
void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start = 0;
  for (int i = 0; i < _regions.length(); i++) {
    HeapRegion* r = _regions.at(i);
    if (r->continuesHumongous()) continue;
    if (r->is_empty()) {
      assert(!r->isHumongous(), "H regions should not be empty.");
      if (empty_run == 0) empty_run_start = i;
      empty_run++;
      n_empty++;
    } else {
      if (empty_run > 0) {
        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
        empty_run = 0;
      }
    }
  }
  if (empty_run > 0) {
    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
  }
  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}

int HeapRegionSeq::find(HeapRegion* hr) {
  // FIXME: optimized for adjacent regions of fixed size.
  int ind = hr->hrs_index();
  if (ind != -1) {
    assert(_regions.at(ind) == hr, "Mismatch");
  }
  return ind;
}


// Public methods.

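// Insert hr into the sequence, keeping it sorted by address and the cached
// hrs indices consistent. The common case is an in-order append; otherwise
// the whole array is re-sorted and re-indexed.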
void HeapRegionSeq::insert(HeapRegion* hr) {
  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
  if (_regions.length() == 0
      || _regions.top()->end() <= hr->bottom()) {
    hr->set_hrs_index(_regions.length());
    _regions.append(hr);
  } else {
    _regions.append(hr);
    _regions.sort(orderRegions);
    for (int i = 0; i < _regions.length(); i++) {
      _regions.at(i)->set_hrs_index(i);
    }
  }
  char* bot = (char*)_regions.at(0)->bottom();
  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
}

size_t HeapRegionSeq::length() {
  return _regions.length();
}

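// Count the run of empty, address-contiguous regions at the end (top) of
// the sequence.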
size_t HeapRegionSeq::free_suffix() {
  size_t res = 0;
  int first = _regions.length() - 1;
  int cur = first;
  while (cur >= 0 &&
         (_regions.at(cur)->is_empty()
          && (first == cur
              || (_regions.at(cur+1)->bottom() ==
                  _regions.at(cur)->end())))) {
    res++;
    cur--;
  }
  return res;
}

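// Find the first run of num consecutive empty regions whose first index is
// >= from; return the index of the first region in the run, or -1 if no
// such run exists.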
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
  assert(num > 1, "pre-condition");
  assert(0 <= from && from <= _regions.length(),
         err_msg("from: %d should be valid and <= %d",
                 from, _regions.length()));

  int curr = from;
  int first = -1;
  size_t num_so_far = 0;
  while (curr < _regions.length() && num_so_far < num) {
    HeapRegion* curr_hr = _regions.at(curr);
    if (curr_hr->is_empty()) {
      if (first == -1) {
        first = curr;
        num_so_far = 1;
      } else {
        num_so_far += 1;
      }
    } else {
      first = -1;
      num_so_far = 0;
    }
    curr += 1;
  }

  assert(num_so_far <= num, "post-condition");
  if (num_so_far == num) {
    // we found enough space for the humongous object
    assert(from <= first && first < _regions.length(), "post-condition");
    assert(first < curr && (curr - first) == (int) num, "post-condition");
    for (int i = first; i < first + (int) num; ++i) {
      assert(_regions.at(i)->is_empty(), "post-condition");
    }
    return first;
  } else {
    // we failed to find enough space for the humongous object
    return -1;
  }
}

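// As above, but start the search at the cached _alloc_search_start index,
// wrapping around to index 0 if that fails, and advance the cache past any
// run that is found.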
int HeapRegionSeq::find_contiguous(size_t num) {
  assert(num > 1, "otherwise we should not be calling this");
  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
         err_msg("_alloc_search_start: %d should be valid and <= %d",
                 _alloc_search_start, _regions.length()));

  int start = _alloc_search_start;
  int res = find_contiguous_from(start, num);
  if (res == -1 && start != 0) {
    // Try starting from the beginning. If _alloc_search_start was 0,
    // there is no point in doing this again.
    res = find_contiguous_from(0, num);
  }
  if (res != -1) {
    assert(0 <= res && res < _regions.length(),
           err_msg("res: %d should be valid", res));
    _alloc_search_start = res + (int) num;
    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
           err_msg("_alloc_search_start: %d should be valid",
                   _alloc_search_start));
  }
  return res;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);
}

// The first argument r is the heap region at which iteration begins.
// This operation runs fastest when r is NULL, or the heap region for
// which a HeapRegionClosure most recently returned true, or the
// heap region immediately to its right in the sequence.  In all
// other cases a linear search is required to find the index of r.

void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {

  // :::: FIXME ::::
  // Static cache value is bad, especially when we start doing parallel
  // remembered set update. For now just don't cache anything (the
  // code in the def'd out blocks).

#if 0
  static int cached_j = 0;
#endif
  int len = _regions.length();
  int j = 0;
  // Find the index of r.
  if (r != NULL) {
#if 0
    assert(cached_j >= 0, "Invariant.");
    if ((cached_j < len) && (r == _regions.at(cached_j))) {
      j = cached_j;
    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
      j = cached_j + 1;
    } else {
      j = find(r);
#endif
      if (j < 0) {
        j = 0;
      }
#if 0
    }
#endif
  }
  int i;
  for (i = j; i < len; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < j; i += 1) {
    int res = blk->doHeapRegion(_regions.at(i));
    if (res) {
#if 0
      cached_j = i;
#endif
      blk->incomplete();
      return;
    }
  }
}

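// Iterate from index idx to the end of the sequence, then wrap around to
// the beginning, stopping early if the closure returns true.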
void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
  int len = _regions.length();
  int i;
  for (i = idx; i < len; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
  for (i = 0; i < idx; i++) {
    if (blk->doHeapRegion(_regions.at(i))) {
      blk->incomplete();
      return;
    }
  }
}

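// Shrink the heap by removing empty regions from the top of the sequence
// until shrink_bytes have been uncommitted or a non-empty or humongous
// region is reached; return the address range that was removed.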
MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
                                   size_t& num_regions_deleted) {
  // Reset this in case it's currently pointing into the regions that
  // we are about to remove.
  _alloc_search_start = 0;

  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");

  if (_regions.length() == 0) {
    num_regions_deleted = 0;
    return MemRegion();
  }
  int j = _regions.length() - 1;
  HeapWord* end = _regions.at(j)->end();
  HeapWord* last_start = end;
  while (j >= 0 && shrink_bytes > 0) {
    HeapRegion* cur = _regions.at(j);
    // We have to leave humongous regions where they are,
    // and work around them.
    if (cur->isHumongous()) {
      return MemRegion(last_start, end);
    }
    assert(cur == _regions.top(), "Should be top");
    if (!cur->is_empty()) break;
    shrink_bytes -= cur->capacity();
    num_regions_deleted++;
    _regions.pop();
    last_start = cur->bottom();
    // We need to delete these somehow, but can't currently do so here: if
    // we do, the ZF thread may still access the deleted region.  We'll
    // leave this here as a reminder that we have to do something about
    // this.
    // delete cur;
    j--;
  }
  return MemRegion(last_start, end);
}

class PrintHeapRegionClosure : public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    gclog_or_tty->print(PTR_FORMAT ":", r);
    r->print();
    return false;
  }
};

void HeapRegionSeq::print() {
  PrintHeapRegionClosure cl;
  iterate(&cl);
}
