src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

changeset 2241
72a161e62cc4
parent 2043
2dfd013a7465
child 2314
f95d63e2154a
     1.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Mon Oct 18 15:01:41 2010 -0700
     1.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Sat Oct 16 17:12:19 2010 -0400
     1.3 @@ -91,34 +91,118 @@
     1.4    }
     1.5    if (sumSizes >= word_size) {
     1.6      _alloc_search_start = cur;
     1.7 -    // Mark the allocated regions as allocated.
     1.8 +
     1.9 +    // We need to initialize the region(s) we just discovered. This is
    1.10 +    // a bit tricky given that it can happen concurrently with
    1.11 +    // refinement threads refining cards on these regions and
    1.12 +    // potentially wanting to refine the BOT as they are scanning
    1.13 +    // those cards (this can happen shortly after a cleanup; see CR
    1.14 +    // 6991377). So we have to set up the region(s) carefully and in
    1.15 +    // a specific order.
    1.16 +
    1.17 +    // Currently, allocs_are_zero_filled() returns false. The zero
    1.18 +    // filling infrastructure will be going away soon (see CR 6977804).
    1.19 +    // So no need to do anything else here.
    1.20      bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    1.21 +    assert(!zf, "not supported");
    1.22 +
    1.23 +    // This will be the "starts humongous" region.
    1.24      HeapRegion* first_hr = _regions.at(first);
    1.25 -    for (int i = first; i < cur; i++) {
    1.26 -      HeapRegion* hr = _regions.at(i);
    1.27 -      if (zf)
    1.28 -        hr->ensure_zero_filled();
    1.29 +    {
    1.30 +      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
    1.31 +      first_hr->set_zero_fill_allocated();
    1.32 +    }
    1.33 +    // The header of the new object will be placed at the bottom of
    1.34 +    // the first region.
    1.35 +    HeapWord* new_obj = first_hr->bottom();
    1.36 +    // This will be the new end of the first region in the series that
     1.37 +    // should also match the end of the last region in the series.
    1.38 +    // (Note: sumSizes = "region size" x "number of regions we found").
    1.39 +    HeapWord* new_end = new_obj + sumSizes;
    1.40 +    // This will be the new top of the first region that will reflect
    1.41 +    // this allocation.
    1.42 +    HeapWord* new_top = new_obj + word_size;
    1.43 +
    1.44 +    // First, we need to zero the header of the space that we will be
    1.45 +    // allocating. When we update top further down, some refinement
    1.46 +    // threads might try to scan the region. By zeroing the header we
    1.47 +    // ensure that any thread that will try to scan the region will
    1.48 +    // come across the zero klass word and bail out.
    1.49 +    //
    1.50 +    // NOTE: It would not have been correct to have used
    1.51 +    // CollectedHeap::fill_with_object() and make the space look like
    1.52 +    // an int array. The thread that is doing the allocation will
    1.53 +    // later update the object header to a potentially different array
    1.54 +    // type and, for a very short period of time, the klass and length
    1.55 +    // fields will be inconsistent. This could cause a refinement
    1.56 +    // thread to calculate the object size incorrectly.
    1.57 +    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
    1.58 +
    1.59 +    // We will set up the first region as "starts humongous". This
    1.60 +    // will also update the BOT covering all the regions to reflect
    1.61 +    // that there is a single object that starts at the bottom of the
    1.62 +    // first region.
    1.63 +    first_hr->set_startsHumongous(new_end);
    1.64 +
    1.65 +    // Then, if there are any, we will set up the "continues
    1.66 +    // humongous" regions.
    1.67 +    HeapRegion* hr = NULL;
    1.68 +    for (int i = first + 1; i < cur; ++i) {
    1.69 +      hr = _regions.at(i);
    1.70        {
    1.71          MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
    1.72          hr->set_zero_fill_allocated();
    1.73        }
    1.74 -      size_t sz = hr->capacity() / HeapWordSize;
    1.75 -      HeapWord* tmp = hr->allocate(sz);
    1.76 -      assert(tmp != NULL, "Humongous allocation failure");
    1.77 -      MemRegion mr = MemRegion(tmp, sz);
    1.78 -      CollectedHeap::fill_with_object(mr);
    1.79 -      hr->declare_filled_region_to_BOT(mr);
    1.80 -      if (i == first) {
    1.81 -        first_hr->set_startsHumongous();
    1.82 +      hr->set_continuesHumongous(first_hr);
    1.83 +    }
    1.84 +    // If we have "continues humongous" regions (hr != NULL), then the
    1.85 +    // end of the last one should match new_end.
    1.86 +    assert(hr == NULL || hr->end() == new_end, "sanity");
    1.87 +
    1.88 +    // Up to this point no concurrent thread would have been able to
    1.89 +    // do any scanning on any region in this series. All the top
    1.90 +    // fields still point to bottom, so the intersection between
    1.91 +    // [bottom,top] and [card_start,card_end] will be empty. Before we
    1.92 +    // update the top fields, we'll do a storestore to make sure that
    1.93 +    // no thread sees the update to top before the zeroing of the
    1.94 +    // object header and the BOT initialization.
    1.95 +    OrderAccess::storestore();
    1.96 +
    1.97 +    // Now that the BOT and the object header have been initialized,
    1.98 +    // we can update top of the "starts humongous" region.
    1.99 +    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
   1.100 +           "new_top should be in this region");
   1.101 +    first_hr->set_top(new_top);
   1.102 +
   1.103 +    // Now, we will update the top fields of the "continues humongous"
   1.104 +    // regions. The reason we need to do this is that, otherwise,
   1.105 +    // these regions would look empty and this will confuse parts of
   1.106 +    // G1. For example, the code that looks for a consecutive number
   1.107 +    // of empty regions will consider them empty and try to
   1.108 +    // re-allocate them. We can extend is_empty() to also include
   1.109 +    // !continuesHumongous(), but it is easier to just update the top
   1.110 +    // fields here.
   1.111 +    hr = NULL;
   1.112 +    for (int i = first + 1; i < cur; ++i) {
   1.113 +      hr = _regions.at(i);
   1.114 +      if ((i + 1) == cur) {
   1.115 +        // last continues humongous region
   1.116 +        assert(hr->bottom() < new_top && new_top <= hr->end(),
   1.117 +               "new_top should fall on this region");
   1.118 +        hr->set_top(new_top);
   1.119        } else {
   1.120 -        assert(i > first, "sanity");
   1.121 -        hr->set_continuesHumongous(first_hr);
   1.122 +        // not last one
   1.123 +        assert(new_top > hr->end(), "new_top should be above this region");
   1.124 +        hr->set_top(hr->end());
   1.125        }
   1.126      }
   1.127 -    HeapWord* first_hr_bot = first_hr->bottom();
   1.128 -    HeapWord* obj_end = first_hr_bot + word_size;
   1.129 -    first_hr->set_top(obj_end);
   1.130 -    return first_hr_bot;
   1.131 +    // If we have continues humongous regions (hr != NULL), then the
   1.132 +    // end of the last one should match new_end and its top should
   1.133 +    // match new_top.
   1.134 +    assert(hr == NULL ||
   1.135 +           (hr->end() == new_end && hr->top() == new_top), "sanity");
   1.136 +
   1.137 +    return new_obj;
   1.138    } else {
   1.139      // If we started from the beginning, we want to know why we can't alloc.
   1.140      return NULL;

mercurial