changeset:   2329:42f65821fa4e
parent:      2328:9bc798875b2a
parent:      2317:d9310331a29c
child:       2330:684faacebf20
author:      coleenp
date:        Mon, 06 Dec 2010 15:37:00 -0500
description: Merge

     1.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Sat Dec 04 00:09:05 2010 -0500
     1.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Dec 06 15:37:00 2010 -0500
     1.3 @@ -1051,6 +1051,7 @@
     1.4    void work(int worker_i) {
     1.5      assert(Thread::current()->is_ConcurrentGC_thread(),
     1.6             "this should only be done by a conc GC thread");
     1.7 +    ResourceMark rm;
     1.8  
     1.9      double start_vtime = os::elapsedVTime();
    1.10  
    1.11 @@ -1888,6 +1889,9 @@
    1.12    G1CollectedHeap* g1h   = G1CollectedHeap::heap();
    1.13    ReferenceProcessor* rp = g1h->ref_processor();
    1.14  
    1.15 +  // See the comment in G1CollectedHeap::ref_processing_init()
    1.16 +  // about how reference processing currently works in G1.
    1.17 +
    1.18    // Process weak references.
    1.19    rp->setup_policy(clear_all_soft_refs);
    1.20    assert(_markStack.isEmpty(), "mark stack should be empty");
    1.21 @@ -2918,7 +2922,11 @@
    1.22    CMOopClosure(G1CollectedHeap* g1h,
    1.23                 ConcurrentMark* cm,
    1.24                 CMTask* task)
    1.25 -    : _g1h(g1h), _cm(cm), _task(task) { }
    1.26 +    : _g1h(g1h), _cm(cm), _task(task)
    1.27 +  {
    1.28 +    _ref_processor = g1h->ref_processor();
    1.29 +    assert(_ref_processor != NULL, "should not be NULL");
    1.30 +  }
    1.31  };
    1.32  
    1.33  void CMTask::setup_for_region(HeapRegion* hr) {
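The ResourceMark added at the top of work() bounds the lifetime of temporaries allocated in the worker thread's resource area to a single invocation of the task. A minimal standalone analogue of that scope-based release pattern (illustrative C++ only; Arena and ScopedMark are invented names, not HotSpot's ResourceMark/Arena API):

    #include <cstddef>
    #include <vector>

    // Stand-in for a thread resource area: allocations are recorded in order,
    // and a ScopedMark releases everything allocated after it was taken.
    struct Arena {
      std::vector<char*> blocks;
      char* alloc(std::size_t n) {
        char* p = new char[n];
        blocks.push_back(p);
        return p;
      }
      ~Arena() { for (char* p : blocks) delete[] p; }
    };

    class ScopedMark {
      Arena&      _arena;
      std::size_t _watermark;                // arena size when the mark was taken
     public:
      explicit ScopedMark(Arena& a) : _arena(a), _watermark(a.blocks.size()) {}
      ~ScopedMark() {                        // release everything past the mark
        while (_arena.blocks.size() > _watermark) {
          delete[] _arena.blocks.back();
          _arena.blocks.pop_back();
        }
      }
    };

    void worker_step(Arena& resource_area) {
      ScopedMark rm(resource_area);          // mirrors "ResourceMark rm;" in work()
      char* scratch = resource_area.alloc(256);
      (void)scratch;                         // scratch space used during the step...
    }                                        // ...is freed here, on every invocation
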
     2.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Dec 04 00:09:05 2010 -0500
     2.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Dec 06 15:37:00 2010 -0500
     2.3 @@ -58,10 +58,11 @@
     2.4  // INVARIANTS/NOTES
     2.5  //
     2.6  // All allocation activity covered by the G1CollectedHeap interface is
     2.7 -//   serialized by acquiring the HeapLock.  This happens in
     2.8 -//   mem_allocate_work, which all such allocation functions call.
     2.9 -//   (Note that this does not apply to TLAB allocation, which is not part
    2.10 -//   of this interface: it is done by clients of this interface.)
    2.11 +// serialized by acquiring the HeapLock.  This happens in mem_allocate
    2.12 +// and allocate_new_tlab, which are the "entry" points to the
    2.13 +// allocation code from the rest of the JVM.  (Note that this does not
    2.14 +// apply to TLAB allocation, which is not part of this interface: it
    2.15 +// is done by clients of this interface.)
    2.16  
    2.17  // Local to this file.
    2.18  
    2.19 @@ -536,18 +537,20 @@
    2.20  // If could fit into free regions w/o expansion, try.
    2.21  // Otherwise, if can expand, do so.
    2.22  // Otherwise, if using ex regions might help, try with ex given back.
    2.23 -HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
    2.24 +HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
    2.25 +  assert_heap_locked_or_at_safepoint();
    2.26    assert(regions_accounted_for(), "Region leakage!");
    2.27  
    2.28 -  // We can't allocate H regions while cleanupComplete is running, since
    2.29 -  // some of the regions we find to be empty might not yet be added to the
    2.30 -  // unclean list.  (If we're already at a safepoint, this call is
    2.31 -  // unnecessary, not to mention wrong.)
    2.32 -  if (!SafepointSynchronize::is_at_safepoint())
    2.33 +  // We can't allocate humongous regions while cleanupComplete is
    2.34 +  // running, since some of the regions we find to be empty might not
    2.35 +  // yet be added to the unclean list. If we're already at a
    2.36 +  // safepoint, this call is unnecessary, not to mention wrong.
    2.37 +  if (!SafepointSynchronize::is_at_safepoint()) {
    2.38      wait_for_cleanup_complete();
    2.39 +  }
    2.40  
    2.41    size_t num_regions =
    2.42 -    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
    2.43 +         round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
    2.44  
    2.45    // Special case if < one region???
    2.46  
    2.47 @@ -598,153 +601,474 @@
    2.48    return res;
    2.49  }
    2.50  
    2.51 +void
    2.52 +G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
    2.53 +  // The cleanup operation might update _summary_bytes_used
    2.54 +  // concurrently with this method. So, right now, if we don't wait
    2.55 +  // for it to complete, updates to _summary_bytes_used might get
    2.56 +  // lost. This will be resolved in the near future when the operation
    2.57 +  // of the free region list is revamped as part of CR 6977804.
    2.58 +  wait_for_cleanup_complete();
    2.59 +
    2.60 +  retire_cur_alloc_region_common(cur_alloc_region);
    2.61 +  assert(_cur_alloc_region == NULL, "post-condition");
    2.62 +}
    2.63 +
    2.64 +// See the comment in the .hpp file about the locking protocol and
    2.65 +// assumptions of this method (and other related ones).
    2.66  HeapWord*
    2.67 -G1CollectedHeap::attempt_allocation_slow(size_t word_size,
    2.68 -                                         bool permit_collection_pause) {
    2.69 -  HeapWord* res = NULL;
    2.70 -  HeapRegion* allocated_young_region = NULL;
    2.71 -
    2.72 -  assert( SafepointSynchronize::is_at_safepoint() ||
    2.73 -          Heap_lock->owned_by_self(), "pre condition of the call" );
    2.74 -
    2.75 -  if (isHumongous(word_size)) {
    2.76 -    // Allocation of a humongous object can, in a sense, complete a
    2.77 -    // partial region, if the previous alloc was also humongous, and
    2.78 -    // caused the test below to succeed.
    2.79 -    if (permit_collection_pause)
    2.80 -      do_collection_pause_if_appropriate(word_size);
    2.81 -    res = humongousObjAllocate(word_size);
    2.82 -    assert(_cur_alloc_region == NULL
    2.83 -           || !_cur_alloc_region->isHumongous(),
    2.84 -           "Prevent a regression of this bug.");
    2.85 -
    2.86 -  } else {
    2.87 -    // We may have concurrent cleanup working at the time. Wait for it
    2.88 -    // to complete. In the future we would probably want to make the
    2.89 -    // concurrent cleanup truly concurrent by decoupling it from the
    2.90 -    // allocation.
    2.91 -    if (!SafepointSynchronize::is_at_safepoint())
    2.92 +G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
    2.93 +                                                       bool at_safepoint,
    2.94 +                                                       bool do_dirtying) {
    2.95 +  assert_heap_locked_or_at_safepoint();
    2.96 +  assert(_cur_alloc_region == NULL,
    2.97 +         "replace_cur_alloc_region_and_allocate() should only be called "
    2.98 +         "after retiring the previous current alloc region");
    2.99 +  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   2.100 +         "at_safepoint and is_at_safepoint() should be a tautology");
   2.101 +
   2.102 +  if (!g1_policy()->is_young_list_full()) {
   2.103 +    if (!at_safepoint) {
   2.104 +      // The cleanup operation might update _summary_bytes_used
   2.105 +      // concurrently with this method. So, right now, if we don't
   2.106 +      // wait for it to complete, updates to _summary_bytes_used might
   2.107 +      // get lost. This will be resolved in the near future when the
   2.108 +      // operation of the free region list is revamped as part of
   2.109 +      // CR 6977804. If we're already at a safepoint, this call is
   2.110 +      // unnecessary, not to mention wrong.
   2.111        wait_for_cleanup_complete();
   2.112 -    // If we do a collection pause, this will be reset to a non-NULL
   2.113 -    // value.  If we don't, nulling here ensures that we allocate a new
   2.114 -    // region below.
   2.115 -    if (_cur_alloc_region != NULL) {
   2.116 -      // We're finished with the _cur_alloc_region.
   2.117 -      // As we're builing (at least the young portion) of the collection
   2.118 -      // set incrementally we'll add the current allocation region to
   2.119 -      // the collection set here.
   2.120 -      if (_cur_alloc_region->is_young()) {
   2.121 -        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
   2.122 +    }
   2.123 +
   2.124 +    HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
   2.125 +                                                      false /* zero_filled */);
   2.126 +    if (new_cur_alloc_region != NULL) {
   2.127 +      assert(new_cur_alloc_region->is_empty(),
   2.128 +             "the newly-allocated region should be empty, "
   2.129 +             "as right now we only allocate new regions out of the free list");
   2.130 +      g1_policy()->update_region_num(true /* next_is_young */);
   2.131 +      _summary_bytes_used -= new_cur_alloc_region->used();
   2.132 +      set_region_short_lived_locked(new_cur_alloc_region);
   2.133 +
   2.134 +      assert(!new_cur_alloc_region->isHumongous(),
   2.135 +             "Catch a regression of this bug.");
   2.136 +
   2.137 +      // We need to ensure that the stores to _cur_alloc_region and,
   2.138 +      // subsequently, to top do not float above the setting of the
   2.139 +      // young type.
   2.140 +      OrderAccess::storestore();
   2.141 +
   2.142 +      // Now allocate out of the new current alloc region. We could
   2.143 +      // have re-used allocate_from_cur_alloc_region() but its
   2.144 +      // operation is slightly different to what we need here. First,
   2.145 +      // allocate_from_cur_alloc_region() is only called outside a
   2.146 +      // safepoint and will always unlock the Heap_lock if it returns
   2.147 +      // a non-NULL result. Second, it assumes that the current alloc
   2.148 +      // region is what's already assigned in _cur_alloc_region. What
   2.149 +      // we want here is to actually do the allocation first before we
   2.150 +      // assign the new region to _cur_alloc_region. This ordering is
   2.151 +      // not currently important, but it will be essential when we
   2.152 +      // change the code to support CAS allocation in the future (see
   2.153 +      // CR 6994297).
   2.154 +      //
   2.155 +      // This allocate method does BOT updates and we don't need them in
   2.156 +      // the young generation. This will be fixed in the near future by
   2.157 +      // CR 6994297.
   2.158 +      HeapWord* result = new_cur_alloc_region->allocate(word_size);
   2.159 +      assert(result != NULL, "we just allocate out of an empty region "
   2.160 +             "so allocation should have been successful");
   2.161 +      assert(is_in(result), "result should be in the heap");
   2.162 +
   2.163 +      _cur_alloc_region = new_cur_alloc_region;
   2.164 +
   2.165 +      if (!at_safepoint) {
   2.166 +        Heap_lock->unlock();
   2.167        }
   2.168 -      _summary_bytes_used += _cur_alloc_region->used();
   2.169 -      _cur_alloc_region = NULL;
   2.170 +
   2.171 +      // do the dirtying, if necessary, after we release the Heap_lock
   2.172 +      if (do_dirtying) {
   2.173 +        dirty_young_block(result, word_size);
   2.174 +      }
   2.175 +      return result;
   2.176      }
   2.177 -    assert(_cur_alloc_region == NULL, "Invariant.");
   2.178 -    // Completion of a heap region is perhaps a good point at which to do
   2.179 -    // a collection pause.
   2.180 -    if (permit_collection_pause)
   2.181 -      do_collection_pause_if_appropriate(word_size);
   2.182 -    // Make sure we have an allocation region available.
   2.183 -    if (_cur_alloc_region == NULL) {
   2.184 -      if (!SafepointSynchronize::is_at_safepoint())
   2.185 -        wait_for_cleanup_complete();
   2.186 -      bool next_is_young = should_set_young_locked();
   2.187 -      // If the next region is not young, make sure it's zero-filled.
   2.188 -      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
   2.189 -      if (_cur_alloc_region != NULL) {
   2.190 -        _summary_bytes_used -= _cur_alloc_region->used();
   2.191 -        if (next_is_young) {
   2.192 -          set_region_short_lived_locked(_cur_alloc_region);
   2.193 -          allocated_young_region = _cur_alloc_region;
   2.194 -        }
   2.195 +  }
   2.196 +
   2.197 +  assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
   2.198 +         "alloc region, it should still be NULL");
   2.199 +  assert_heap_locked_or_at_safepoint();
   2.200 +  return NULL;
   2.201 +}
   2.202 +
   2.203 +// See the comment in the .hpp file about the locking protocol and
   2.204 +// assumptions of this method (and other related ones).
   2.205 +HeapWord*
   2.206 +G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   2.207 +  assert_heap_locked_and_not_at_safepoint();
   2.208 +  assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
   2.209 +         "used for humongous allocations");
   2.210 +
   2.211 +  // We will loop while succeeded is false, which means that we tried
   2.212 +  // to do a collection, but the VM op did not succeed. So, when we
   2.213 +  // exit the loop, either one of the allocation attempts was
    2.214 +  // successful, or we succeeded in doing the VM op but it was
   2.215 +  // unable to allocate after the collection.
   2.216 +  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   2.217 +    bool succeeded = true;
   2.218 +
   2.219 +    {
   2.220 +      // We may have concurrent cleanup working at the time. Wait for
   2.221 +      // it to complete. In the future we would probably want to make
   2.222 +      // the concurrent cleanup truly concurrent by decoupling it from
   2.223 +      // the allocation. This will happen in the near future as part
   2.224 +      // of CR 6977804 which will revamp the operation of the free
   2.225 +      // region list. The fact that wait_for_cleanup_complete() will
   2.226 +      // do a wait() means that we'll give up the Heap_lock. So, it's
   2.227 +      // possible that when we exit wait_for_cleanup_complete() we
   2.228 +      // might be able to allocate successfully (since somebody else
   2.229 +      // might have done a collection meanwhile). So, we'll attempt to
   2.230 +      // allocate again, just in case. When we make cleanup truly
   2.231 +      // concurrent with allocation, we should remove this allocation
   2.232 +      // attempt as it's redundant (we only reach here after an
   2.233 +      // allocation attempt has been unsuccessful).
   2.234 +      wait_for_cleanup_complete();
   2.235 +      HeapWord* result = attempt_allocation(word_size);
   2.236 +      if (result != NULL) {
   2.237 +        assert_heap_not_locked();
   2.238 +        return result;
   2.239        }
   2.240      }
   2.241 +
   2.242 +    if (GC_locker::is_active_and_needs_gc()) {
   2.243 +      // We are locked out of GC because of the GC locker. Right now,
   2.244 +      // we'll just stall until the GC locker-induced GC
   2.245 +      // completes. This will be fixed in the near future by extending
   2.246 +      // the eden while waiting for the GC locker to schedule the GC
   2.247 +      // (see CR 6994056).
   2.248 +
   2.249 +      // If this thread is not in a jni critical section, we stall
   2.250 +      // the requestor until the critical section has cleared and
   2.251 +      // GC allowed. When the critical section clears, a GC is
   2.252 +      // initiated by the last thread exiting the critical section; so
   2.253 +      // we retry the allocation sequence from the beginning of the loop,
   2.254 +      // rather than causing more, now probably unnecessary, GC attempts.
   2.255 +      JavaThread* jthr = JavaThread::current();
   2.256 +      assert(jthr != NULL, "sanity");
   2.257 +      if (!jthr->in_critical()) {
   2.258 +        MutexUnlocker mul(Heap_lock);
   2.259 +        GC_locker::stall_until_clear();
   2.260 +
   2.261 +        // We'll then fall off the end of the ("if GC locker active")
   2.262 +        // if-statement and retry the allocation further down in the
   2.263 +        // loop.
   2.264 +      } else {
   2.265 +        if (CheckJNICalls) {
   2.266 +          fatal("Possible deadlock due to allocating while"
   2.267 +                " in jni critical section");
   2.268 +        }
   2.269 +        return NULL;
   2.270 +      }
   2.271 +    } else {
   2.272 +      // We are not locked out. So, let's try to do a GC. The VM op
   2.273 +      // will retry the allocation before it completes.
   2.274 +
   2.275 +      // Read the GC count while holding the Heap_lock
   2.276 +      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   2.277 +
   2.278 +      Heap_lock->unlock();
   2.279 +
   2.280 +      HeapWord* result =
   2.281 +        do_collection_pause(word_size, gc_count_before, &succeeded);
   2.282 +      assert_heap_not_locked();
   2.283 +      if (result != NULL) {
   2.284 +        assert(succeeded, "the VM op should have succeeded");
   2.285 +
   2.286 +        // Allocations that take place on VM operations do not do any
   2.287 +        // card dirtying and we have to do it here.
   2.288 +        dirty_young_block(result, word_size);
   2.289 +        return result;
   2.290 +      }
   2.291 +
   2.292 +      Heap_lock->lock();
   2.293 +    }
   2.294 +
   2.295 +    assert_heap_locked();
   2.296 +
   2.297 +    // We can reach here when we were unsuccessful in doing a GC,
   2.298 +    // because another thread beat us to it, or because we were locked
   2.299 +    // out of GC due to the GC locker. In either case a new alloc
   2.300 +    // region might be available so we will retry the allocation.
   2.301 +    HeapWord* result = attempt_allocation(word_size);
   2.302 +    if (result != NULL) {
   2.303 +      assert_heap_not_locked();
   2.304 +      return result;
   2.305 +    }
   2.306 +
   2.307 +    // So far our attempts to allocate failed. The only time we'll go
   2.308 +    // around the loop and try again is if we tried to do a GC and the
   2.309 +    // VM op that we tried to schedule was not successful because
   2.310 +    // another thread beat us to it. If that happened it's possible
   2.311 +    // that by the time we grabbed the Heap_lock again and tried to
   2.312 +    // allocate other threads filled up the young generation, which
   2.313 +    // means that the allocation attempt after the GC also failed. So,
   2.314 +    // it's worth trying to schedule another GC pause.
   2.315 +    if (succeeded) {
   2.316 +      break;
   2.317 +    }
   2.318 +
   2.319 +    // Give a warning if we seem to be looping forever.
   2.320 +    if ((QueuedAllocationWarningCount > 0) &&
   2.321 +        (try_count % QueuedAllocationWarningCount == 0)) {
   2.322 +      warning("G1CollectedHeap::attempt_allocation_slow() "
   2.323 +              "retries %d times", try_count);
   2.324 +    }
   2.325 +  }
   2.326 +
   2.327 +  assert_heap_locked();
   2.328 +  return NULL;
   2.329 +}
   2.330 +
   2.331 +// See the comment in the .hpp file about the locking protocol and
   2.332 +// assumptions of this method (and other related ones).
   2.333 +HeapWord*
   2.334 +G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   2.335 +                                              bool at_safepoint) {
   2.336 +  // This is the method that will allocate a humongous object. All
   2.337 +  // allocation paths that attempt to allocate a humongous object
   2.338 +  // should eventually reach here. Currently, the only paths are from
   2.339 +  // mem_allocate() and attempt_allocation_at_safepoint().
   2.340 +  assert_heap_locked_or_at_safepoint();
   2.341 +  assert(isHumongous(word_size), "attempt_allocation_humongous() "
   2.342 +         "should only be used for humongous allocations");
   2.343 +  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   2.344 +         "at_safepoint and is_at_safepoint() should be a tautology");
   2.345 +
   2.346 +  HeapWord* result = NULL;
   2.347 +
   2.348 +  // We will loop while succeeded is false, which means that we tried
   2.349 +  // to do a collection, but the VM op did not succeed. So, when we
   2.350 +  // exit the loop, either one of the allocation attempts was
    2.351 +  // successful, or we succeeded in doing the VM op but it was
   2.352 +  // unable to allocate after the collection.
   2.353 +  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   2.354 +    bool succeeded = true;
   2.355 +
   2.356 +    // Given that humongous objects are not allocated in young
   2.357 +    // regions, we'll first try to do the allocation without doing a
   2.358 +    // collection hoping that there's enough space in the heap.
   2.359 +    result = humongous_obj_allocate(word_size);
   2.360      assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
   2.361 -           "Prevent a regression of this bug.");
   2.362 -
   2.363 -    // Now retry the allocation.
   2.364 -    if (_cur_alloc_region != NULL) {
   2.365 -      if (allocated_young_region != NULL) {
   2.366 -        // We need to ensure that the store to top does not
   2.367 -        // float above the setting of the young type.
   2.368 -        OrderAccess::storestore();
   2.369 +           "catch a regression of this bug.");
   2.370 +    if (result != NULL) {
   2.371 +      if (!at_safepoint) {
   2.372 +        // If we're not at a safepoint, unlock the Heap_lock.
   2.373 +        Heap_lock->unlock();
   2.374        }
   2.375 -      res = _cur_alloc_region->allocate(word_size);
   2.376 +      return result;
   2.377      }
   2.378 -  }
   2.379 -
   2.380 -  // NOTE: fails frequently in PRT
   2.381 -  assert(regions_accounted_for(), "Region leakage!");
   2.382 -
   2.383 -  if (res != NULL) {
   2.384 -    if (!SafepointSynchronize::is_at_safepoint()) {
   2.385 -      assert( permit_collection_pause, "invariant" );
   2.386 -      assert( Heap_lock->owned_by_self(), "invariant" );
   2.387 +
   2.388 +    // If we failed to allocate the humongous object, we should try to
   2.389 +    // do a collection pause (if we're allowed) in case it reclaims
   2.390 +    // enough space for the allocation to succeed after the pause.
   2.391 +    if (!at_safepoint) {
   2.392 +      // Read the GC count while holding the Heap_lock
   2.393 +      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   2.394 +
   2.395 +      // If we're allowed to do a collection we're not at a
   2.396 +      // safepoint, so it is safe to unlock the Heap_lock.
   2.397        Heap_lock->unlock();
   2.398 +
   2.399 +      result = do_collection_pause(word_size, gc_count_before, &succeeded);
   2.400 +      assert_heap_not_locked();
   2.401 +      if (result != NULL) {
   2.402 +        assert(succeeded, "the VM op should have succeeded");
   2.403 +        return result;
   2.404 +      }
   2.405 +
   2.406 +      // If we get here, the VM operation either did not succeed
   2.407 +      // (i.e., another thread beat us to it) or it succeeded but
   2.408 +      // failed to allocate the object.
   2.409 +
   2.410 +      // If we're allowed to do a collection we're not at a
   2.411 +      // safepoint, so it is safe to lock the Heap_lock.
   2.412 +      Heap_lock->lock();
   2.413      }
   2.414  
   2.415 -    if (allocated_young_region != NULL) {
   2.416 -      HeapRegion* hr = allocated_young_region;
   2.417 -      HeapWord* bottom = hr->bottom();
   2.418 -      HeapWord* end = hr->end();
   2.419 -      MemRegion mr(bottom, end);
   2.420 -      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
   2.421 +    assert(result == NULL, "otherwise we should have exited the loop earlier");
   2.422 +
   2.423 +    // So far our attempts to allocate failed. The only time we'll go
   2.424 +    // around the loop and try again is if we tried to do a GC and the
   2.425 +    // VM op that we tried to schedule was not successful because
   2.426 +    // another thread beat us to it. That way it's possible that some
   2.427 +    // space was freed up by the thread that successfully scheduled a
   2.428 +    // GC. So it's worth trying to allocate again.
   2.429 +    if (succeeded) {
   2.430 +      break;
   2.431      }
   2.432 -  }
   2.433 -
   2.434 -  assert( SafepointSynchronize::is_at_safepoint() ||
   2.435 -          (res == NULL && Heap_lock->owned_by_self()) ||
   2.436 -          (res != NULL && !Heap_lock->owned_by_self()),
   2.437 -          "post condition of the call" );
   2.438 -
   2.439 -  return res;
   2.440 +
   2.441 +    // Give a warning if we seem to be looping forever.
   2.442 +    if ((QueuedAllocationWarningCount > 0) &&
   2.443 +        (try_count % QueuedAllocationWarningCount == 0)) {
   2.444 +      warning("G1CollectedHeap::attempt_allocation_humongous "
   2.445 +              "retries %d times", try_count);
   2.446 +    }
   2.447 +  }
   2.448 +
   2.449 +  assert_heap_locked_or_at_safepoint();
   2.450 +  return NULL;
   2.451 +}
   2.452 +
   2.453 +HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   2.454 +                                           bool expect_null_cur_alloc_region) {
   2.455 +  assert_at_safepoint();
   2.456 +  assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
   2.457 +         err_msg("the current alloc region was unexpectedly found "
   2.458 +                 "to be non-NULL, cur alloc region: "PTR_FORMAT" "
   2.459 +                 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
   2.460 +                 _cur_alloc_region, expect_null_cur_alloc_region, word_size));
   2.461 +
   2.462 +  if (!isHumongous(word_size)) {
   2.463 +    if (!expect_null_cur_alloc_region) {
   2.464 +      HeapRegion* cur_alloc_region = _cur_alloc_region;
   2.465 +      if (cur_alloc_region != NULL) {
   2.466 +        // This allocate method does BOT updates and we don't need them in
   2.467 +        // the young generation. This will be fixed in the near future by
   2.468 +        // CR 6994297.
   2.469 +        HeapWord* result = cur_alloc_region->allocate(word_size);
   2.470 +        if (result != NULL) {
   2.471 +          assert(is_in(result), "result should be in the heap");
   2.472 +
   2.473 +          // We will not do any dirtying here. This is guaranteed to be
   2.474 +          // called during a safepoint and the thread that scheduled the
   2.475 +          // pause will do the dirtying if we return a non-NULL result.
   2.476 +          return result;
   2.477 +        }
   2.478 +
   2.479 +        retire_cur_alloc_region_common(cur_alloc_region);
   2.480 +      }
   2.481 +    }
   2.482 +
   2.483 +    assert(_cur_alloc_region == NULL,
   2.484 +           "at this point we should have no cur alloc region");
   2.485 +    return replace_cur_alloc_region_and_allocate(word_size,
   2.486 +                                                 true, /* at_safepoint */
   2.487 +                                                 false /* do_dirtying */);
   2.488 +  } else {
   2.489 +    return attempt_allocation_humongous(word_size,
   2.490 +                                        true /* at_safepoint */);
   2.491 +  }
   2.492 +
   2.493 +  ShouldNotReachHere();
   2.494 +}
   2.495 +
   2.496 +HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   2.497 +  assert_heap_not_locked_and_not_at_safepoint();
   2.498 +  assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
   2.499 +
   2.500 +  Heap_lock->lock();
   2.501 +
   2.502 +  // First attempt: try allocating out of the current alloc region or
   2.503 +  // after replacing the current alloc region.
   2.504 +  HeapWord* result = attempt_allocation(word_size);
   2.505 +  if (result != NULL) {
   2.506 +    assert_heap_not_locked();
   2.507 +    return result;
   2.508 +  }
   2.509 +
   2.510 +  assert_heap_locked();
   2.511 +
   2.512 +  // Second attempt: go into the even slower path where we might
   2.513 +  // try to schedule a collection.
   2.514 +  result = attempt_allocation_slow(word_size);
   2.515 +  if (result != NULL) {
   2.516 +    assert_heap_not_locked();
   2.517 +    return result;
   2.518 +  }
   2.519 +
   2.520 +  assert_heap_locked();
   2.521 +  Heap_lock->unlock();
   2.522 +  return NULL;
   2.523  }
   2.524  
   2.525  HeapWord*
   2.526  G1CollectedHeap::mem_allocate(size_t word_size,
   2.527                                bool   is_noref,
   2.528                                bool   is_tlab,
   2.529 -                              bool* gc_overhead_limit_was_exceeded) {
   2.530 -  debug_only(check_for_valid_allocation_state());
   2.531 -  assert(no_gc_in_progress(), "Allocation during gc not allowed");
   2.532 -  HeapWord* result = NULL;
   2.533 +                              bool*  gc_overhead_limit_was_exceeded) {
   2.534 +  assert_heap_not_locked_and_not_at_safepoint();
    2.535 +  assert(!is_tlab, "mem_allocate() should not be called directly "
   2.536 +         "to allocate TLABs");
   2.537  
   2.538    // Loop until the allocation is satisified,
   2.539    // or unsatisfied after GC.
   2.540 -  for (int try_count = 1; /* return or throw */; try_count += 1) {
   2.541 -    int gc_count_before;
   2.542 +  for (int try_count = 1; /* we'll return */; try_count += 1) {
   2.543 +    unsigned int gc_count_before;
   2.544      {
   2.545        Heap_lock->lock();
   2.546 -      result = attempt_allocation(word_size);
   2.547 -      if (result != NULL) {
   2.548 -        // attempt_allocation should have unlocked the heap lock
   2.549 -        assert(is_in(result), "result not in heap");
   2.550 -        return result;
   2.551 +
   2.552 +      if (!isHumongous(word_size)) {
   2.553 +        // First attempt: try allocating out of the current alloc
   2.554 +        // region or after replacing the current alloc region.
   2.555 +        HeapWord* result = attempt_allocation(word_size);
   2.556 +        if (result != NULL) {
   2.557 +          assert_heap_not_locked();
   2.558 +          return result;
   2.559 +        }
   2.560 +
   2.561 +        assert_heap_locked();
   2.562 +
   2.563 +        // Second attempt: go into the even slower path where we might
   2.564 +        // try to schedule a collection.
   2.565 +        result = attempt_allocation_slow(word_size);
   2.566 +        if (result != NULL) {
   2.567 +          assert_heap_not_locked();
   2.568 +          return result;
   2.569 +        }
   2.570 +      } else {
   2.571 +        HeapWord* result = attempt_allocation_humongous(word_size,
   2.572 +                                                     false /* at_safepoint */);
   2.573 +        if (result != NULL) {
   2.574 +          assert_heap_not_locked();
   2.575 +          return result;
   2.576 +        }
   2.577        }
   2.578 +
   2.579 +      assert_heap_locked();
   2.580        // Read the gc count while the heap lock is held.
   2.581        gc_count_before = SharedHeap::heap()->total_collections();
   2.582 +      // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
   2.583        Heap_lock->unlock();
   2.584      }
   2.585  
   2.586      // Create the garbage collection operation...
   2.587 -    VM_G1CollectForAllocation op(word_size,
   2.588 -                                 gc_count_before);
   2.589 -
   2.590 +    VM_G1CollectForAllocation op(gc_count_before, word_size);
   2.591      // ...and get the VM thread to execute it.
   2.592      VMThread::execute(&op);
   2.593 -    if (op.prologue_succeeded()) {
   2.594 -      result = op.result();
   2.595 -      assert(result == NULL || is_in(result), "result not in heap");
   2.596 +
   2.597 +    assert_heap_not_locked();
   2.598 +    if (op.prologue_succeeded() && op.pause_succeeded()) {
   2.599 +      // If the operation was successful we'll return the result even
   2.600 +      // if it is NULL. If the allocation attempt failed immediately
   2.601 +      // after a Full GC, it's unlikely we'll be able to allocate now.
   2.602 +      HeapWord* result = op.result();
   2.603 +      if (result != NULL && !isHumongous(word_size)) {
   2.604 +        // Allocations that take place on VM operations do not do any
   2.605 +        // card dirtying and we have to do it here. We only have to do
   2.606 +        // this for non-humongous allocations, though.
   2.607 +        dirty_young_block(result, word_size);
   2.608 +      }
   2.609        return result;
   2.610 +    } else {
   2.611 +      assert(op.result() == NULL,
   2.612 +             "the result should be NULL if the VM op did not succeed");
   2.613      }
   2.614  
   2.615      // Give a warning if we seem to be looping forever.
   2.616      if ((QueuedAllocationWarningCount > 0) &&
   2.617          (try_count % QueuedAllocationWarningCount == 0)) {
   2.618 -      warning("G1CollectedHeap::mem_allocate_work retries %d times",
   2.619 -              try_count);
   2.620 +      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
   2.621      }
   2.622    }
   2.623 +
   2.624 +  ShouldNotReachHere();
   2.625  }
   2.626  
   2.627  void G1CollectedHeap::abandon_cur_alloc_region() {
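Both mem_allocate() and attempt_allocation_slow() above follow the same handshake: read total_collections() while the Heap_lock is held, release the lock, hand that count to the collection VM op so a pause completed by another thread in the meantime can be detected, then retry the allocation and warn every QueuedAllocationWarningCount iterations. A compact standalone sketch of that retry protocol (plain C++ with a toy bump-pointer "heap"; none of these names are HotSpot's):

    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    class ToyHeap {
      std::mutex    _lock;
      std::size_t   _capacity_words = 1024;
      std::size_t   _used_words     = 0;
      unsigned int  _total_collections = 0;

      void* allocate_locked(std::size_t word_size) {     // fast path, lock held
        if (_used_words + word_size > _capacity_words) return nullptr;
        _used_words += word_size;
        return &_used_words;                             // token, not a real block
      }

     public:
      void* allocate(std::size_t word_size, unsigned int warn_every = 1000) {
        for (unsigned int try_count = 1; ; try_count += 1) {
          unsigned int gc_count_before;
          {
            std::lock_guard<std::mutex> guard(_lock);
            if (void* p = allocate_locked(word_size)) return p;
            gc_count_before = _total_collections;        // read under the lock
          }                                              // lock released here

          bool succeeded = false;
          void* p = collect_for_allocation(word_size, gc_count_before, &succeeded);
          if (p != nullptr) return p;                    // allocated right after the pause
          if (succeeded)    return nullptr;              // GC ran but the heap is full

          // Another thread beat us to the pause; go around and retry.
          if (warn_every > 0 && try_count % warn_every == 0) {
            std::fprintf(stderr, "allocate retries %u times\n", try_count);
          }
        }
      }

      // Performs a "collection" (here: just empties the heap) unless
      // gc_count_before is stale, then retries the allocation once.
      void* collect_for_allocation(std::size_t word_size,
                                   unsigned int gc_count_before,
                                   bool* succeeded) {
        std::lock_guard<std::mutex> guard(_lock);
        if (gc_count_before != _total_collections) {     // someone collected already
          *succeeded = false;
          return allocate_locked(word_size);
        }
        _used_words = 0;                                 // stand-in for an evacuation pause
        _total_collections += 1;
        *succeeded = true;
        return allocate_locked(word_size);
      }
    };
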
   2.628 @@ -841,11 +1165,11 @@
   2.629    }
   2.630  };
   2.631  
   2.632 -void G1CollectedHeap::do_collection(bool explicit_gc,
   2.633 +bool G1CollectedHeap::do_collection(bool explicit_gc,
   2.634                                      bool clear_all_soft_refs,
   2.635                                      size_t word_size) {
   2.636    if (GC_locker::check_active_before_gc()) {
   2.637 -    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   2.638 +    return false;
   2.639    }
   2.640  
   2.641    ResourceMark rm;
   2.642 @@ -929,6 +1253,9 @@
   2.643        g1_policy()->set_full_young_gcs(true);
   2.644      }
   2.645  
   2.646 +    // See the comment in G1CollectedHeap::ref_processing_init() about
   2.647 +    // how reference processing currently works in G1.
   2.648 +
   2.649      // Temporarily make reference _discovery_ single threaded (non-MT).
   2.650      ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
   2.651  
   2.652 @@ -1047,12 +1374,19 @@
   2.653    if (PrintHeapAtGC) {
   2.654      Universe::print_heap_after_gc();
   2.655    }
   2.656 +
   2.657 +  return true;
   2.658  }
   2.659  
   2.660  void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
   2.661 -  do_collection(true,                /* explicit_gc */
   2.662 -                clear_all_soft_refs,
   2.663 -                0                    /* word_size */);
   2.664 +  // do_collection() will return whether it succeeded in performing
   2.665 +  // the GC. Currently, there is no facility on the
   2.666 +  // do_full_collection() API to notify the caller than the collection
   2.667 +  // did not succeed (e.g., because it was locked out by the GC
   2.668 +  // locker). So, right now, we'll ignore the return value.
   2.669 +  bool dummy = do_collection(true,                /* explicit_gc */
   2.670 +                             clear_all_soft_refs,
   2.671 +                             0                    /* word_size */);
   2.672  }
   2.673  
   2.674  // This code is mostly copied from TenuredGeneration.
   2.675 @@ -1175,46 +1509,74 @@
   2.676  
   2.677  
   2.678  HeapWord*
   2.679 -G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
   2.680 -  HeapWord* result = NULL;
   2.681 +G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
   2.682 +                                           bool* succeeded) {
   2.683 +  assert(SafepointSynchronize::is_at_safepoint(),
   2.684 +         "satisfy_failed_allocation() should only be called at a safepoint");
   2.685 +  assert(Thread::current()->is_VM_thread(),
   2.686 +         "satisfy_failed_allocation() should only be called by the VM thread");
   2.687 +
   2.688 +  *succeeded = true;
   2.689 +  // Let's attempt the allocation first.
   2.690 +  HeapWord* result = attempt_allocation_at_safepoint(word_size,
   2.691 +                                     false /* expect_null_cur_alloc_region */);
   2.692 +  if (result != NULL) {
   2.693 +    assert(*succeeded, "sanity");
   2.694 +    return result;
   2.695 +  }
   2.696  
   2.697    // In a G1 heap, we're supposed to keep allocation from failing by
   2.698    // incremental pauses.  Therefore, at least for now, we'll favor
   2.699    // expansion over collection.  (This might change in the future if we can
   2.700    // do something smarter than full collection to satisfy a failed alloc.)
   2.701 -
   2.702    result = expand_and_allocate(word_size);
   2.703    if (result != NULL) {
   2.704 -    assert(is_in(result), "result not in heap");
   2.705 +    assert(*succeeded, "sanity");
   2.706      return result;
   2.707    }
   2.708  
   2.709 -  // OK, I guess we have to try collection.
   2.710 -
   2.711 -  do_collection(false, false, word_size);
   2.712 -
   2.713 -  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
   2.714 -
   2.715 +  // Expansion didn't work, we'll try to do a Full GC.
   2.716 +  bool gc_succeeded = do_collection(false, /* explicit_gc */
   2.717 +                                    false, /* clear_all_soft_refs */
   2.718 +                                    word_size);
   2.719 +  if (!gc_succeeded) {
   2.720 +    *succeeded = false;
   2.721 +    return NULL;
   2.722 +  }
   2.723 +
   2.724 +  // Retry the allocation
   2.725 +  result = attempt_allocation_at_safepoint(word_size,
   2.726 +                                      true /* expect_null_cur_alloc_region */);
   2.727    if (result != NULL) {
   2.728 -    assert(is_in(result), "result not in heap");
   2.729 +    assert(*succeeded, "sanity");
   2.730      return result;
   2.731    }
   2.732  
   2.733 -  // Try collecting soft references.
   2.734 -  do_collection(false, true, word_size);
   2.735 -  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
   2.736 +  // Then, try a Full GC that will collect all soft references.
   2.737 +  gc_succeeded = do_collection(false, /* explicit_gc */
   2.738 +                               true,  /* clear_all_soft_refs */
   2.739 +                               word_size);
   2.740 +  if (!gc_succeeded) {
   2.741 +    *succeeded = false;
   2.742 +    return NULL;
   2.743 +  }
   2.744 +
   2.745 +  // Retry the allocation once more
   2.746 +  result = attempt_allocation_at_safepoint(word_size,
   2.747 +                                      true /* expect_null_cur_alloc_region */);
   2.748    if (result != NULL) {
   2.749 -    assert(is_in(result), "result not in heap");
   2.750 +    assert(*succeeded, "sanity");
   2.751      return result;
   2.752    }
   2.753  
   2.754    assert(!collector_policy()->should_clear_all_soft_refs(),
   2.755 -    "Flag should have been handled and cleared prior to this point");
   2.756 +         "Flag should have been handled and cleared prior to this point");
   2.757  
   2.758    // What else?  We might try synchronous finalization later.  If the total
   2.759    // space available is large enough for the allocation, then a more
   2.760    // complete compaction phase than we've tried so far might be
   2.761    // appropriate.
   2.762 +  assert(*succeeded, "sanity");
   2.763    return NULL;
   2.764  }
   2.765  
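satisfy_failed_allocation() above escalates through progressively heavier remedies, retrying the allocation after each one, and reports through *succeeded whether the collections themselves could be run (they cannot while, for example, the GC locker is active). A self-contained sketch of that ladder (illustrative C++ only; FallbackHeap and its fields are invented stand-ins, not HotSpot code):

    #include <cstddef>

    struct FallbackHeap {
      std::size_t free_words     = 0;      // nothing free: force the slow path
      std::size_t soft_ref_words = 64;     // reclaimable only by clearing soft refs
      bool        gc_locker_held = false;

      void* try_allocate(std::size_t words) {
        if (words > free_words) return nullptr;
        free_words -= words;
        return &free_words;                // token standing in for a heap block
      }
      void* expand_and_allocate(std::size_t words) {
        free_words += 16;                  // modest expansion step
        return try_allocate(words);
      }
      bool full_collection(bool clear_all_soft_refs) {
        if (gc_locker_held) return false;  // collection vetoed
        free_words += 32;                  // space reclaimed by the GC
        if (clear_all_soft_refs) {
          free_words += soft_ref_words;
          soft_ref_words = 0;
        }
        return true;
      }

      void* satisfy_failed_allocation(std::size_t words, bool* succeeded) {
        *succeeded = true;
        if (void* p = try_allocate(words))        return p;  // cheapest option first
        if (void* p = expand_and_allocate(words)) return p;  // grow before collecting

        if (!full_collection(false /* clear_all_soft_refs */)) {
          *succeeded = false;                                // GC could not be run
          return nullptr;
        }
        if (void* p = try_allocate(words))        return p;

        if (!full_collection(true /* clear_all_soft_refs */)) {
          *succeeded = false;
          return nullptr;
        }
        return try_allocate(words);                          // final attempt, may be null
      }
    };
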
   2.766 @@ -1224,14 +1586,20 @@
   2.767  // allocated block, or else "NULL".
   2.768  
   2.769  HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   2.770 +  assert(SafepointSynchronize::is_at_safepoint(),
   2.771 +         "expand_and_allocate() should only be called at a safepoint");
   2.772 +  assert(Thread::current()->is_VM_thread(),
   2.773 +         "expand_and_allocate() should only be called by the VM thread");
   2.774 +
   2.775    size_t expand_bytes = word_size * HeapWordSize;
   2.776    if (expand_bytes < MinHeapDeltaBytes) {
   2.777      expand_bytes = MinHeapDeltaBytes;
   2.778    }
   2.779    expand(expand_bytes);
   2.780    assert(regions_accounted_for(), "Region leakage!");
   2.781 -  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
   2.782 -  return result;
   2.783 +
   2.784 +  return attempt_allocation_at_safepoint(word_size,
   2.785 +                                     false /* expect_null_cur_alloc_region */);
   2.786  }
   2.787  
   2.788  size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
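expand_and_allocate() above converts the failed request to bytes but never expands by less than MinHeapDeltaBytes. A short worked example of that clamp (the 8-byte HeapWordSize and 128K MinHeapDeltaBytes are assumed defaults for illustration):

    #include <algorithm>
    #include <cstddef>

    // Expansion request: the failed allocation in bytes, clamped up to a minimum delta.
    std::size_t expansion_bytes(std::size_t word_size) {
      const std::size_t heap_word_size       = 8;            // assumed
      const std::size_t min_heap_delta_bytes = 128 * 1024;   // assumed
      return std::max(word_size * heap_word_size, min_heap_delta_bytes);
    }
    // expansion_bytes(1000)   == 131072  (small requests are clamped up to the minimum)
    // expansion_bytes(100000) == 800000  (large requests drive the expansion size)
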
   2.789 @@ -1649,6 +2017,24 @@
   2.790  }
   2.791  
   2.792  void G1CollectedHeap::ref_processing_init() {
   2.793 +  // Reference processing in G1 currently works as follows:
   2.794 +  //
   2.795 +  // * There is only one reference processor instance that
   2.796 +  //   'spans' the entire heap. It is created by the code
   2.797 +  //   below.
   2.798 +  // * Reference discovery is not enabled during an incremental
   2.799 +  //   pause (see 6484982).
    2.800 +  // * Discovered refs are not enqueued nor are they processed
   2.801 +  //   during an incremental pause (see 6484982).
   2.802 +  // * Reference discovery is enabled at initial marking.
   2.803 +  // * Reference discovery is disabled and the discovered
   2.804 +  //   references processed etc during remarking.
   2.805 +  // * Reference discovery is MT (see below).
   2.806 +  // * Reference discovery requires a barrier (see below).
   2.807 +  // * Reference processing is currently not MT (see 6608385).
   2.808 +  // * A full GC enables (non-MT) reference discovery and
   2.809 +  //   processes any discovered references.
   2.810 +
   2.811    SharedHeap::ref_processing_init();
   2.812    MemRegion mr = reserved_region();
   2.813    _ref_processor = ReferenceProcessor::create_ref_processor(
   2.814 @@ -1842,21 +2228,25 @@
   2.815    unsigned int full_gc_count_before;
   2.816    {
   2.817      MutexLocker ml(Heap_lock);
   2.818 +
   2.819 +    // Don't want to do a GC until cleanup is completed. This
   2.820 +    // limitation will be removed in the near future when the
   2.821 +    // operation of the free region list is revamped as part of
   2.822 +    // CR 6977804.
   2.823 +    wait_for_cleanup_complete();
   2.824 +
   2.825      // Read the GC count while holding the Heap_lock
   2.826      gc_count_before = SharedHeap::heap()->total_collections();
   2.827      full_gc_count_before = SharedHeap::heap()->total_full_collections();
   2.828 -
   2.829 -    // Don't want to do a GC until cleanup is completed.
   2.830 -    wait_for_cleanup_complete();
   2.831 -
   2.832 -    // We give up heap lock; VMThread::execute gets it back below
   2.833    }
   2.834  
   2.835    if (should_do_concurrent_full_gc(cause)) {
   2.836      // Schedule an initial-mark evacuation pause that will start a
   2.837 -    // concurrent cycle.
   2.838 +    // concurrent cycle. We're setting word_size to 0 which means that
   2.839 +    // we are not requesting a post-GC allocation.
   2.840      VM_G1IncCollectionPause op(gc_count_before,
   2.841 -                               true, /* should_initiate_conc_mark */
   2.842 +                               0,     /* word_size */
   2.843 +                               true,  /* should_initiate_conc_mark */
   2.844                                 g1_policy()->max_pause_time_ms(),
   2.845                                 cause);
   2.846      VMThread::execute(&op);
   2.847 @@ -1864,8 +2254,10 @@
   2.848      if (cause == GCCause::_gc_locker
   2.849          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
   2.850  
   2.851 -      // Schedule a standard evacuation pause.
   2.852 +      // Schedule a standard evacuation pause. We're setting word_size
   2.853 +      // to 0 which means that we are not requesting a post-GC allocation.
   2.854        VM_G1IncCollectionPause op(gc_count_before,
   2.855 +                                 0,     /* word_size */
   2.856                                   false, /* should_initiate_conc_mark */
   2.857                                   g1_policy()->max_pause_time_ms(),
   2.858                                   cause);
   2.859 @@ -2221,14 +2613,6 @@
   2.860    }
   2.861  }
   2.862  
   2.863 -HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   2.864 -  assert(!isHumongous(word_size),
   2.865 -         err_msg("a TLAB should not be of humongous size, "
   2.866 -                 "word_size = "SIZE_FORMAT, word_size));
   2.867 -  bool dummy;
   2.868 -  return G1CollectedHeap::mem_allocate(word_size, false, true, &dummy);
   2.869 -}
   2.870 -
   2.871  bool G1CollectedHeap::allocs_are_zero_filled() {
   2.872    return false;
   2.873  }
   2.874 @@ -2633,27 +3017,26 @@
   2.875    // always_do_update_barrier = true;
   2.876  }
   2.877  
   2.878 -void G1CollectedHeap::do_collection_pause() {
   2.879 -  assert(Heap_lock->owned_by_self(), "we assume we'reholding the Heap_lock");
   2.880 -
   2.881 -  // Read the GC count while holding the Heap_lock
   2.882 -  // we need to do this _before_ wait_for_cleanup_complete(), to
   2.883 -  // ensure that we do not give up the heap lock and potentially
   2.884 -  // pick up the wrong count
   2.885 -  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   2.886 -
   2.887 -  // Don't want to do a GC pause while cleanup is being completed!
   2.888 -  wait_for_cleanup_complete();
   2.889 -
   2.890 +HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
   2.891 +                                               unsigned int gc_count_before,
   2.892 +                                               bool* succeeded) {
   2.893 +  assert_heap_not_locked_and_not_at_safepoint();
   2.894    g1_policy()->record_stop_world_start();
   2.895 -  {
   2.896 -    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
   2.897 -    VM_G1IncCollectionPause op(gc_count_before,
   2.898 -                               false, /* should_initiate_conc_mark */
   2.899 -                               g1_policy()->max_pause_time_ms(),
   2.900 -                               GCCause::_g1_inc_collection_pause);
   2.901 -    VMThread::execute(&op);
   2.902 -  }
   2.903 +  VM_G1IncCollectionPause op(gc_count_before,
   2.904 +                             word_size,
   2.905 +                             false, /* should_initiate_conc_mark */
   2.906 +                             g1_policy()->max_pause_time_ms(),
   2.907 +                             GCCause::_g1_inc_collection_pause);
   2.908 +  VMThread::execute(&op);
   2.909 +
   2.910 +  HeapWord* result = op.result();
   2.911 +  bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
   2.912 +  assert(result == NULL || ret_succeeded,
   2.913 +         "the result should be NULL if the VM did not succeed");
   2.914 +  *succeeded = ret_succeeded;
   2.915 +
   2.916 +  assert_heap_not_locked();
   2.917 +  return result;
   2.918  }
   2.919  
   2.920  void
   2.921 @@ -2797,10 +3180,10 @@
   2.922  }
   2.923  #endif // TASKQUEUE_STATS
   2.924  
   2.925 -void
   2.926 +bool
   2.927  G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   2.928    if (GC_locker::check_active_before_gc()) {
   2.929 -    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   2.930 +    return false;
   2.931    }
   2.932  
   2.933    if (PrintHeapAtGC) {
   2.934 @@ -2871,6 +3254,9 @@
   2.935  
   2.936        COMPILER2_PRESENT(DerivedPointerTable::clear());
   2.937  
   2.938 +      // Please see comment in G1CollectedHeap::ref_processing_init()
   2.939 +      // to see how reference processing currently works in G1.
   2.940 +      //
   2.941        // We want to turn off ref discovery, if necessary, and turn it back on
   2.942        // on again later if we do. XXX Dubious: why is discovery disabled?
   2.943        bool was_enabled = ref_processor()->discovery_enabled();
   2.944 @@ -3068,6 +3454,8 @@
   2.945        (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
   2.946      g1_rem_set()->print_summary_info();
   2.947    }
   2.948 +
   2.949 +  return true;
   2.950  }
   2.951  
   2.952  size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
   2.953 @@ -3298,6 +3686,7 @@
   2.954    // untag the GC alloc regions and tear down the GC alloc region
   2.955    // list. It's desirable that no regions are tagged as GC alloc
   2.956    // outside GCs.
   2.957 +
   2.958    forget_alloc_region_list();
   2.959  
   2.960    // The current alloc regions contain objs that have survived
   2.961 @@ -3361,19 +3750,6 @@
   2.962  
   2.963  // *** Sequential G1 Evacuation
   2.964  
   2.965 -HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
   2.966 -  HeapRegion* alloc_region = _gc_alloc_regions[purpose];
   2.967 -  // let the caller handle alloc failure
   2.968 -  if (alloc_region == NULL) return NULL;
   2.969 -  assert(isHumongous(word_size) || !alloc_region->isHumongous(),
   2.970 -         "Either the object is humongous or the region isn't");
   2.971 -  HeapWord* block = alloc_region->allocate(word_size);
   2.972 -  if (block == NULL) {
   2.973 -    block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
   2.974 -  }
   2.975 -  return block;
   2.976 -}
   2.977 -
   2.978  class G1IsAliveClosure: public BoolObjectClosure {
   2.979    G1CollectedHeap* _g1;
   2.980  public:
   2.981 @@ -4316,6 +4692,10 @@
   2.982    }
   2.983    // Finish with the ref_processor roots.
   2.984    if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
   2.985 +    // We need to treat the discovered reference lists as roots and
   2.986 +    // keep entries (which are added by the marking threads) on them
   2.987 +    // live until they can be processed at the end of marking.
   2.988 +    ref_processor()->weak_oops_do(scan_non_heap_roots);
   2.989      ref_processor()->oops_do(scan_non_heap_roots);
   2.990    }
   2.991    g1_policy()->record_collection_pause_end_G1_strong_roots();
   2.992 @@ -4381,6 +4761,11 @@
   2.993    // on individual heap regions when we allocate from
   2.994    // them in parallel, so this seems like the correct place for this.
   2.995    retire_all_alloc_regions();
   2.996 +
   2.997 +  // Weak root processing.
   2.998 +  // Note: when JSR 292 is enabled and code blobs can contain
   2.999 +  // non-perm oops then we will need to process the code blobs
  2.1000 +  // here too.
  2.1001    {
  2.1002      G1IsAliveClosure is_alive(this);
  2.1003      G1KeepAliveClosure keep_alive(this);
  2.1004 @@ -4625,12 +5010,6 @@
  2.1005  #endif
  2.1006  }
  2.1007  
  2.1008 -void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
  2.1009 -  if (g1_policy()->should_do_collection_pause(word_size)) {
  2.1010 -    do_collection_pause();
  2.1011 -  }
  2.1012 -}
  2.1013 -
  2.1014  void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  2.1015    double young_time_ms     = 0.0;
  2.1016    double non_young_time_ms = 0.0;
  2.1017 @@ -4789,6 +5168,7 @@
  2.1018  }
  2.1019  
  2.1020  void G1CollectedHeap::wait_for_cleanup_complete() {
  2.1021 +  assert_not_at_safepoint();
  2.1022    MutexLockerEx x(Cleanup_mon);
  2.1023    wait_for_cleanup_complete_locked();
  2.1024  }
  2.1025 @@ -5093,13 +5473,6 @@
  2.1026    return n + m;
  2.1027  }
  2.1028  
  2.1029 -bool G1CollectedHeap::should_set_young_locked() {
  2.1030 -  assert(heap_lock_held_for_gc(),
  2.1031 -              "the heap lock should already be held by or for this thread");
  2.1032 -  return  (g1_policy()->in_young_gc_mode() &&
  2.1033 -           g1_policy()->should_add_next_region_to_young_list());
  2.1034 -}
  2.1035 -
  2.1036  void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  2.1037    assert(heap_lock_held_for_gc(),
  2.1038                "the heap lock should already be held by or for this thread");
     3.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Dec 04 00:09:05 2010 -0500
     3.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Dec 06 15:37:00 2010 -0500
     3.3 @@ -290,6 +290,63 @@
     3.4    // started is maintained in _total_full_collections in CollectedHeap.
     3.5    volatile unsigned int _full_collections_completed;
     3.6  
     3.7 +  // These are macros so that, if the assert fires, we get the correct
     3.8 +  // line number, file, etc.
     3.9 +
    3.10 +#define heap_locking_asserts_err_msg(__extra_message)                         \
    3.11 +  err_msg("%s : Heap_lock %slocked, %sat a safepoint",                        \
    3.12 +          (__extra_message),                                                  \
    3.13 +          (!Heap_lock->owned_by_self()) ? "NOT " : "",                        \
    3.14 +          (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
    3.15 +
    3.16 +#define assert_heap_locked()                                                  \
    3.17 +  do {                                                                        \
    3.18 +    assert(Heap_lock->owned_by_self(),                                        \
    3.19 +           heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
    3.20 +  } while (0)
    3.21 +
    3.22 +#define assert_heap_locked_or_at_safepoint()                                  \
    3.23 +  do {                                                                        \
    3.24 +    assert(Heap_lock->owned_by_self() ||                                      \
    3.25 +                                     SafepointSynchronize::is_at_safepoint(), \
    3.26 +           heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
    3.27 +                                        "should be at a safepoint"));         \
    3.28 +  } while (0)
    3.29 +
    3.30 +#define assert_heap_locked_and_not_at_safepoint()                             \
    3.31 +  do {                                                                        \
    3.32 +    assert(Heap_lock->owned_by_self() &&                                      \
    3.33 +                                    !SafepointSynchronize::is_at_safepoint(), \
    3.34 +          heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
    3.35 +                                       "should not be at a safepoint"));      \
    3.36 +  } while (0)
    3.37 +
    3.38 +#define assert_heap_not_locked()                                              \
    3.39 +  do {                                                                        \
    3.40 +    assert(!Heap_lock->owned_by_self(),                                       \
    3.41 +        heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
    3.42 +  } while (0)
    3.43 +
    3.44 +#define assert_heap_not_locked_and_not_at_safepoint()                         \
    3.45 +  do {                                                                        \
    3.46 +    assert(!Heap_lock->owned_by_self() &&                                     \
    3.47 +                                    !SafepointSynchronize::is_at_safepoint(), \
    3.48 +      heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
    3.49 +                                   "should not be at a safepoint"));          \
    3.50 +  } while (0)
    3.51 +
    3.52 +#define assert_at_safepoint()                                                 \
    3.53 +  do {                                                                        \
    3.54 +    assert(SafepointSynchronize::is_at_safepoint(),                           \
    3.55 +           heap_locking_asserts_err_msg("should be at a safepoint"));         \
    3.56 +  } while (0)
    3.57 +
    3.58 +#define assert_not_at_safepoint()                                             \
    3.59 +  do {                                                                        \
    3.60 +    assert(!SafepointSynchronize::is_at_safepoint(),                          \
    3.61 +           heap_locking_asserts_err_msg("should not be at a safepoint"));     \
    3.62 +  } while (0)
    3.63 +
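A minimal usage sketch of how these macros are meant to be paired on an allocation path, assuming the attempt_allocation() contract documented further down this header (the wrapper method shown here is hypothetical, not part of the changeset):

    HeapWord* G1CollectedHeap::example_allocate(size_t word_size) {
      // Pre-condition: the caller holds the Heap_lock and is not at a safepoint.
      assert_heap_locked_and_not_at_safepoint();
      HeapWord* result = attempt_allocation(word_size);
      if (result != NULL) {
        assert_heap_not_locked();   // on success the callee released the Heap_lock
      } else {
        assert_heap_locked();       // on failure this thread still owns it
      }
      return result;
    }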
    3.64  protected:
    3.65  
    3.66    // Returns "true" iff none of the gc alloc regions have any allocations
    3.67 @@ -329,31 +386,162 @@
    3.68  
    3.69    // Attempt to allocate an object of the given (very large) "word_size".
    3.70    // Returns "NULL" on failure.
    3.71 -  virtual HeapWord* humongousObjAllocate(size_t word_size);
    3.72 +  virtual HeapWord* humongous_obj_allocate(size_t word_size);
    3.73  
    3.74 -  // If possible, allocate a block of the given word_size, else return "NULL".
    3.75 -  // Returning NULL will trigger GC or heap expansion.
    3.76 -  // These two methods have rather awkward pre- and
    3.77 -  // post-conditions. If they are called outside a safepoint, then
    3.78 -  // they assume that the caller is holding the heap lock. Upon return
    3.79 -  // they release the heap lock, if they are returning a non-NULL
    3.80 -  // value. attempt_allocation_slow() also dirties the cards of a
    3.81 -  // newly-allocated young region after it releases the heap
    3.82 -  // lock. This change in interface was the neatest way to achieve
    3.83 -  // this card dirtying without affecting mem_allocate(), which is a
    3.84 -  // more frequently called method. We tried two or three different
    3.85 -  // approaches, but they were even more hacky.
    3.86 -  HeapWord* attempt_allocation(size_t word_size,
    3.87 -                               bool permit_collection_pause = true);
    3.88 +  // The following two methods, allocate_new_tlab() and
    3.89 +  // mem_allocate(), are the two main entry points from the runtime
    3.90 +  // into the G1's allocation routines. They have the following
    3.91 +  // assumptions:
    3.92 +  //
    3.93 +  // * They should both be called outside safepoints.
    3.94 +  //
    3.95 +  // * They should both be called without holding the Heap_lock.
    3.96 +  //
    3.97 +  // * All allocation requests for new TLABs should go to
    3.98 +  //   allocate_new_tlab().
    3.99 +  //
   3.100 +  // * All non-TLAB allocation requests should go to mem_allocate()
   3.101 +  //   and mem_allocate() should never be called with is_tlab == true.
   3.102 +  //
   3.103 +  // * If the GC locker is active we currently stall until we can
   3.104 +  //   allocate a new young region. This will be changed in the
   3.105 +  //   near future (see CR 6994056).
   3.106 +  //
   3.107 +  // * If either call cannot satisfy the allocation request using the
   3.108 +  //   current allocating region, they will try to get a new one. If
   3.109 +  //   this fails, they will attempt to do an evacuation pause and
   3.110 +  //   retry the allocation.
   3.111 +  //
   3.112 +  // * If all allocation attempts fail, even after trying to schedule
   3.113 +  //   an evacuation pause, allocate_new_tlab() will return NULL,
   3.114 +  //   whereas mem_allocate() will attempt a heap expansion and/or
   3.115 +  //   schedule a Full GC.
   3.116 +  //
   3.117 +  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
   3.118 +  //   should never be called with word_size being humongous. All
   3.119 +  //   humongous allocation requests should go to mem_allocate() which
   3.120 +  //   will satisfy them with a special path.
   3.121  
   3.122 -  HeapWord* attempt_allocation_slow(size_t word_size,
   3.123 -                                    bool permit_collection_pause = true);
   3.124 +  virtual HeapWord* allocate_new_tlab(size_t word_size);
   3.125 +
   3.126 +  virtual HeapWord* mem_allocate(size_t word_size,
   3.127 +                                 bool   is_noref,
   3.128 +                                 bool   is_tlab, /* expected to be false */
   3.129 +                                 bool*  gc_overhead_limit_was_exceeded);
   3.130 +
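For illustration only, a sketch of the split between the two entry points just described, written as a hypothetical G1CollectedHeap member (the real call sites are the shared CollectedHeap and TLAB refill code):

    HeapWord* G1CollectedHeap::example_entry_point(size_t word_size, bool for_tlab) {
      assert_heap_not_locked_and_not_at_safepoint();
      if (for_tlab) {
        // TLAB refills: word_size is never humongous on this path.
        return allocate_new_tlab(word_size);
      }
      // Everything else, including humongous requests, goes through
      // mem_allocate() with is_tlab == false.
      bool gc_overhead_limit_was_exceeded = false;
      return mem_allocate(word_size,
                          false /* is_noref */,
                          false /* is_tlab */,
                          &gc_overhead_limit_was_exceeded);
    }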
   3.131 +  // The following methods, allocate_from_cur_allocation_region(),
   3.132 +  // attempt_allocation(), replace_cur_alloc_region_and_allocate(),
   3.133 +  // attempt_allocation_slow(), and attempt_allocation_humongous()
   3.134 +  // have very awkward pre- and post-conditions with respect to
   3.135 +  // locking:
   3.136 +  //
   3.137 +  // If they are called outside a safepoint they assume the caller
   3.138 +  // holds the Heap_lock when it calls them. However, on exit they
   3.139 +  // will release the Heap_lock if they return a non-NULL result, but
   3.140 +  // keep holding the Heap_lock if they return a NULL result. The
   3.141 +  // reason for this is that we need to dirty the cards that span
   3.142 +  // allocated blocks on young regions to avoid having to take the
   3.143 +  // slow path of the write barrier (for performance reasons we don't
   3.144 +  // update RSets for references whose source is a young region, so we
   3.145 +  // don't need to look at dirty cards on young regions). But, doing
   3.146 +  // this card dirtying while holding the Heap_lock can be a
   3.147 +  // scalability bottleneck, especially given that some allocation
   3.148 +  // requests might be of non-trivial size (and the larger the region
    3.149 +  // size is, the fewer allocation requests will be considered
   3.150 +  // humongous, as the humongous size limit is a fraction of the
   3.151 +  // region size). So, when one of these calls succeeds in allocating
   3.152 +  // a block it does the card dirtying after it releases the Heap_lock
   3.153 +  // which is why it will return without holding it.
   3.154 +  //
    3.155 +  // The above asymmetry is the reason why locking / unlocking is done
    3.156 +  // explicitly (i.e., with Heap_lock->lock() and
    3.157 +  // Heap_lock->unlock()) instead of using MutexLocker and
   3.158 +  // MutexUnlocker objects. The latter would ensure that the lock is
   3.159 +  // unlocked / re-locked at every possible exit out of the basic
   3.160 +  // block. However, we only want that action to happen in selected
   3.161 +  // places.
   3.162 +  //
   3.163 +  // Further, if the above methods are called during a safepoint, then
    3.164 +  // naturally there's no assumption about the Heap_lock being held,
    3.165 +  // nor any attempt made to unlock it. The parameter at_safepoint
   3.166 +  // indicates whether the call is made during a safepoint or not (as
   3.167 +  // an optimization, to avoid reading the global flag with
   3.168 +  // SafepointSynchronize::is_at_safepoint()).
   3.169 +  //
   3.170 +  // The methods share these parameters:
   3.171 +  //
   3.172 +  // * word_size     : the size of the allocation request in words
   3.173 +  // * at_safepoint  : whether the call is done at a safepoint; this
   3.174 +  //                   also determines whether a GC is permitted
   3.175 +  //                   (at_safepoint == false) or not (at_safepoint == true)
   3.176 +  // * do_dirtying   : whether the method should dirty the allocated
   3.177 +  //                   block before returning
   3.178 +  //
   3.179 +  // They all return either the address of the block, if they
   3.180 +  // successfully manage to allocate it, or NULL.
   3.181 +
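A sketch of the asymmetric locking contract described above, assuming the attempt_allocation() behaviour documented below (the outer method is hypothetical; mem_allocate() is the real user of this pattern):

    HeapWord* G1CollectedHeap::example_outer_allocation(size_t word_size) {
      // Lock explicitly rather than via MutexLocker: a successful callee
      // has already dropped the Heap_lock (so it can dirty cards without
      // holding it), and only the failure path unlocks here.
      Heap_lock->lock();
      HeapWord* result = attempt_allocation(word_size);
      if (result != NULL) {
        assert_heap_not_locked();   // released by attempt_allocation()
        return result;
      }
      assert_heap_locked();         // still owned by this thread on failure
      Heap_lock->unlock();
      return NULL;
    }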
   3.182 +  // It tries to satisfy an allocation request out of the current
   3.183 +  // allocating region, which is passed as a parameter. It assumes
   3.184 +  // that the caller has checked that the current allocating region is
   3.185 +  // not NULL. Given that the caller has to check the current
   3.186 +  // allocating region for at least NULL, it might as well pass it as
   3.187 +  // the first parameter so that the method doesn't have to read it
   3.188 +  // from the _cur_alloc_region field again.
   3.189 +  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
   3.190 +                                                  size_t word_size);
   3.191 +
   3.192 +  // It attempts to allocate out of the current alloc region. If that
   3.193 +  // fails, it retires the current alloc region (if there is one),
   3.194 +  // tries to get a new one and retries the allocation.
   3.195 +  inline HeapWord* attempt_allocation(size_t word_size);
   3.196 +
   3.197 +  // It assumes that the current alloc region has been retired and
   3.198 +  // tries to allocate a new one. If it's successful, it performs
   3.199 +  // the allocation out of the new current alloc region and updates
   3.200 +  // _cur_alloc_region.
   3.201 +  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
   3.202 +                                                  bool at_safepoint,
   3.203 +                                                  bool do_dirtying);
   3.204 +
   3.205 +  // The slow path when we are unable to allocate a new current alloc
   3.206 +  // region to satisfy an allocation request (i.e., when
   3.207 +  // attempt_allocation() fails). It will try to do an evacuation
   3.208 +  // pause, which might stall due to the GC locker, and retry the
   3.209 +  // allocation attempt when appropriate.
   3.210 +  HeapWord* attempt_allocation_slow(size_t word_size);
   3.211 +
   3.212 +  // The method that tries to satisfy a humongous allocation
   3.213 +  // request. If it cannot satisfy it it will try to do an evacuation
   3.214 +  // pause to perhaps reclaim enough space to be able to satisfy the
   3.215 +  // allocation request afterwards.
   3.216 +  HeapWord* attempt_allocation_humongous(size_t word_size,
   3.217 +                                         bool at_safepoint);
   3.218 +
   3.219 +  // It does the common work when we are retiring the current alloc region.
   3.220 +  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
   3.221 +
   3.222 +  // It retires the current alloc region, which is passed as a
   3.223 +  // parameter (since, typically, the caller is already holding on to
   3.224 +  // it). It sets _cur_alloc_region to NULL.
   3.225 +  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
   3.226 +
   3.227 +  // It attempts to do an allocation immediately before or after an
   3.228 +  // evacuation pause and can only be called by the VM thread. It has
    3.229 +  // slightly different assumptions than the ones before (i.e.,
   3.230 +  // assumes that the current alloc region has been retired).
   3.231 +  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
   3.232 +                                            bool expect_null_cur_alloc_region);
   3.233 +
    3.234 +  // It dirties the cards that cover the block so that the post
   3.235 +  // write barrier never queues anything when updating objects on this
   3.236 +  // block. It is assumed (and in fact we assert) that the block
   3.237 +  // belongs to a young region.
   3.238 +  inline void dirty_young_block(HeapWord* start, size_t word_size);
   3.239  
   3.240    // Allocate blocks during garbage collection. Will ensure an
   3.241    // allocation region, either by picking one or expanding the
   3.242    // heap, and then allocate a block of the given size. The block
   3.243    // may not be a humongous - it must fit into a single heap region.
   3.244 -  HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
   3.245    HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
   3.246  
   3.247    HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
   3.248 @@ -370,12 +558,14 @@
   3.249    void  retire_alloc_region(HeapRegion* alloc_region, bool par);
   3.250  
   3.251    // - if explicit_gc is true, the GC is for a System.gc() or a heap
   3.252 -  // inspection request and should collect the entire heap
   3.253 -  // - if clear_all_soft_refs is true, all soft references are cleared
   3.254 -  // during the GC
   3.255 +  //   inspection request and should collect the entire heap
   3.256 +  // - if clear_all_soft_refs is true, all soft references should be
   3.257 +  //   cleared during the GC
   3.258    // - if explicit_gc is false, word_size describes the allocation that
   3.259 -  // the GC should attempt (at least) to satisfy
   3.260 -  void do_collection(bool explicit_gc,
   3.261 +  //   the GC should attempt (at least) to satisfy
   3.262 +  // - it returns false if it is unable to do the collection due to the
   3.263 +  //   GC locker being active, true otherwise
   3.264 +  bool do_collection(bool explicit_gc,
   3.265                       bool clear_all_soft_refs,
   3.266                       size_t word_size);
   3.267  
   3.268 @@ -391,13 +581,13 @@
   3.269    // Callback from VM_G1CollectForAllocation operation.
   3.270    // This function does everything necessary/possible to satisfy a
   3.271    // failed allocation request (including collection, expansion, etc.)
   3.272 -  HeapWord* satisfy_failed_allocation(size_t word_size);
   3.273 +  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
   3.274  
   3.275    // Attempting to expand the heap sufficiently
   3.276    // to support an allocation of the given "word_size".  If
   3.277    // successful, perform the allocation and return the address of the
   3.278    // allocated block, or else "NULL".
   3.279 -  virtual HeapWord* expand_and_allocate(size_t word_size);
   3.280 +  HeapWord* expand_and_allocate(size_t word_size);
   3.281  
   3.282  public:
   3.283    // Expand the garbage-first heap by at least the given size (in bytes!).
   3.284 @@ -478,21 +668,27 @@
   3.285    void reset_taskqueue_stats();
   3.286    #endif // TASKQUEUE_STATS
   3.287  
   3.288 -  // Do an incremental collection: identify a collection set, and evacuate
   3.289 -  // its live objects elsewhere.
   3.290 -  virtual void do_collection_pause();
   3.291 +  // Schedule the VM operation that will do an evacuation pause to
    3.292 +  // satisfy an allocation request of word_size. *succeeded will be
    3.293 +  // set to whether the VM operation was successful (it did do an
   3.294 +  // evacuation pause) or not (another thread beat us to it or the GC
   3.295 +  // locker was active). Given that we should not be holding the
   3.296 +  // Heap_lock when we enter this method, we will pass the
   3.297 +  // gc_count_before (i.e., total_collections()) as a parameter since
   3.298 +  // it has to be read while holding the Heap_lock. Currently, both
   3.299 +  // methods that call do_collection_pause() release the Heap_lock
   3.300 +  // before the call, so it's easy to read gc_count_before just before.
   3.301 +  HeapWord* do_collection_pause(size_t       word_size,
   3.302 +                                unsigned int gc_count_before,
   3.303 +                                bool*        succeeded);
   3.304  
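A hedged sketch of the caller pattern this comment describes (the method name is made up; the real callers are the allocation slow paths in g1CollectedHeap.cpp):

    HeapWord* G1CollectedHeap::example_pause_and_retry(size_t word_size) {
      assert_heap_locked_and_not_at_safepoint();
      // Read the collection count while still holding the Heap_lock,
      // then release the lock before scheduling the VM operation.
      unsigned int gc_count_before = total_collections();
      Heap_lock->unlock();

      bool succeeded = false;
      HeapWord* result = do_collection_pause(word_size, gc_count_before, &succeeded);
      if (result != NULL) {
        assert(succeeded, "a non-NULL result implies the pause succeeded");
      }
      return result;
    }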
   3.305    // The guts of the incremental collection pause, executed by the vm
   3.306 -  // thread.
   3.307 -  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
   3.308 +  // thread. It returns false if it is unable to do the collection due
   3.309 +  // to the GC locker being active, true otherwise
   3.310 +  bool do_collection_pause_at_safepoint(double target_pause_time_ms);
   3.311  
   3.312    // Actually do the work of evacuating the collection set.
   3.313 -  virtual void evacuate_collection_set();
   3.314 -
   3.315 -  // If this is an appropriate right time, do a collection pause.
   3.316 -  // The "word_size" argument, if non-zero, indicates the size of an
   3.317 -  // allocation request that is prompting this query.
   3.318 -  void do_collection_pause_if_appropriate(size_t word_size);
   3.319 +  void evacuate_collection_set();
   3.320  
   3.321    // The g1 remembered set of the heap.
   3.322    G1RemSet* _g1_rem_set;
   3.323 @@ -762,11 +958,6 @@
   3.324  #endif // PRODUCT
   3.325  
   3.326    // These virtual functions do the actual allocation.
   3.327 -  virtual HeapWord* mem_allocate(size_t word_size,
   3.328 -                                 bool   is_noref,
   3.329 -                                 bool   is_tlab,
   3.330 -                                 bool* gc_overhead_limit_was_exceeded);
   3.331 -
   3.332    // Some heaps may offer a contiguous region for shared non-blocking
   3.333    // allocation, via inlined code (by exporting the address of the top and
   3.334    // end fields defining the extent of the contiguous allocation region.)
   3.335 @@ -1046,7 +1237,6 @@
   3.336    virtual bool supports_tlab_allocation() const;
   3.337    virtual size_t tlab_capacity(Thread* thr) const;
   3.338    virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
   3.339 -  virtual HeapWord* allocate_new_tlab(size_t word_size);
   3.340  
   3.341    // Can a compiler initialize a new object without store barriers?
   3.342    // This permission only extends from the creation of a new object
   3.343 @@ -1186,7 +1376,6 @@
   3.344    static G1CollectedHeap* heap();
   3.345  
   3.346    void empty_young_list();
   3.347 -  bool should_set_young_locked();
   3.348  
   3.349    void set_region_short_lived_locked(HeapRegion* hr);
   3.350    // add appropriate methods for any other surv rate groups
   3.351 @@ -1339,8 +1528,6 @@
   3.352  protected:
   3.353    size_t _max_heap_capacity;
   3.354  
   3.355 -//  debug_only(static void check_for_valid_allocation_state();)
   3.356 -
   3.357  public:
   3.358    // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
   3.359    // MemoryService).  In productization, we can make this assert false
     4.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Sat Dec 04 00:09:05 2010 -0500
     4.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Mon Dec 06 15:37:00 2010 -0500
     4.3 @@ -27,6 +27,7 @@
     4.4  
     4.5  #include "gc_implementation/g1/concurrentMark.hpp"
     4.6  #include "gc_implementation/g1/g1CollectedHeap.hpp"
     4.7 +#include "gc_implementation/g1/g1CollectorPolicy.hpp"
     4.8  #include "gc_implementation/g1/heapRegionSeq.hpp"
     4.9  #include "utilities/taskqueue.hpp"
    4.10  
    4.11 @@ -58,37 +59,114 @@
    4.12    return r != NULL && r->in_collection_set();
    4.13  }
    4.14  
    4.15 -inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
    4.16 -                                              bool permit_collection_pause) {
    4.17 -  HeapWord* res = NULL;
    4.18 +// See the comment in the .hpp file about the locking protocol and
    4.19 +// assumptions of this method (and other related ones).
    4.20 +inline HeapWord*
    4.21 +G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
    4.22 +                                                size_t word_size) {
    4.23 +  assert_heap_locked_and_not_at_safepoint();
    4.24 +  assert(cur_alloc_region != NULL, "pre-condition of the method");
    4.25 +  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
    4.26 +  assert(cur_alloc_region->is_young(),
    4.27 +         "we only support young current alloc regions");
    4.28 +  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
    4.29 +         "should not be used for humongous allocations");
    4.30 +  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
    4.31  
    4.32 -  assert( SafepointSynchronize::is_at_safepoint() ||
    4.33 -          Heap_lock->owned_by_self(), "pre-condition of the call" );
    4.34 +  assert(!cur_alloc_region->is_empty(),
    4.35 +         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
    4.36 +                 cur_alloc_region->bottom(), cur_alloc_region->end()));
    4.37 +  // This allocate method does BOT updates and we don't need them in
    4.38 +  // the young generation. This will be fixed in the near future by
    4.39 +  // CR 6994297.
    4.40 +  HeapWord* result = cur_alloc_region->allocate(word_size);
    4.41 +  if (result != NULL) {
    4.42 +    assert(is_in(result), "result should be in the heap");
    4.43 +    Heap_lock->unlock();
    4.44  
    4.45 -  // All humongous allocation requests should go through the slow path in
    4.46 -  // attempt_allocation_slow().
    4.47 -  if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
    4.48 -    // If this allocation causes a region to become non empty,
    4.49 -    // then we need to update our free_regions count.
    4.50 +    // Do the dirtying after we release the Heap_lock.
    4.51 +    dirty_young_block(result, word_size);
    4.52 +    return result;
    4.53 +  }
    4.54  
    4.55 -    if (_cur_alloc_region->is_empty()) {
    4.56 -      res = _cur_alloc_region->allocate(word_size);
    4.57 -      if (res != NULL)
    4.58 -        _free_regions--;
    4.59 -    } else {
    4.60 -      res = _cur_alloc_region->allocate(word_size);
    4.61 +  assert_heap_locked();
    4.62 +  return NULL;
    4.63 +}
    4.64 +
    4.65 +// See the comment in the .hpp file about the locking protocol and
    4.66 +// assumptions of this method (and other related ones).
    4.67 +inline HeapWord*
    4.68 +G1CollectedHeap::attempt_allocation(size_t word_size) {
    4.69 +  assert_heap_locked_and_not_at_safepoint();
    4.70 +  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
    4.71 +         "for humongous allocation requests");
    4.72 +
    4.73 +  HeapRegion* cur_alloc_region = _cur_alloc_region;
    4.74 +  if (cur_alloc_region != NULL) {
    4.75 +    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
    4.76 +                                                      word_size);
    4.77 +    if (result != NULL) {
    4.78 +      assert_heap_not_locked();
    4.79 +      return result;
    4.80      }
    4.81  
    4.82 -    if (res != NULL) {
    4.83 -      if (!SafepointSynchronize::is_at_safepoint()) {
    4.84 -        assert( Heap_lock->owned_by_self(), "invariant" );
    4.85 -        Heap_lock->unlock();
    4.86 -      }
    4.87 -      return res;
    4.88 -    }
    4.89 +    assert_heap_locked();
    4.90 +
    4.91 +    // Since we couldn't successfully allocate into it, retire the
    4.92 +    // current alloc region.
    4.93 +    retire_cur_alloc_region(cur_alloc_region);
    4.94    }
    4.95 -  // attempt_allocation_slow will also unlock the heap lock when appropriate.
    4.96 -  return attempt_allocation_slow(word_size, permit_collection_pause);
    4.97 +
    4.98 +  // Try to get a new region and allocate out of it
    4.99 +  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
   4.100 +                                                      false, /* at safepoint */
   4.101 +                                                      true   /* do_dirtying */);
   4.102 +  if (result != NULL) {
   4.103 +    assert_heap_not_locked();
   4.104 +    return result;
   4.105 +  }
   4.106 +
   4.107 +  assert_heap_locked();
   4.108 +  return NULL;
   4.109 +}
   4.110 +
   4.111 +inline void
   4.112 +G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
   4.113 +  assert_heap_locked_or_at_safepoint();
   4.114 +  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
   4.115 +         "pre-condition of the call");
   4.116 +  assert(cur_alloc_region->is_young(),
   4.117 +         "we only support young current alloc regions");
   4.118 +
   4.119 +  // The region is guaranteed to be young
   4.120 +  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
   4.121 +  _summary_bytes_used += cur_alloc_region->used();
   4.122 +  _cur_alloc_region = NULL;
   4.123 +}
   4.124 +
    4.125 +// It dirties the cards that cover the block so that the post
   4.126 +// write barrier never queues anything when updating objects on this
   4.127 +// block. It is assumed (and in fact we assert) that the block
   4.128 +// belongs to a young region.
   4.129 +inline void
   4.130 +G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
   4.131 +  assert_heap_not_locked();
   4.132 +
   4.133 +  // Assign the containing region to containing_hr so that we don't
   4.134 +  // have to keep calling heap_region_containing_raw() in the
   4.135 +  // asserts below.
   4.136 +  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
   4.137 +  assert(containing_hr != NULL && start != NULL && word_size > 0,
   4.138 +         "pre-condition");
   4.139 +  assert(containing_hr->is_in(start), "it should contain start");
   4.140 +  assert(containing_hr->is_young(), "it should be young");
   4.141 +  assert(!containing_hr->isHumongous(), "it should not be humongous");
   4.142 +
   4.143 +  HeapWord* end = start + word_size;
   4.144 +  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
   4.145 +
   4.146 +  MemRegion mr(start, end);
   4.147 +  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
   4.148  }
   4.149  
   4.150  inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
     5.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Dec 04 00:09:05 2010 -0500
     5.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Dec 06 15:37:00 2010 -0500
     5.3 @@ -458,8 +458,8 @@
     5.4      double now_sec = os::elapsedTime();
     5.5      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
     5.6      double alloc_rate_ms = predict_alloc_rate_ms();
     5.7 -    int min_regions = (int) ceil(alloc_rate_ms * when_ms);
     5.8 -    int current_region_num = (int) _g1->young_list()->length();
     5.9 +    size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
    5.10 +    size_t current_region_num = _g1->young_list()->length();
    5.11      _young_list_min_length = min_regions + current_region_num;
    5.12    }
    5.13  }
    5.14 @@ -473,9 +473,12 @@
    5.15        _young_list_target_length = _young_list_fixed_length;
    5.16      else
    5.17        _young_list_target_length = _young_list_fixed_length / 2;
    5.18 -
    5.19 -    _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
    5.20    }
    5.21 +
    5.22 +  // Make sure we allow the application to allocate at least one
    5.23 +  // region before we need to do a collection again.
    5.24 +  size_t min_length = _g1->young_list()->length() + 1;
    5.25 +  _young_list_target_length = MAX2(_young_list_target_length, min_length);
    5.26    calculate_survivors_policy();
    5.27  }
    5.28  
    5.29 @@ -568,7 +571,7 @@
    5.30  
    5.31      // we should have at least one region in the target young length
    5.32      _young_list_target_length =
    5.33 -        MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
    5.34 +                              final_young_length + _recorded_survivor_regions;
    5.35  
    5.36      // let's keep an eye of how long we spend on this calculation
    5.37      // right now, I assume that we'll print it when we need it; we
    5.38 @@ -617,8 +620,7 @@
    5.39                             _young_list_min_length);
    5.40  #endif // TRACE_CALC_YOUNG_LENGTH
    5.41      // we'll do the pause as soon as possible by choosing the minimum
    5.42 -    _young_list_target_length =
    5.43 -      MAX2(_young_list_min_length, (size_t) 1);
    5.44 +    _young_list_target_length = _young_list_min_length;
    5.45    }
    5.46  
    5.47    _rs_lengths_prediction = rs_lengths;
    5.48 @@ -801,7 +803,7 @@
    5.49    _survivor_surv_rate_group->reset();
    5.50    calculate_young_list_min_length();
    5.51    calculate_young_list_target_length();
    5.52 - }
    5.53 +}
    5.54  
    5.55  void G1CollectorPolicy::record_before_bytes(size_t bytes) {
    5.56    _bytes_in_to_space_before_gc += bytes;
    5.57 @@ -824,9 +826,9 @@
    5.58        gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
    5.59    }
    5.60  
    5.61 -  assert(_g1->used_regions() == _g1->recalculate_used_regions(),
    5.62 -         "sanity");
    5.63 -  assert(_g1->used() == _g1->recalculate_used(), "sanity");
    5.64 +  assert(_g1->used() == _g1->recalculate_used(),
    5.65 +         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
    5.66 +                 _g1->used(), _g1->recalculate_used()));
    5.67  
    5.68    double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
    5.69    _all_stop_world_times_ms->add(s_w_t_ms);
    5.70 @@ -2266,24 +2268,13 @@
    5.71  #endif // PRODUCT
    5.72  }
    5.73  
    5.74 -bool
    5.75 -G1CollectorPolicy::should_add_next_region_to_young_list() {
    5.76 -  assert(in_young_gc_mode(), "should be in young GC mode");
    5.77 -  bool ret;
    5.78 -  size_t young_list_length = _g1->young_list()->length();
    5.79 -  size_t young_list_max_length = _young_list_target_length;
    5.80 -  if (G1FixedEdenSize) {
    5.81 -    young_list_max_length -= _max_survivor_regions;
    5.82 -  }
    5.83 -  if (young_list_length < young_list_max_length) {
    5.84 -    ret = true;
    5.85 +void
    5.86 +G1CollectorPolicy::update_region_num(bool young) {
    5.87 +  if (young) {
    5.88      ++_region_num_young;
    5.89    } else {
    5.90 -    ret = false;
    5.91      ++_region_num_tenured;
    5.92    }
    5.93 -
    5.94 -  return ret;
    5.95  }
    5.96  
    5.97  #ifndef PRODUCT
    5.98 @@ -2327,32 +2318,6 @@
    5.99    }
   5.100  }
   5.101  
   5.102 -bool
   5.103 -G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
   5.104 -                                                               word_size) {
   5.105 -  assert(_g1->regions_accounted_for(), "Region leakage!");
   5.106 -  double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   5.107 -
   5.108 -  size_t young_list_length = _g1->young_list()->length();
   5.109 -  size_t young_list_max_length = _young_list_target_length;
   5.110 -  if (G1FixedEdenSize) {
   5.111 -    young_list_max_length -= _max_survivor_regions;
   5.112 -  }
   5.113 -  bool reached_target_length = young_list_length >= young_list_max_length;
   5.114 -
   5.115 -  if (in_young_gc_mode()) {
   5.116 -    if (reached_target_length) {
   5.117 -      assert( young_list_length > 0 && _g1->young_list()->length() > 0,
   5.118 -              "invariant" );
   5.119 -      return true;
   5.120 -    }
   5.121 -  } else {
   5.122 -    guarantee( false, "should not reach here" );
   5.123 -  }
   5.124 -
   5.125 -  return false;
   5.126 -}
   5.127 -
   5.128  #ifndef PRODUCT
   5.129  class HRSortIndexIsOKClosure: public HeapRegionClosure {
   5.130    CollectionSetChooser* _chooser;
     6.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Sat Dec 04 00:09:05 2010 -0500
     6.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Dec 06 15:37:00 2010 -0500
     6.3 @@ -993,11 +993,6 @@
     6.4    void record_before_bytes(size_t bytes);
     6.5    void record_after_bytes(size_t bytes);
     6.6  
     6.7 -  // Returns "true" if this is a good time to do a collection pause.
     6.8 -  // The "word_size" argument, if non-zero, indicates the size of an
     6.9 -  // allocation request that is prompting this query.
    6.10 -  virtual bool should_do_collection_pause(size_t word_size) = 0;
    6.11 -
    6.12    // Choose a new collection set.  Marks the chosen regions as being
    6.13    // "in_collection_set", and links them together.  The head and number of
    6.14    // the collection set are available via access methods.
    6.15 @@ -1116,7 +1111,16 @@
    6.16      // do that for any other surv rate groups
    6.17    }
    6.18  
    6.19 -  bool should_add_next_region_to_young_list();
    6.20 +  bool is_young_list_full() {
    6.21 +    size_t young_list_length = _g1->young_list()->length();
    6.22 +    size_t young_list_max_length = _young_list_target_length;
    6.23 +    if (G1FixedEdenSize) {
    6.24 +      young_list_max_length -= _max_survivor_regions;
    6.25 +    }
    6.26 +
    6.27 +    return young_list_length >= young_list_max_length;
    6.28 +  }
    6.29 +  void update_region_num(bool young);
    6.30  
    6.31    bool in_young_gc_mode() {
    6.32      return _in_young_gc_mode;
    6.33 @@ -1270,7 +1274,6 @@
    6.34      _collectionSetChooser = new CollectionSetChooser();
    6.35    }
    6.36    void record_collection_pause_end();
    6.37 -  bool should_do_collection_pause(size_t word_size);
    6.38    // This is not needed any more, after the CSet choosing code was
    6.39    // changed to use the pause prediction work. But let's leave the
    6.40    // hook in just in case.
     7.1 --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Sat Dec 04 00:09:05 2010 -0500
     7.2 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Dec 06 15:37:00 2010 -0500
     7.3 @@ -27,13 +27,22 @@
     7.4  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
     7.5  #include "gc_implementation/g1/vm_operations_g1.hpp"
     7.6  #include "gc_implementation/shared/isGCActiveMark.hpp"
     7.8  #include "runtime/interfaceSupport.hpp"
     7.9  
    7.10 +VM_G1CollectForAllocation::VM_G1CollectForAllocation(
    7.11 +                                                  unsigned int gc_count_before,
    7.12 +                                                  size_t word_size)
    7.13 +  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
    7.14 +  guarantee(word_size > 0, "an allocation should always be requested");
    7.15 +}
    7.16 +
    7.17  void VM_G1CollectForAllocation::doit() {
    7.18    JvmtiGCForAllocationMarker jgcm;
    7.19    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    7.20 -  _res = g1h->satisfy_failed_allocation(_size);
    7.21 -  assert(g1h->is_in_or_null(_res), "result not in heap");
    7.22 +  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
    7.23 +  assert(_result == NULL || _pause_succeeded,
    7.24 +         "if we get back a result, the pause should have succeeded");
    7.25  }
    7.26  
    7.27  void VM_G1CollectFull::doit() {
    7.28 @@ -43,6 +52,25 @@
    7.29    g1h->do_full_collection(false /* clear_all_soft_refs */);
    7.30  }
    7.31  
    7.32 +VM_G1IncCollectionPause::VM_G1IncCollectionPause(
    7.33 +                                      unsigned int   gc_count_before,
    7.34 +                                      size_t         word_size,
    7.35 +                                      bool           should_initiate_conc_mark,
    7.36 +                                      double         target_pause_time_ms,
    7.37 +                                      GCCause::Cause gc_cause)
    7.38 +  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
    7.39 +    _should_initiate_conc_mark(should_initiate_conc_mark),
    7.40 +    _target_pause_time_ms(target_pause_time_ms),
    7.41 +    _full_collections_completed_before(0) {
    7.42 +  guarantee(target_pause_time_ms > 0.0,
    7.43 +            err_msg("target_pause_time_ms = %1.6lf should be positive",
    7.44 +                    target_pause_time_ms));
    7.45 +  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
    7.46 +            "we can only request an allocation if the GC cause is for "
    7.47 +            "an incremental GC pause");
    7.48 +  _gc_cause = gc_cause;
    7.49 +}
    7.50 +
    7.51  void VM_G1IncCollectionPause::doit() {
    7.52    JvmtiGCForAllocationMarker jgcm;
    7.53    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    7.54 @@ -51,6 +79,18 @@
    7.55     (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
    7.56           "only a GC locker or a System.gc() induced GC should start a cycle");
    7.57  
    7.58 +  if (_word_size > 0) {
    7.59 +    // An allocation has been requested. So, try to do that first.
    7.60 +    _result = g1h->attempt_allocation_at_safepoint(_word_size,
    7.61 +                                     false /* expect_null_cur_alloc_region */);
    7.62 +    if (_result != NULL) {
    7.63 +      // If we can successfully allocate before we actually do the
    7.64 +      // pause then we will consider this pause successful.
    7.65 +      _pause_succeeded = true;
    7.66 +      return;
    7.67 +    }
    7.68 +  }
    7.69 +
    7.70    GCCauseSetter x(g1h, _gc_cause);
    7.71    if (_should_initiate_conc_mark) {
    7.72      // It's safer to read full_collections_completed() here, given
    7.73 @@ -63,7 +103,16 @@
    7.74      // will do so if one is not already in progress.
    7.75      bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
    7.76    }
    7.77 -  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
    7.78 +
    7.79 +  _pause_succeeded =
    7.80 +    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
    7.81 +  if (_pause_succeeded && _word_size > 0) {
    7.82 +    // An allocation had been requested.
    7.83 +    _result = g1h->attempt_allocation_at_safepoint(_word_size,
    7.84 +                                      true /* expect_null_cur_alloc_region */);
    7.85 +  } else {
    7.86 +    assert(_result == NULL, "invariant");
    7.87 +  }
    7.88  }
    7.89  
    7.90  void VM_G1IncCollectionPause::doit_epilogue() {
     8.1 --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Sat Dec 04 00:09:05 2010 -0500
     8.2 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Mon Dec 06 15:37:00 2010 -0500
     8.3 @@ -31,19 +31,33 @@
     8.4  // VM_GC_Operation:
     8.5  //   - VM_CGC_Operation
     8.6  //   - VM_G1CollectFull
     8.7 -//   - VM_G1CollectForAllocation
     8.8 -//   - VM_G1IncCollectionPause
     8.9 -//   - VM_G1PopRegionCollectionPause
    8.10 +//   - VM_G1OperationWithAllocRequest
    8.11 +//     - VM_G1CollectForAllocation
    8.12 +//     - VM_G1IncCollectionPause
    8.13 +
    8.14 +class VM_G1OperationWithAllocRequest: public VM_GC_Operation {
    8.15 +protected:
    8.16 +  size_t    _word_size;
    8.17 +  HeapWord* _result;
    8.18 +  bool      _pause_succeeded;
    8.19 +
    8.20 +public:
    8.21 +  VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
    8.22 +                                 size_t       word_size)
    8.23 +    : VM_GC_Operation(gc_count_before),
    8.24 +      _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
    8.25 +  HeapWord* result() { return _result; }
    8.26 +  bool pause_succeeded() { return _pause_succeeded; }
    8.27 +};
    8.28  
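For illustration, one way such an operation is expected to be driven from a Java thread after a failed allocation (the helper and the target pause time literal are made up; the real driver is G1CollectedHeap::do_collection_pause()):

    static HeapWord* example_schedule_pause(unsigned int gc_count_before,
                                            size_t word_size) {
      VM_G1IncCollectionPause op(gc_count_before,
                                 word_size,
                                 false /* should_initiate_conc_mark */,
                                 200.0 /* target pause time (ms), illustrative */,
                                 GCCause::_g1_inc_collection_pause);
      VMThread::execute(&op);
      if (!op.pause_succeeded()) {
        // Another thread beat us to the GC, or the GC locker was active.
        return NULL;
      }
      // The pause ran: result() is the satisfied allocation, or NULL if
      // the heap is still too full to satisfy word_size.
      return op.result();
    }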
    8.29  class VM_G1CollectFull: public VM_GC_Operation {
    8.30 - public:
    8.31 +public:
    8.32    VM_G1CollectFull(unsigned int gc_count_before,
    8.33                     unsigned int full_gc_count_before,
    8.34                     GCCause::Cause cause)
    8.35      : VM_GC_Operation(gc_count_before, full_gc_count_before) {
    8.36      _gc_cause = cause;
    8.37    }
    8.38 -  ~VM_G1CollectFull() {}
    8.39    virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
    8.40    virtual void doit();
    8.41    virtual const char* name() const {
    8.42 @@ -51,45 +65,28 @@
    8.43    }
    8.44  };
    8.45  
    8.46 -class VM_G1CollectForAllocation: public VM_GC_Operation {
    8.47 - private:
    8.48 -  HeapWord*   _res;
    8.49 -  size_t      _size;                       // size of object to be allocated
    8.50 - public:
    8.51 -  VM_G1CollectForAllocation(size_t size, int gc_count_before)
    8.52 -    : VM_GC_Operation(gc_count_before) {
    8.53 -    _size        = size;
    8.54 -    _res         = NULL;
    8.55 -  }
    8.56 -  ~VM_G1CollectForAllocation()        {}
    8.57 +class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
    8.58 +public:
    8.59 +  VM_G1CollectForAllocation(unsigned int gc_count_before,
    8.60 +                            size_t       word_size);
    8.61    virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
    8.62    virtual void doit();
    8.63    virtual const char* name() const {
    8.64      return "garbage-first collection to satisfy allocation";
    8.65    }
    8.66 -  HeapWord* result() { return _res; }
    8.67  };
    8.68  
    8.69 -class VM_G1IncCollectionPause: public VM_GC_Operation {
    8.70 +class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
    8.71  private:
    8.72 -  bool _should_initiate_conc_mark;
    8.73 -  double _target_pause_time_ms;
    8.74 +  bool         _should_initiate_conc_mark;
    8.75 +  double       _target_pause_time_ms;
    8.76    unsigned int _full_collections_completed_before;
    8.77  public:
    8.78    VM_G1IncCollectionPause(unsigned int   gc_count_before,
    8.79 +                          size_t         word_size,
    8.80                            bool           should_initiate_conc_mark,
    8.81                            double         target_pause_time_ms,
    8.82 -                          GCCause::Cause cause)
    8.83 -    : VM_GC_Operation(gc_count_before),
    8.84 -      _full_collections_completed_before(0),
    8.85 -      _should_initiate_conc_mark(should_initiate_conc_mark),
    8.86 -      _target_pause_time_ms(target_pause_time_ms) {
    8.87 -    guarantee(target_pause_time_ms > 0.0,
    8.88 -              err_msg("target_pause_time_ms = %1.6lf should be positive",
    8.89 -                      target_pause_time_ms));
    8.90 -
    8.91 -    _gc_cause = cause;
    8.92 -  }
    8.93 +                          GCCause::Cause gc_cause);
    8.94    virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
    8.95    virtual void doit();
    8.96    virtual void doit_epilogue();
    8.97 @@ -103,14 +100,9 @@
    8.98  class VM_CGC_Operation: public VM_Operation {
    8.99    VoidClosure* _cl;
   8.100    const char* _printGCMessage;
   8.101 - public:
   8.102 -  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg) :
   8.103 -    _cl(cl),
   8.104 -    _printGCMessage(printGCMsg)
   8.105 -    {}
   8.106 -
   8.107 -  ~VM_CGC_Operation() {}
   8.108 -
   8.109 +public:
   8.110 +  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
   8.111 +    : _cl(cl), _printGCMessage(printGCMsg) { }
   8.112    virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   8.113    virtual void doit();
   8.114    virtual bool doit_prologue();
     9.1 --- a/src/share/vm/memory/referenceProcessor.cpp	Sat Dec 04 00:09:05 2010 -0500
     9.2 +++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Dec 06 15:37:00 2010 -0500
     9.3 @@ -770,9 +770,8 @@
     9.4    // loop over the lists
     9.5    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
     9.6      if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
     9.7 -      gclog_or_tty->print_cr(
     9.8 -        "\nAbandoning %s discovered list",
     9.9 -        list_name(i));
    9.10 +      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
    9.11 +                             list_name(i));
    9.12      }
    9.13      abandon_partial_discovered_list(_discoveredSoftRefs[i]);
    9.14    }
    9.15 @@ -1059,9 +1058,7 @@
    9.16      // During a multi-threaded discovery phase,
    9.17      // each thread saves to its "own" list.
    9.18      Thread* thr = Thread::current();
    9.19 -    assert(thr->is_GC_task_thread(),
    9.20 -           "Dubious cast from Thread* to WorkerThread*?");
    9.21 -    id = ((WorkerThread*)thr)->id();
    9.22 +    id = thr->as_Worker_thread()->id();
    9.23    } else {
    9.24      // single-threaded discovery, we save in round-robin
    9.25      // fashion to each of the lists.
    9.26 @@ -1095,8 +1092,7 @@
    9.27        ShouldNotReachHere();
    9.28    }
    9.29    if (TraceReferenceGC && PrintGCDetails) {
    9.30 -    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT,
    9.31 -      id, list);
    9.32 +    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
    9.33    }
    9.34    return list;
    9.35  }
    9.36 @@ -1135,6 +1131,11 @@
    9.37      if (_discovered_list_needs_barrier) {
    9.38        _bs->write_ref_field((void*)discovered_addr, current_head);
    9.39      }
    9.40 +
    9.41 +    if (TraceReferenceGC) {
    9.42 +      gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
    9.43 +                             obj, obj->blueprint()->internal_name());
    9.44 +    }
    9.45    } else {
    9.46      // If retest was non NULL, another thread beat us to it:
    9.47      // The reference has already been discovered...
    9.48 @@ -1239,8 +1240,8 @@
    9.49        // Check assumption that an object is not potentially
    9.50        // discovered twice except by concurrent collectors that potentially
    9.51        // trace the same Reference object twice.
    9.52 -      assert(UseConcMarkSweepGC,
    9.53 -             "Only possible with an incremental-update concurrent collector");
    9.54 +      assert(UseConcMarkSweepGC || UseG1GC,
    9.55 +             "Only possible with a concurrent marking collector");
    9.56        return true;
    9.57      }
    9.58    }
    9.59 @@ -1293,26 +1294,14 @@
    9.60      }
    9.61      list->set_head(obj);
    9.62      list->inc_length(1);
    9.63 -  }
    9.64  
    9.65 -  // In the MT discovery case, it is currently possible to see
    9.66 -  // the following message multiple times if several threads
    9.67 -  // discover a reference about the same time. Only one will
    9.68 -  // however have actually added it to the disocvered queue.
    9.69 -  // One could let add_to_discovered_list_mt() return an
    9.70 -  // indication for success in queueing (by 1 thread) or
    9.71 -  // failure (by all other threads), but I decided the extra
    9.72 -  // code was not worth the effort for something that is
    9.73 -  // only used for debugging support.
    9.74 -  if (TraceReferenceGC) {
    9.75 -    oop referent = java_lang_ref_Reference::referent(obj);
    9.76 -    if (PrintGCDetails) {
    9.77 +    if (TraceReferenceGC) {
    9.78        gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
    9.79 -                             obj, obj->blueprint()->internal_name());
    9.80 +                                obj, obj->blueprint()->internal_name());
    9.81      }
    9.82 -    assert(referent->is_oop(), "Enqueued a bad referent");
    9.83    }
    9.84    assert(obj->is_oop(), "Enqueued a bad reference");
    9.85 +  assert(java_lang_ref_Reference::referent(obj)->is_oop(), "Enqueued a bad referent");
    9.86    return true;
    9.87  }
    9.88  
    10.1 --- a/src/share/vm/runtime/thread.hpp	Sat Dec 04 00:09:05 2010 -0500
    10.2 +++ b/src/share/vm/runtime/thread.hpp	Mon Dec 06 15:37:00 2010 -0500
    10.3 @@ -78,6 +78,8 @@
    10.4  class ThreadClosure;
    10.5  class IdealGraphPrinter;
    10.6  
    10.7 +class WorkerThread;
    10.8 +
    10.9  // Class hierarchy
   10.10  // - Thread
   10.11  //   - NamedThread
   10.12 @@ -289,6 +291,10 @@
   10.13    virtual bool is_Watcher_thread() const             { return false; }
   10.14    virtual bool is_ConcurrentGC_thread() const        { return false; }
   10.15    virtual bool is_Named_thread() const               { return false; }
   10.16 +  virtual bool is_Worker_thread() const              { return false; }
   10.17 +
   10.18 +  // Casts
   10.19 +  virtual WorkerThread* as_Worker_thread() const     { return NULL; }
   10.20  
   10.21    virtual char* name() const { return (char*)"Unknown thread"; }
   10.22  
   10.23 @@ -628,9 +634,16 @@
   10.24  private:
   10.25    uint _id;
   10.26  public:
   10.27 -  WorkerThread() : _id(0) { }
   10.28 -  void set_id(uint work_id) { _id = work_id; }
   10.29 -  uint id() const { return _id; }
   10.30 +  WorkerThread() : _id(0)               { }
   10.31 +  virtual bool is_Worker_thread() const { return true; }
   10.32 +
   10.33 +  virtual WorkerThread* as_Worker_thread() const {
   10.34 +    assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
   10.35 +    return (WorkerThread*) this;
   10.36 +  }
   10.37 +
   10.38 +  void set_id(uint work_id)             { _id = work_id; }
   10.39 +  uint id() const                       { return _id; }
   10.40  };
   10.41  
   10.42  // A single WatcherThread is used for simulating timer interrupts.
