src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

changeset 3666
64bf7c8270cb
parent 3456
9509c20bba28
child 3710
5c86f8211d1e
     1.1 --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Mar 16 16:14:04 2012 +0100
     1.2 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Mar 12 14:59:00 2012 -0700
     1.3 @@ -34,7 +34,8 @@
     1.4  VM_G1CollectForAllocation::VM_G1CollectForAllocation(
     1.5                                                    unsigned int gc_count_before,
     1.6                                                    size_t word_size)
     1.7 -  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
     1.8 +  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
     1.9 +                                   GCCause::_allocation_failure) {
    1.10    guarantee(word_size > 0, "an allocation should always be requested");
    1.11  }
    1.12  
    1.13 @@ -57,9 +58,10 @@
    1.14                                        bool           should_initiate_conc_mark,
    1.15                                        double         target_pause_time_ms,
    1.16                                        GCCause::Cause gc_cause)
    1.17 -  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
    1.18 +  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
    1.19      _should_initiate_conc_mark(should_initiate_conc_mark),
    1.20      _target_pause_time_ms(target_pause_time_ms),
    1.21 +    _should_retry_gc(false),
    1.22      _full_collections_completed_before(0) {
    1.23    guarantee(target_pause_time_ms > 0.0,
    1.24              err_msg("target_pause_time_ms = %1.6lf should be positive",
    1.25 @@ -70,6 +72,22 @@
    1.26    _gc_cause = gc_cause;
    1.27  }
    1.28  
    1.29 +bool VM_G1IncCollectionPause::doit_prologue() {
    1.30 +  bool res = VM_GC_Operation::doit_prologue();
    1.31 +  if (!res) {
    1.32 +    if (_should_initiate_conc_mark) {
    1.33 +      // The prologue can fail for a couple of reasons. The first is that another GC
    1.34 +      // got scheduled and prevented the scheduling of the initial mark GC. The
    1.35 +      // second is that the GC locker may be active and the heap can't be expanded.
    1.36 +      // In both cases we want to retry the GC so that the initial mark pause is
     1.37 +      // actually scheduled. In the second case, however, we should stall
     1.38 +      // until the GC locker is no longer active and then retry the initial mark GC.
    1.39 +      _should_retry_gc = true;
    1.40 +    }
    1.41 +  }
    1.42 +  return res;
    1.43 +}
    1.44 +
    1.45  void VM_G1IncCollectionPause::doit() {
    1.46    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    1.47    assert(!_should_initiate_conc_mark ||
    1.48 @@ -106,11 +124,25 @@
    1.49      // next GC pause to be an initial mark; it returns false if a
    1.50      // marking cycle is already in progress.
    1.51      //
    1.52 -    // If a marking cycle is already in progress just return and skip
    1.53 -    // the pause - the requesting thread should block in doit_epilogue
    1.54 -    // until the marking cycle is complete.
    1.55 +    // If a marking cycle is already in progress just return and skip the
    1.56 +    // pause below - if the reason for requesting this initial mark pause
    1.57 +    // was due to a System.gc() then the requesting thread should block in
    1.58 +    // doit_epilogue() until the marking cycle is complete.
    1.59 +    //
    1.60 +    // If this initial mark pause was requested as part of a humongous
    1.61 +    // allocation then we know that the marking cycle must just have
    1.62 +    // been started by another thread (possibly also allocating a humongous
    1.63 +    // object) as there was no active marking cycle when the requesting
    1.64 +    // thread checked before calling collect() in
    1.65 +    // attempt_allocation_humongous(). Retrying the GC, in this case,
    1.66 +    // will cause the requesting thread to spin inside collect() until the
    1.67 +    // just started marking cycle is complete - which may be a while. So
    1.68 +    // we do NOT retry the GC.
    1.69      if (!res) {
    1.70 -      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
    1.71 +      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
    1.72 +      if (_gc_cause != GCCause::_g1_humongous_allocation) {
    1.73 +        _should_retry_gc = true;
    1.74 +      }
    1.75        return;
    1.76      }
    1.77    }
    1.78 @@ -123,6 +155,13 @@
    1.79                                        true /* expect_null_cur_alloc_region */);
    1.80    } else {
    1.81      assert(_result == NULL, "invariant");
    1.82 +    if (!_pause_succeeded) {
     1.83 +      // Another possible reason for the pause to not be successful
    1.84 +      // is that, again, the GC locker is active (and has become active
    1.85 +      // since the prologue was executed). In this case we should retry
    1.86 +      // the pause after waiting for the GC locker to become inactive.
    1.87 +      _should_retry_gc = true;
    1.88 +    }
    1.89    }
    1.90  }
    1.91  
    1.92 @@ -168,6 +207,7 @@
    1.93  }
    1.94  
    1.95  void VM_CGC_Operation::acquire_pending_list_lock() {
    1.96 +  assert(_needs_pll, "don't call this otherwise");
    1.97    // The caller may block while communicating
    1.98    // with the SLT thread in order to acquire/release the PLL.
    1.99    ConcurrentMarkThread::slt()->
   1.100 @@ -175,6 +215,7 @@
   1.101  }
   1.102  
   1.103  void VM_CGC_Operation::release_and_notify_pending_list_lock() {
   1.104 +  assert(_needs_pll, "don't call this otherwise");
   1.105    // The caller may block while communicating
   1.106    // with the SLT thread in order to acquire/release the PLL.
   1.107    ConcurrentMarkThread::slt()->
   1.108 @@ -198,7 +239,9 @@
   1.109  bool VM_CGC_Operation::doit_prologue() {
   1.110    // Note the relative order of the locks must match that in
   1.111    // VM_GC_Operation::doit_prologue() or deadlocks can occur
   1.112 -  acquire_pending_list_lock();
   1.113 +  if (_needs_pll) {
   1.114 +    acquire_pending_list_lock();
   1.115 +  }
   1.116  
   1.117    Heap_lock->lock();
   1.118    SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
   1.119 @@ -210,5 +253,7 @@
   1.120    // VM_GC_Operation::doit_epilogue()
   1.121    SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   1.122    Heap_lock->unlock();
   1.123 -  release_and_notify_pending_list_lock();
   1.124 +  if (_needs_pll) {
   1.125 +    release_and_notify_pending_list_lock();
   1.126 +  }
   1.127  }

mercurial