Thu, 28 Mar 2013 10:27:28 +0100
7014552: gc/lock/jni/jnilockXXX works too slow on 1-processor machine
Summary: Keep a counter of how many times we were stalled by the GC locker, and add a diagnostic flag which sets the limit.
Reviewed-by: brutisso, ehelin, johnc
1.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Mar 27 10:55:37 2013 +0100 1.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Mar 28 10:27:28 2013 +0100 1.3 @@ -854,7 +854,8 @@ 1.4 assert(!isHumongous(word_size), "we do not allow humongous TLABs"); 1.5 1.6 unsigned int dummy_gc_count_before; 1.7 - return attempt_allocation(word_size, &dummy_gc_count_before); 1.8 + int dummy_gclocker_retry_count = 0; 1.9 + return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count); 1.10 } 1.11 1.12 HeapWord* 1.13 @@ -863,14 +864,14 @@ 1.14 assert_heap_not_locked_and_not_at_safepoint(); 1.15 1.16 // Loop until the allocation is satisified, or unsatisfied after GC. 1.17 - for (int try_count = 1; /* we'll return */; try_count += 1) { 1.18 + for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) { 1.19 unsigned int gc_count_before; 1.20 1.21 HeapWord* result = NULL; 1.22 if (!isHumongous(word_size)) { 1.23 - result = attempt_allocation(word_size, &gc_count_before); 1.24 + result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count); 1.25 } else { 1.26 - result = attempt_allocation_humongous(word_size, &gc_count_before); 1.27 + result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count); 1.28 } 1.29 if (result != NULL) { 1.30 return result; 1.31 @@ -894,6 +895,9 @@ 1.32 } 1.33 return result; 1.34 } else { 1.35 + if (gclocker_retry_count > GCLockerRetryAllocationCount) { 1.36 + return NULL; 1.37 + } 1.38 assert(op.result() == NULL, 1.39 "the result should be NULL if the VM op did not succeed"); 1.40 } 1.41 @@ -910,7 +914,8 @@ 1.42 } 1.43 1.44 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, 1.45 - unsigned int *gc_count_before_ret) { 1.46 + unsigned int *gc_count_before_ret, 1.47 + int* gclocker_retry_count_ret) { 1.48 // Make sure you read the note in attempt_allocation_humongous(). 
1.49 1.50 assert_heap_not_locked_and_not_at_safepoint(); 1.51 @@ -986,10 +991,16 @@ 1.52 return NULL; 1.53 } 1.54 } else { 1.55 + if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) { 1.56 + MutexLockerEx x(Heap_lock); 1.57 + *gc_count_before_ret = total_collections(); 1.58 + return NULL; 1.59 + } 1.60 // The GCLocker is either active or the GCLocker initiated 1.61 // GC has not yet been performed. Stall until it is and 1.62 // then retry the allocation. 1.63 GC_locker::stall_until_clear(); 1.64 + (*gclocker_retry_count_ret) += 1; 1.65 } 1.66 1.67 // We can reach here if we were unsuccessul in scheduling a 1.68 @@ -1019,7 +1030,8 @@ 1.69 } 1.70 1.71 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, 1.72 - unsigned int * gc_count_before_ret) { 1.73 + unsigned int * gc_count_before_ret, 1.74 + int* gclocker_retry_count_ret) { 1.75 // The structure of this method has a lot of similarities to 1.76 // attempt_allocation_slow(). The reason these two were not merged 1.77 // into a single one is that such a method would require several "if 1.78 @@ -1104,10 +1116,16 @@ 1.79 return NULL; 1.80 } 1.81 } else { 1.82 + if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) { 1.83 + MutexLockerEx x(Heap_lock); 1.84 + *gc_count_before_ret = total_collections(); 1.85 + return NULL; 1.86 + } 1.87 // The GCLocker is either active or the GCLocker initiated 1.88 // GC has not yet been performed. Stall until it is and 1.89 // then retry the allocation. 1.90 GC_locker::stall_until_clear(); 1.91 + (*gclocker_retry_count_ret) += 1; 1.92 } 1.93 1.94 // We can reach here if we were unsuccessul in scheduling a
2.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Mar 27 10:55:37 2013 +0100 2.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Mar 28 10:27:28 2013 +0100 2.3 @@ -559,18 +559,21 @@ 2.4 // the mutator alloc region without taking the Heap_lock. This 2.5 // should only be used for non-humongous allocations. 2.6 inline HeapWord* attempt_allocation(size_t word_size, 2.7 - unsigned int* gc_count_before_ret); 2.8 + unsigned int* gc_count_before_ret, 2.9 + int* gclocker_retry_count_ret); 2.10 2.11 // Second-level mutator allocation attempt: take the Heap_lock and 2.12 // retry the allocation attempt, potentially scheduling a GC 2.13 // pause. This should only be used for non-humongous allocations. 2.14 HeapWord* attempt_allocation_slow(size_t word_size, 2.15 - unsigned int* gc_count_before_ret); 2.16 + unsigned int* gc_count_before_ret, 2.17 + int* gclocker_retry_count_ret); 2.18 2.19 // Takes the Heap_lock and attempts a humongous allocation. It can 2.20 // potentially schedule a GC pause. 2.21 HeapWord* attempt_allocation_humongous(size_t word_size, 2.22 - unsigned int* gc_count_before_ret); 2.23 + unsigned int* gc_count_before_ret, 2.24 + int* gclocker_retry_count_ret); 2.25 2.26 // Allocation attempt that should be called during safepoints (e.g., 2.27 // at the end of a successful GC). expect_null_mutator_alloc_region
3.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Wed Mar 27 10:55:37 2013 +0100 3.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Mar 28 10:27:28 2013 +0100 3.3 @@ -60,7 +60,8 @@ 3.4 3.5 inline HeapWord* 3.6 G1CollectedHeap::attempt_allocation(size_t word_size, 3.7 - unsigned int* gc_count_before_ret) { 3.8 + unsigned int* gc_count_before_ret, 3.9 + int* gclocker_retry_count_ret) { 3.10 assert_heap_not_locked_and_not_at_safepoint(); 3.11 assert(!isHumongous(word_size), "attempt_allocation() should not " 3.12 "be called for humongous allocation requests"); 3.13 @@ -68,7 +69,9 @@ 3.14 HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size, 3.15 false /* bot_updates */); 3.16 if (result == NULL) { 3.17 - result = attempt_allocation_slow(word_size, gc_count_before_ret); 3.18 + result = attempt_allocation_slow(word_size, 3.19 + gc_count_before_ret, 3.20 + gclocker_retry_count_ret); 3.21 } 3.22 assert_heap_not_locked(); 3.23 if (result != NULL) {
4.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed Mar 27 10:55:37 2013 +0100 4.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Mar 28 10:27:28 2013 +0100 4.3 @@ -326,6 +326,7 @@ 4.4 4.5 uint loop_count = 0; 4.6 uint gc_count = 0; 4.7 + int gclocker_stalled_count = 0; 4.8 4.9 while (result == NULL) { 4.10 // We don't want to have multiple collections for a single filled generation. 4.11 @@ -354,6 +355,10 @@ 4.12 return result; 4.13 } 4.14 4.15 + if (gclocker_stalled_count > GCLockerRetryAllocationCount) { 4.16 + return NULL; 4.17 + } 4.18 + 4.19 // Failed to allocate without a gc. 4.20 if (GC_locker::is_active_and_needs_gc()) { 4.21 // If this thread is not in a jni critical section, we stall 4.22 @@ -366,6 +371,7 @@ 4.23 if (!jthr->in_critical()) { 4.24 MutexUnlocker mul(Heap_lock); 4.25 GC_locker::stall_until_clear(); 4.26 + gclocker_stalled_count += 1; 4.27 continue; 4.28 } else { 4.29 if (CheckJNICalls) {
5.1 --- a/src/share/vm/memory/collectorPolicy.cpp Wed Mar 27 10:55:37 2013 +0100 5.2 +++ b/src/share/vm/memory/collectorPolicy.cpp Thu Mar 28 10:27:28 2013 +0100 5.3 @@ -532,7 +532,7 @@ 5.4 5.5 // Loop until the allocation is satisified, 5.6 // or unsatisfied after GC. 5.7 - for (int try_count = 1; /* return or throw */; try_count += 1) { 5.8 + for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { 5.9 HandleMark hm; // discard any handles allocated in each iteration 5.10 5.11 // First allocation attempt is lock-free. 5.12 @@ -576,6 +576,10 @@ 5.13 } 5.14 } 5.15 5.16 + if (gclocker_stalled_count > GCLockerRetryAllocationCount) { 5.17 + return NULL; // we didn't get to do a GC and we didn't get any memory 5.18 + } 5.19 + 5.20 // If this thread is not in a jni critical section, we stall 5.21 // the requestor until the critical section has cleared and 5.22 // GC allowed. When the critical section clears, a GC is 5.23 @@ -587,6 +591,7 @@ 5.24 MutexUnlocker mul(Heap_lock); 5.25 // Wait for JNI critical section to be exited 5.26 GC_locker::stall_until_clear(); 5.27 + gclocker_stalled_count += 1; 5.28 continue; 5.29 } else { 5.30 if (CheckJNICalls) {
6.1 --- a/src/share/vm/runtime/globals.hpp Wed Mar 27 10:55:37 2013 +0100 6.2 +++ b/src/share/vm/runtime/globals.hpp Thu Mar 28 10:27:28 2013 +0100 6.3 @@ -1402,6 +1402,10 @@ 6.4 "How much the GC can expand the eden by while the GC locker " \ 6.5 "is active (as a percentage)") \ 6.6 \ 6.7 + diagnostic(intx, GCLockerRetryAllocationCount, 2, \ 6.8 + "Number of times to retry allocations when" \ 6.9 + " blocked by the GC locker") \ 6.10 + \ 6.11 develop(bool, UseCMSAdaptiveFreeLists, true, \ 6.12 "Use Adaptive Free Lists in the CMS generation") \ 6.13 \