--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -98,10 +98,12 @@
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
 
-  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                              false /* bot_updates */);
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                                   false /* bot_updates */);
   if (result == NULL) {
     result = attempt_allocation_slow(word_size,
+                                     context,
                                      gc_count_before_ret,
                                      gclocker_retry_count_ret);
   }
@@ -112,17 +114,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
-                                                              word_size) {
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
+                                                              AllocationContext_t context) {
   assert(!isHumongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
-                                                                  false /* bot_updates */);
+  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                       false /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                                 false /* bot_updates */);
+    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                      false /* bot_updates */);
   }
   if (result != NULL) {
     dirty_young_block(result, word_size);
@@ -130,16 +132,17 @@
   return result;
 }
 
-inline HeapWord*
-G1CollectedHeap::old_attempt_allocation(size_t word_size) {
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
+                                                         AllocationContext_t context) {
   assert(!isHumongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
-                                                             true /* bot_updates */);
+  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                  true /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                            true /* bot_updates */);
+    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                 true /* bot_updates */);
   }
   return result;
 }