--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Aug 11 11:36:29 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Aug 12 11:31:06 2011 -0400
@@ -77,6 +77,38 @@
   return result;
 }
 
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
+                                                              word_size) {
+  assert(!isHumongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
+                                                      false /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
+                                                      false /* bot_updates */);
+  }
+  if (result != NULL) {
+    dirty_young_block(result, word_size);
+  }
+  return result;
+}
+
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
+  assert(!isHumongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
+                                                       true /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
+                                                       true /* bot_updates */);
+  }
+  return result;
+}
+
 // It dirties the cards that cover the block so that so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block