--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 03 13:24:02 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Aug 06 12:20:14 2012 -0700
@@ -33,7 +33,7 @@
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
-#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/sharedHeap.hpp"
@@ -278,10 +278,33 @@
   // survivor objects.
   SurvivorGCAllocRegion _survivor_gc_alloc_region;
 
+  // PLAB sizing policy for survivors.
+  PLABStats _survivor_plab_stats;
+
   // Alloc region used to satisfy allocation requests by the GC for
   // old objects.
   OldGCAllocRegion _old_gc_alloc_region;
 
+  // PLAB sizing policy for tenured objects.
+  PLABStats _old_plab_stats;
+
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
   // The last old region we allocated to during the last GC.
   // Typically, it is not full so we should re-use it during the next GC.
   HeapRegion* _retained_old_gc_alloc_region;
@@ -314,7 +337,7 @@
   G1MonitoringSupport* _g1mm;
 
   // Determines PLAB size for a particular allocation purpose.
-  static size_t desired_plab_sz(GCAllocPurpose purpose);
+  size_t desired_plab_sz(GCAllocPurpose purpose);
 
   // Outside of GC pauses, the number of bytes used in all regions other
   // than the current allocation region.
@@ -1811,19 +1834,19 @@
   }
 
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      assert(gclab_word_size == alloc_buf->word_sz(),
-             "dynamic resizing is not supported");
       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false, false);
+      alloc_buf->flush_stats_and_retire(_g1h->stats_for_purpose(purpose),
+                                        false /* end_of_gc */,
+                                        false /* retain */);
 
       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
       // Otherwise.
+      alloc_buf->set_word_size(gclab_word_size);
       alloc_buf->set_buf(buf);
 
       obj = alloc_buf->allocate(word_sz);
@@ -1908,7 +1931,9 @@
     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
       size_t waste = _alloc_buffers[ap]->words_remaining();
       add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap]->retire(true, false);
+      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                                 true /* end_of_gc */,
+                                                 false /* retain */);
     }
   }
 