1.1 --- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Tue Nov 22 04:47:10 2011 -0500 1.2 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Tue Aug 09 10:16:01 2011 -0700 1.3 @@ -255,7 +255,18 @@ 1.4 CollectionSetChooser:: 1.5 prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) { 1.6 _first_par_unreserved_idx = 0; 1.7 - size_t max_waste = ParallelGCThreads * chunkSize; 1.8 + int n_threads = ParallelGCThreads; 1.9 + if (UseDynamicNumberOfGCThreads) { 1.10 + assert(G1CollectedHeap::heap()->workers()->active_workers() > 0, 1.11 + "Should have been set earlier"); 1.12 + // This is defensive code. As the assertion above says, the number 1.13 + // of active threads should be > 0, but in case there is some path 1.14 + // or some improperly initialized variable which leads to no 1.15 + // active threads, protect against that in a product build. 1.16 + n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(), 1.17 + 1); 1.18 + } 1.19 + size_t max_waste = n_threads * chunkSize; 1.20 // it should be aligned with respect to chunkSize 1.21 size_t aligned_n_regions = 1.22 (n_regions + (chunkSize - 1)) / chunkSize * chunkSize; 1.23 @@ -265,6 +276,11 @@ 1.24 1.25 jint 1.26 CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) { 1.27 + // Don't do this assert because this can be called at a point 1.28 + // where the loop upstream will not execute again but might 1.29 + // try to claim more chunks (loop test has not been done yet). 1.30 + // assert(_markedRegions.length() > _first_par_unreserved_idx, 1.31 + // "Striding beyond the marked regions"); 1.32 jint res = Atomic::add(n_regions, &_first_par_unreserved_idx); 1.33 assert(_markedRegions.length() > res + n_regions - 1, 1.34 "Should already have been expanded");