changeset:   3984:3958f0acde31
parent:      3981:d5ec46c7da5c
parent:      3983:eff5d59db7e1
child:       3996:b63c0564035a
child:       3997:f99a36499b8c
author:      amurillo
date:        Fri, 17 Aug 2012 15:41:04 -0700
summary:     Merge

files:
    make/jprt.properties
    src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
    src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
    src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
    src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp
    src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
    src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
    src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp
     1.1 --- a/make/jprt.properties	Wed Aug 15 16:49:38 2012 -0700
     1.2 +++ b/make/jprt.properties	Fri Aug 17 15:41:04 2012 -0700
     1.3 @@ -54,77 +54,77 @@
     1.4  # Define the Solaris platforms we want for the various releases
     1.5  jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
     1.6  jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
     1.7 -jprt.my.solaris.sparc.jdk7u6=${jprt.my.solaris.sparc.jdk7}
     1.8 +jprt.my.solaris.sparc.jdk7u8=${jprt.my.solaris.sparc.jdk7}
     1.9  jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
    1.10  
    1.11  jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
    1.12  jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
    1.13 -jprt.my.solaris.sparcv9.jdk7u6=${jprt.my.solaris.sparcv9.jdk7}
    1.14 +jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
    1.15  jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
    1.16  
    1.17  jprt.my.solaris.i586.jdk8=solaris_i586_5.10
    1.18  jprt.my.solaris.i586.jdk7=solaris_i586_5.10
    1.19 -jprt.my.solaris.i586.jdk7u6=${jprt.my.solaris.i586.jdk7}
    1.20 +jprt.my.solaris.i586.jdk7u8=${jprt.my.solaris.i586.jdk7}
    1.21  jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
    1.22  
    1.23  jprt.my.solaris.x64.jdk8=solaris_x64_5.10
    1.24  jprt.my.solaris.x64.jdk7=solaris_x64_5.10
    1.25 -jprt.my.solaris.x64.jdk7u6=${jprt.my.solaris.x64.jdk7}
    1.26 +jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
    1.27  jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
    1.28  
    1.29  jprt.my.linux.i586.jdk8=linux_i586_2.6
    1.30  jprt.my.linux.i586.jdk7=linux_i586_2.6
    1.31 -jprt.my.linux.i586.jdk7u6=${jprt.my.linux.i586.jdk7}
    1.32 +jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
    1.33  jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
    1.34  
    1.35  jprt.my.linux.x64.jdk8=linux_x64_2.6
    1.36  jprt.my.linux.x64.jdk7=linux_x64_2.6
    1.37 -jprt.my.linux.x64.jdk7u6=${jprt.my.linux.x64.jdk7}
    1.38 +jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
    1.39  jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
    1.40  
    1.41  jprt.my.linux.ppc.jdk8=linux_ppc_2.6
    1.42  jprt.my.linux.ppc.jdk7=linux_ppc_2.6
    1.43 -jprt.my.linux.ppc.jdk7u6=${jprt.my.linux.ppc.jdk7}
    1.44 +jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
    1.45  jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
    1.46  
    1.47  jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
    1.48  jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
    1.49 -jprt.my.linux.ppcv2.jdk7u6=${jprt.my.linux.ppcv2.jdk7}
    1.50 +jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
    1.51  jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
    1.52  
    1.53  jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
    1.54  jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
    1.55 -jprt.my.linux.ppcsflt.jdk7u6=${jprt.my.linux.ppcsflt.jdk7}
    1.56 +jprt.my.linux.ppcsflt.jdk7u8=${jprt.my.linux.ppcsflt.jdk7}
    1.57  jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
    1.58  
    1.59  jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
    1.60  jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
    1.61 -jprt.my.linux.armvfp.jdk7u6=${jprt.my.linux.armvfp.jdk7}
    1.62 +jprt.my.linux.armvfp.jdk7u8=${jprt.my.linux.armvfp.jdk7}
    1.63  jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
    1.64  
    1.65  jprt.my.linux.armv6.jdk8=linux_armv6_2.6
    1.66  jprt.my.linux.armv6.jdk7=linux_armv6_2.6
    1.67 -jprt.my.linux.armv6.jdk7u6=${jprt.my.linux.armv6.jdk7}
    1.68 +jprt.my.linux.armv6.jdk7u8=${jprt.my.linux.armv6.jdk7}
    1.69  jprt.my.linux.armv6=${jprt.my.linux.armv6.${jprt.tools.default.release}}
    1.70  
    1.71  jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
    1.72  jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
    1.73 -jprt.my.linux.armsflt.jdk7u6=${jprt.my.linux.armsflt.jdk7}
    1.74 +jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
    1.75  jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
    1.76  
    1.77  jprt.my.macosx.x64.jdk8=macosx_x64_10.7
    1.78  jprt.my.macosx.x64.jdk7=macosx_x64_10.7
    1.79 -jprt.my.macosx.x64.jdk7u6=${jprt.my.macosx.x64.jdk7}
    1.80 +jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
    1.81  jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
    1.82  
    1.83  jprt.my.windows.i586.jdk8=windows_i586_5.1
    1.84  jprt.my.windows.i586.jdk7=windows_i586_5.1
    1.85 -jprt.my.windows.i586.jdk7u6=${jprt.my.windows.i586.jdk7}
    1.86 +jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
    1.87  jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
    1.88  
    1.89  jprt.my.windows.x64.jdk8=windows_x64_5.2
    1.90  jprt.my.windows.x64.jdk7=windows_x64_5.2
    1.91 -jprt.my.windows.x64.jdk7u6=${jprt.my.windows.x64.jdk7}
    1.92 +jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
    1.93  jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
    1.94  
    1.95  # Standard list of jprt build targets for this source tree
    1.96 @@ -159,7 +159,7 @@
    1.97  
    1.98  jprt.build.targets.jdk8=${jprt.build.targets.all}
    1.99  jprt.build.targets.jdk7=${jprt.build.targets.all}
   1.100 -jprt.build.targets.jdk7u6=${jprt.build.targets.all}
   1.101 +jprt.build.targets.jdk7u8=${jprt.build.targets.all}
   1.102  jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
   1.103  
   1.104  # Subset lists of test targets for this source tree
   1.105 @@ -452,7 +452,7 @@
   1.106  
   1.107  jprt.test.targets.jdk8=${jprt.test.targets.standard}
   1.108  jprt.test.targets.jdk7=${jprt.test.targets.standard}
   1.109 -jprt.test.targets.jdk7u6=${jprt.test.targets.jdk7}
   1.110 +jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
   1.111  jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
   1.112  
   1.113  # The default test/Makefile targets that should be run
   1.114 @@ -512,7 +512,7 @@
   1.115  
   1.116  jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
   1.117  jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
   1.118 -jprt.make.rule.test.targets.jdk7u6=${jprt.make.rule.test.targets.jdk7}
   1.119 +jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
   1.120  jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
   1.121  
   1.122  # 7155453: Work-around to prevent popups on OSX from blocking test completion
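
All of the jprt.properties hunks above are one mechanical rename: the release-specific key suffix moves from jdk7u6 to jdk7u8. The rename matters because each platform and target property is resolved through a doubly substituted key, so a missing jdk7u8 entry would break the lookup chain. A sketch of how one lookup resolves, assuming jprt.tools.default.release is set to jdk7u8 (an illustrative value, not shown in this diff):

    jprt.tools.default.release=jdk7u8
    jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
    jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
        -> ${jprt.my.linux.x64.jdk7u8}
        -> ${jprt.my.linux.x64.jdk7}
        -> linux_x64_2.6
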
     2.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Aug 15 16:49:38 2012 -0700
     2.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 17 15:41:04 2012 -0700
     2.3 @@ -1891,6 +1891,8 @@
     2.4    _young_list(new YoungList(this)),
     2.5    _gc_time_stamp(0),
     2.6    _retained_old_gc_alloc_region(NULL),
     2.7 +  _survivor_plab_stats(YoungPLABSize, PLABWeight),
     2.8 +  _old_plab_stats(OldPLABSize, PLABWeight),
     2.9    _expand_heap_after_alloc_failure(true),
    2.10    _surviving_young_words(NULL),
    2.11    _old_marking_cycles_started(0),
    2.12 @@ -4099,17 +4101,22 @@
    2.13    size_t gclab_word_size;
    2.14    switch (purpose) {
    2.15      case GCAllocForSurvived:
    2.16 -      gclab_word_size = YoungPLABSize;
    2.17 +      gclab_word_size = _survivor_plab_stats.desired_plab_sz();
    2.18        break;
    2.19      case GCAllocForTenured:
    2.20 -      gclab_word_size = OldPLABSize;
    2.21 +      gclab_word_size = _old_plab_stats.desired_plab_sz();
    2.22        break;
    2.23      default:
    2.24        assert(false, "unknown GCAllocPurpose");
    2.25 -      gclab_word_size = OldPLABSize;
    2.26 +      gclab_word_size = _old_plab_stats.desired_plab_sz();
    2.27        break;
    2.28    }
    2.29 -  return gclab_word_size;
    2.30 +
    2.31 +  // Prevent humongous PLAB sizes for two reasons:
    2.32 +  // * PLABs are allocated using a similar paths as oops, but should
    2.33 +  //   never be in a humongous region
    2.34 +  // * Allowing humongous PLABs needlessly churns the region free lists
    2.35 +  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
    2.36  }
    2.37  
    2.38  void G1CollectedHeap::init_mutator_alloc_region() {
    2.39 @@ -4165,6 +4172,11 @@
    2.40    // want either way so no reason to check explicitly for either
    2.41    // condition.
    2.42    _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
    2.43 +
    2.44 +  if (ResizePLAB) {
    2.45 +    _survivor_plab_stats.adjust_desired_plab_sz();
    2.46 +    _old_plab_stats.adjust_desired_plab_sz();
    2.47 +  }
    2.48  }
    2.49  
    2.50  void G1CollectedHeap::abandon_gc_alloc_regions() {
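
Taken together, the g1CollectedHeap.cpp hunks above switch G1 from the fixed YoungPLABSize/OldPLABSize values to per-purpose adaptive estimates, clamp the result below humongous size, and re-latch the estimates once per pause when ResizePLAB is set. A standalone C++ sketch of the clamp; the names, region size, and half-region threshold are illustrative assumptions, not the HotSpot definitions:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <iostream>

    enum GCAllocPurpose { GCAllocForSurvived, GCAllocForTenured };

    struct PLABStatsSketch {
      size_t desired_plab_sz_words;  // latched output of the sizing filter
    };

    // Assume 1 MiB regions of 8-byte words, with objects of half a region
    // or more treated as humongous.
    const size_t kRegionWords = (1024 * 1024) / 8;            // 131072
    const size_t kHumongousThresholdWords = kRegionWords / 2; // 65536

    size_t desired_plab_sz(GCAllocPurpose purpose,
                           const PLABStatsSketch& survivor,
                           const PLABStatsSketch& old_gen) {
      size_t words = (purpose == GCAllocForSurvived)
                         ? survivor.desired_plab_sz_words
                         : old_gen.desired_plab_sz_words;
      // The clamp added above: a PLAB is allocated much like an oop and
      // must never land in a humongous region, and humongous PLABs would
      // needlessly churn the region free lists.
      return std::min(kHumongousThresholdWords, words);
    }

    int main() {
      PLABStatsSketch survivor{4096};
      PLABStatsSketch old_gen{kRegionWords};  // stats asked for a full region
      assert(desired_plab_sz(GCAllocForSurvived, survivor, old_gen) == 4096);
      std::cout << desired_plab_sz(GCAllocForTenured, survivor, old_gen)
                << " words after clamping\n";  // 65536, not 131072
      return 0;
    }
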
     3.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Aug 15 16:49:38 2012 -0700
     3.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 17 15:41:04 2012 -0700
     3.3 @@ -33,7 +33,7 @@
     3.4  #include "gc_implementation/g1/heapRegionSeq.hpp"
     3.5  #include "gc_implementation/g1/heapRegionSets.hpp"
     3.6  #include "gc_implementation/shared/hSpaceCounters.hpp"
     3.7 -#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
     3.8 +#include "gc_implementation/shared/parGCAllocBuffer.hpp"
     3.9  #include "memory/barrierSet.hpp"
    3.10  #include "memory/memRegion.hpp"
    3.11  #include "memory/sharedHeap.hpp"
    3.12 @@ -278,10 +278,33 @@
    3.13    // survivor objects.
    3.14    SurvivorGCAllocRegion _survivor_gc_alloc_region;
    3.15  
    3.16 +  // PLAB sizing policy for survivors.
    3.17 +  PLABStats _survivor_plab_stats;
    3.18 +
    3.19    // Alloc region used to satisfy allocation requests by the GC for
    3.20    // old objects.
    3.21    OldGCAllocRegion _old_gc_alloc_region;
    3.22  
    3.23 +  // PLAB sizing policy for tenured objects.
    3.24 +  PLABStats _old_plab_stats;
    3.25 +
    3.26 +  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
    3.27 +    PLABStats* stats = NULL;
    3.28 +
    3.29 +    switch (purpose) {
    3.30 +    case GCAllocForSurvived:
    3.31 +      stats = &_survivor_plab_stats;
    3.32 +      break;
    3.33 +    case GCAllocForTenured:
    3.34 +      stats = &_old_plab_stats;
    3.35 +      break;
    3.36 +    default:
    3.37 +      assert(false, "unrecognized GCAllocPurpose");
    3.38 +    }
    3.39 +
    3.40 +    return stats;
    3.41 +  }
    3.42 +
    3.43    // The last old region we allocated to during the last GC.
    3.44    // Typically, it is not full so we should re-use it during the next GC.
    3.45    HeapRegion* _retained_old_gc_alloc_region;
    3.46 @@ -314,7 +337,7 @@
    3.47    G1MonitoringSupport* _g1mm;
    3.48  
    3.49    // Determines PLAB size for a particular allocation purpose.
    3.50 -  static size_t desired_plab_sz(GCAllocPurpose purpose);
    3.51 +  size_t desired_plab_sz(GCAllocPurpose purpose);
    3.52  
    3.53    // Outside of GC pauses, the number of bytes used in all regions other
    3.54    // than the current allocation region.
    3.55 @@ -1811,19 +1834,19 @@
    3.56    }
    3.57  
    3.58    HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
    3.59 -
    3.60      HeapWord* obj = NULL;
    3.61      size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
    3.62      if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    3.63        G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
    3.64 -      assert(gclab_word_size == alloc_buf->word_sz(),
    3.65 -             "dynamic resizing is not supported");
    3.66        add_to_alloc_buffer_waste(alloc_buf->words_remaining());
    3.67 -      alloc_buf->retire(false, false);
    3.68 +      alloc_buf->flush_stats_and_retire(_g1h->stats_for_purpose(purpose),
    3.69 +                                        false /* end_of_gc */,
    3.70 +                                        false /* retain */);
    3.71  
    3.72        HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
    3.73        if (buf == NULL) return NULL; // Let caller handle allocation failure.
    3.74        // Otherwise.
    3.75 +      alloc_buf->set_word_size(gclab_word_size);
    3.76        alloc_buf->set_buf(buf);
    3.77  
    3.78        obj = alloc_buf->allocate(word_sz);
    3.79 @@ -1908,7 +1931,9 @@
    3.80      for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    3.81        size_t waste = _alloc_buffers[ap]->words_remaining();
    3.82        add_to_alloc_buffer_waste(waste);
    3.83 -      _alloc_buffers[ap]->retire(true, false);
    3.84 +      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
    3.85 +                                                 true /* end_of_gc */,
    3.86 +                                                 false /* retain */);
    3.87      }
    3.88    }
    3.89  
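
The header hunks above add the two PLABStats members, route them through stats_for_purpose(), and drop the old assert that a retiring buffer's size never changes: allocate_slow now flushes the buffer's statistics into the matching PLABStats and picks up the newly desired size via set_word_size before set_buf. The refill-versus-direct-allocation test itself is untouched; a standalone sketch of it (the flag value is HotSpot's usual default, treated as an assumption here):

    #include <cstddef>
    #include <iostream>

    const size_t kParallelGCBufferWastePct = 10;  // assumed default

    // Same integer comparison as allocate_slow above: refill only when the
    // request is smaller than waste-pct percent of a fresh PLAB, so retiring
    // the current buffer can waste at most that fraction of one buffer.
    bool should_refill_plab(size_t word_sz, size_t gclab_word_size) {
      return word_sz * 100 < gclab_word_size * kParallelGCBufferWastePct;
    }

    int main() {
      std::cout << should_refill_plab(100, 4096)   // 1: refill, then bump-allocate
                << should_refill_plab(1000, 4096)  // 0: allocate outside the PLAB
                << "\n";
      return 0;
    }
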
     4.1 --- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp	Wed Aug 15 16:49:38 2012 -0700
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,344 +0,0 @@
     4.4 -/*
     4.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     4.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.7 - *
     4.8 - * This code is free software; you can redistribute it and/or modify it
     4.9 - * under the terms of the GNU General Public License version 2 only, as
    4.10 - * published by the Free Software Foundation.
    4.11 - *
    4.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    4.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    4.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    4.15 - * version 2 for more details (a copy is included in the LICENSE file that
    4.16 - * accompanied this code).
    4.17 - *
    4.18 - * You should have received a copy of the GNU General Public License version
    4.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    4.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    4.21 - *
    4.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    4.23 - * or visit www.oracle.com if you need additional information or have any
    4.24 - * questions.
    4.25 - *
    4.26 - */
    4.27 -
    4.28 -#include "precompiled.hpp"
    4.29 -#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
    4.30 -#include "memory/sharedHeap.hpp"
    4.31 -#include "oops/arrayOop.hpp"
    4.32 -#include "oops/oop.inline.hpp"
    4.33 -
    4.34 -ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
    4.35 -  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
    4.36 -  _end(NULL), _hard_end(NULL),
    4.37 -  _retained(false), _retained_filler(),
    4.38 -  _allocated(0), _wasted(0)
    4.39 -{
    4.40 -  assert (min_size() > AlignmentReserve, "Inconsistency!");
    4.41 -  // arrayOopDesc::header_size depends on command line initialization.
    4.42 -  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
    4.43 -  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
    4.44 -}
    4.45 -
    4.46 -size_t ParGCAllocBuffer::FillerHeaderSize;
    4.47 -
    4.48 -// If the minimum object size is greater than MinObjAlignment, we can
    4.49 -// end up with a shard at the end of the buffer that's smaller than
    4.50 -// the smallest object.  We can't allow that because the buffer must
    4.51 -// look like it's full of objects when we retire it, so we make
    4.52 -// sure we have enough space for a filler int array object.
    4.53 -size_t ParGCAllocBuffer::AlignmentReserve;
    4.54 -
    4.55 -void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
    4.56 -  assert(!retain || end_of_gc, "Can only retain at GC end.");
    4.57 -  if (_retained) {
    4.58 -    // If the buffer had been retained shorten the previous filler object.
    4.59 -    assert(_retained_filler.end() <= _top, "INVARIANT");
    4.60 -    CollectedHeap::fill_with_object(_retained_filler);
    4.61 -    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    4.62 -    _wasted += _retained_filler.word_size();
    4.63 -    _retained = false;
    4.64 -  }
    4.65 -  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
    4.66 -  if (_top < _hard_end) {
    4.67 -    CollectedHeap::fill_with_object(_top, _hard_end);
    4.68 -    if (!retain) {
    4.69 -      invalidate();
    4.70 -    } else {
    4.71 -      // Is there wasted space we'd like to retain for the next GC?
    4.72 -      if (pointer_delta(_end, _top) > FillerHeaderSize) {
    4.73 -        _retained = true;
    4.74 -        _retained_filler = MemRegion(_top, FillerHeaderSize);
    4.75 -        _top = _top + FillerHeaderSize;
    4.76 -      } else {
    4.77 -        invalidate();
    4.78 -      }
    4.79 -    }
    4.80 -  }
    4.81 -}
    4.82 -
    4.83 -void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
    4.84 -  assert(ResizePLAB, "Wasted work");
    4.85 -  stats->add_allocated(_allocated);
    4.86 -  stats->add_wasted(_wasted);
    4.87 -  stats->add_unused(pointer_delta(_end, _top));
    4.88 -}
    4.89 -
    4.90 -// Compute desired plab size and latch result for later
    4.91 -// use. This should be called once at the end of parallel
    4.92 -// scavenge; it clears the sensor accumulators.
    4.93 -void PLABStats::adjust_desired_plab_sz() {
    4.94 -  assert(ResizePLAB, "Not set");
    4.95 -  if (_allocated == 0) {
    4.96 -    assert(_unused == 0, "Inconsistency in PLAB stats");
    4.97 -    _allocated = 1;
    4.98 -  }
    4.99 -  double wasted_frac    = (double)_unused/(double)_allocated;
   4.100 -  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
   4.101 -                                   TargetPLABWastePct);
   4.102 -  if (target_refills == 0) {
   4.103 -    target_refills = 1;
   4.104 -  }
   4.105 -  _used = _allocated - _wasted - _unused;
   4.106 -  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
   4.107 -  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
   4.108 -  // Take historical weighted average
   4.109 -  _filter.sample(plab_sz);
   4.110 -  // Clip from above and below, and align to object boundary
   4.111 -  plab_sz = MAX2(min_size(), (size_t)_filter.average());
   4.112 -  plab_sz = MIN2(max_size(), plab_sz);
   4.113 -  plab_sz = align_object_size(plab_sz);
   4.114 -  // Latch the result
   4.115 -  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);
   4.116 -  if (ResizePLAB) {
   4.117 -    _desired_plab_sz = plab_sz;
   4.118 -  }
   4.119 -  // Now clear the accumulators for next round:
   4.120 -  // note this needs to be fixed in the case where we
   4.121 -  // are retaining across scavenges. FIX ME !!! XXX
   4.122 -  _allocated = 0;
   4.123 -  _wasted    = 0;
   4.124 -  _unused    = 0;
   4.125 -}
   4.126 -
   4.127 -#ifndef PRODUCT
   4.128 -void ParGCAllocBuffer::print() {
   4.129 -  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p"
   4.130 -             "_retained: %c _retained_filler: [%p,%p)\n",
   4.131 -             _bottom, _top, _end, _hard_end,
   4.132 -             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
   4.133 -}
   4.134 -#endif // !PRODUCT
   4.135 -
   4.136 -const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
   4.137 -MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
   4.138 -     ((size_t)Generation::GenGrain)/HeapWordSize);
   4.139 -const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
   4.140 -MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
   4.141 -     (size_t)Generation::GenGrain);
   4.142 -
   4.143 -ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
   4.144 -                                                 BlockOffsetSharedArray* bsa) :
   4.145 -  ParGCAllocBuffer(word_sz),
   4.146 -  _bsa(bsa),
   4.147 -  _bt(bsa, MemRegion(_bottom, _hard_end)),
   4.148 -  _true_end(_hard_end)
   4.149 -{}
   4.150 -
   4.151 -// The buffer comes with its own BOT, with a shared (obviously) underlying
   4.152 -// BlockOffsetSharedArray. We manipulate this BOT in the normal way
    4.153 -// as we would for any contiguous space. However, on occasion we
   4.154 -// need to do some buffer surgery at the extremities before we
   4.155 -// start using the body of the buffer for allocations. Such surgery
   4.156 -// (as explained elsewhere) is to prevent allocation on a card that
   4.157 -// is in the process of being walked concurrently by another GC thread.
   4.158 -// When such surgery happens at a point that is far removed (to the
   4.159 -// right of the current allocation point, top), we use the "contig"
   4.160 -// parameter below to directly manipulate the shared array without
   4.161 -// modifying the _next_threshold state in the BOT.
   4.162 -void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
   4.163 -                                                     bool contig) {
   4.164 -  CollectedHeap::fill_with_object(mr);
   4.165 -  if (contig) {
   4.166 -    _bt.alloc_block(mr.start(), mr.end());
   4.167 -  } else {
   4.168 -    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
   4.169 -  }
   4.170 -}
   4.171 -
   4.172 -HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
   4.173 -  HeapWord* res = NULL;
   4.174 -  if (_true_end > _hard_end) {
   4.175 -    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
   4.176 -                                      ChunkSizeInBytes) == _hard_end,
   4.177 -           "or else _true_end should be equal to _hard_end");
   4.178 -    assert(_retained, "or else _true_end should be equal to _hard_end");
   4.179 -    assert(_retained_filler.end() <= _top, "INVARIANT");
   4.180 -    CollectedHeap::fill_with_object(_retained_filler);
   4.181 -    if (_top < _hard_end) {
   4.182 -      fill_region_with_block(MemRegion(_top, _hard_end), true);
   4.183 -    }
   4.184 -    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
   4.185 -    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
   4.186 -    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
   4.187 -    _top      = _retained_filler.end();
   4.188 -    _hard_end = next_hard_end;
   4.189 -    _end      = _hard_end - AlignmentReserve;
   4.190 -    res       = ParGCAllocBuffer::allocate(word_sz);
   4.191 -    if (res != NULL) {
   4.192 -      _bt.alloc_block(res, word_sz);
   4.193 -    }
   4.194 -  }
   4.195 -  return res;
   4.196 -}
   4.197 -
   4.198 -void
   4.199 -ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
   4.200 -  ParGCAllocBuffer::undo_allocation(obj, word_sz);
   4.201 -  // This may back us up beyond the previous threshold, so reset.
   4.202 -  _bt.set_region(MemRegion(_top, _hard_end));
   4.203 -  _bt.initialize_threshold();
   4.204 -}
   4.205 -
   4.206 -void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
   4.207 -  assert(!retain || end_of_gc, "Can only retain at GC end.");
   4.208 -  if (_retained) {
   4.209 -    // We're about to make the retained_filler into a block.
   4.210 -    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
   4.211 -                                      _retained_filler.end());
   4.212 -  }
   4.213 -  // Reset _hard_end to _true_end (and update _end)
   4.214 -  if (retain && _hard_end != NULL) {
   4.215 -    assert(_hard_end <= _true_end, "Invariant.");
   4.216 -    _hard_end = _true_end;
   4.217 -    _end      = MAX2(_top, _hard_end - AlignmentReserve);
   4.218 -    assert(_end <= _hard_end, "Invariant.");
   4.219 -  }
   4.220 -  _true_end = _hard_end;
   4.221 -  HeapWord* pre_top = _top;
   4.222 -
   4.223 -  ParGCAllocBuffer::retire(end_of_gc, retain);
   4.224 -  // Now any old _retained_filler is cut back to size, the free part is
   4.225 -  // filled with a filler object, and top is past the header of that
   4.226 -  // object.
   4.227 -
   4.228 -  if (retain && _top < _end) {
   4.229 -    assert(end_of_gc && retain, "Or else retain should be false.");
   4.230 -    // If the lab does not start on a card boundary, we don't want to
   4.231 -    // allocate onto that card, since that might lead to concurrent
   4.232 -    // allocation and card scanning, which we don't support.  So we fill
   4.233 -    // the first card with a garbage object.
   4.234 -    size_t first_card_index = _bsa->index_for(pre_top);
   4.235 -    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
   4.236 -    if (first_card_start < pre_top) {
   4.237 -      HeapWord* second_card_start =
   4.238 -        _bsa->inc_by_region_size(first_card_start);
   4.239 -
   4.240 -      // Ensure enough room to fill with the smallest block
   4.241 -      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
   4.242 -
   4.243 -      // If the end is already in the first card, don't go beyond it!
   4.244 -      // Or if the remainder is too small for a filler object, gobble it up.
   4.245 -      if (_hard_end < second_card_start ||
   4.246 -          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
   4.247 -        second_card_start = _hard_end;
   4.248 -      }
   4.249 -      if (pre_top < second_card_start) {
   4.250 -        MemRegion first_card_suffix(pre_top, second_card_start);
   4.251 -        fill_region_with_block(first_card_suffix, true);
   4.252 -      }
   4.253 -      pre_top = second_card_start;
   4.254 -      _top = pre_top;
   4.255 -      _end = MAX2(_top, _hard_end - AlignmentReserve);
   4.256 -    }
   4.257 -
   4.258 -    // If the lab does not end on a card boundary, we don't want to
   4.259 -    // allocate onto that card, since that might lead to concurrent
   4.260 -    // allocation and card scanning, which we don't support.  So we fill
   4.261 -    // the last card with a garbage object.
   4.262 -    size_t last_card_index = _bsa->index_for(_hard_end);
   4.263 -    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
   4.264 -    if (last_card_start < _hard_end) {
   4.265 -
   4.266 -      // Ensure enough room to fill with the smallest block
   4.267 -      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
   4.268 -
   4.269 -      // If the top is already in the last card, don't go back beyond it!
   4.270 -      // Or if the remainder is too small for a filler object, gobble it up.
   4.271 -      if (_top > last_card_start ||
   4.272 -          pointer_delta(last_card_start, _top) < AlignmentReserve) {
   4.273 -        last_card_start = _top;
   4.274 -      }
   4.275 -      if (last_card_start < _hard_end) {
   4.276 -        MemRegion last_card_prefix(last_card_start, _hard_end);
   4.277 -        fill_region_with_block(last_card_prefix, false);
   4.278 -      }
   4.279 -      _hard_end = last_card_start;
   4.280 -      _end      = MAX2(_top, _hard_end - AlignmentReserve);
   4.281 -      _true_end = _hard_end;
   4.282 -      assert(_end <= _hard_end, "Invariant.");
   4.283 -    }
   4.284 -
   4.285 -    // At this point:
   4.286 -    //   1) we had a filler object from the original top to hard_end.
   4.287 -    //   2) We've filled in any partial cards at the front and back.
   4.288 -    if (pre_top < _hard_end) {
   4.289 -      // Now we can reset the _bt to do allocation in the given area.
   4.290 -      MemRegion new_filler(pre_top, _hard_end);
   4.291 -      fill_region_with_block(new_filler, false);
   4.292 -      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
   4.293 -      // If there's no space left, don't retain.
   4.294 -      if (_top >= _end) {
   4.295 -        _retained = false;
   4.296 -        invalidate();
   4.297 -        return;
   4.298 -      }
   4.299 -      _retained_filler = MemRegion(pre_top, _top);
   4.300 -      _bt.set_region(MemRegion(_top, _hard_end));
   4.301 -      _bt.initialize_threshold();
   4.302 -      assert(_bt.threshold() > _top, "initialize_threshold failed!");
   4.303 -
   4.304 -      // There may be other reasons for queries into the middle of the
   4.305 -      // filler object.  When such queries are done in parallel with
   4.306 -      // allocation, bad things can happen, if the query involves object
   4.307 -      // iteration.  So we ensure that such queries do not involve object
   4.308 -      // iteration, by putting another filler object on the boundaries of
   4.309 -      // such queries.  One such is the object spanning a parallel card
   4.310 -      // chunk boundary.
   4.311 -
   4.312 -      // "chunk_boundary" is the address of the first chunk boundary less
   4.313 -      // than "hard_end".
   4.314 -      HeapWord* chunk_boundary =
   4.315 -        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
   4.316 -      assert(chunk_boundary < _hard_end, "Or else above did not work.");
   4.317 -      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
   4.318 -             "Consequence of last card handling above.");
   4.319 -
   4.320 -      if (_top <= chunk_boundary) {
   4.321 -        assert(_true_end == _hard_end, "Invariant.");
   4.322 -        while (_top <= chunk_boundary) {
   4.323 -          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
   4.324 -                 "Consequence of last card handling above.");
   4.325 -          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
   4.326 -          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
   4.327 -          _hard_end = chunk_boundary;
   4.328 -          chunk_boundary -= ChunkSizeInWords;
   4.329 -        }
   4.330 -        _end = _hard_end - AlignmentReserve;
   4.331 -        assert(_top <= _end, "Invariant.");
   4.332 -        // Now reset the initial filler chunk so it doesn't overlap with
   4.333 -        // the one(s) inserted above.
   4.334 -        MemRegion new_filler(pre_top, _hard_end);
   4.335 -        fill_region_with_block(new_filler, false);
   4.336 -      }
   4.337 -    } else {
   4.338 -      _retained = false;
   4.339 -      invalidate();
   4.340 -    }
   4.341 -  } else {
   4.342 -    assert(!end_of_gc ||
   4.343 -           (!_retained && _true_end == _hard_end), "Checking.");
   4.344 -  }
   4.345 -  assert(_end <= _hard_end, "Invariant.");
   4.346 -  assert(_top < _end || _top == _hard_end, "Invariant");
   4.347 -}
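
This deletion, with its header counterpart below, is a move rather than a removal: the PLAB code leaves gc_implementation/parNew for gc_implementation/shared (file 8 below) so G1 can include it without reaching into ParNew. Apart from dropping the redundant ResizePLAB re-check when latching the result (the function already asserts ResizePLAB on entry), adjust_desired_plab_sz() moves verbatim. Its arithmetic targets a refill count derived from the fraction of PLAB space left unused; a standalone rendering with a worked example, where the flag values and thread count are assumed defaults:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    const double kTargetSurvivorRatio = 50.0;  // assumed default
    const double kTargetPLABWastePct  = 10.0;  // assumed default
    const size_t kParallelGCThreads   = 4;     // assumed thread count

    size_t adjusted_plab_sz(size_t allocated, size_t wasted, size_t unused,
                            size_t min_sz, size_t max_sz) {
      if (allocated == 0) allocated = 1;  // same guard as the source
      double wasted_frac = (double)unused / (double)allocated;
      size_t target_refills =
          (size_t)((wasted_frac * kTargetSurvivorRatio) / kTargetPLABWastePct);
      if (target_refills == 0) target_refills = 1;
      size_t used = allocated - wasted - unused;
      size_t plab_sz = used / (target_refills * kParallelGCThreads);
      // The source also smooths plab_sz through an exponentially weighted
      // average (_filter) and aligns it to an object boundary; both steps
      // are elided here.
      return std::min(max_sz, std::max(min_sz, plab_sz));
    }

    int main() {
      // 100000 words handed out in PLABs, 5000 wasted as filler, 40000 left
      // unused at pause end: 40% unused => 2 target refills => smaller PLABs.
      std::cout << adjusted_plab_sz(100000, 5000, 40000, 64, 65536)
                << " words per PLAB next pause\n";  // prints 6875
      return 0;
    }
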
     5.1 --- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	Wed Aug 15 16:49:38 2012 -0700
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,249 +0,0 @@
     5.4 -/*
     5.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     5.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.7 - *
     5.8 - * This code is free software; you can redistribute it and/or modify it
     5.9 - * under the terms of the GNU General Public License version 2 only, as
    5.10 - * published by the Free Software Foundation.
    5.11 - *
    5.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    5.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    5.15 - * version 2 for more details (a copy is included in the LICENSE file that
    5.16 - * accompanied this code).
    5.17 - *
    5.18 - * You should have received a copy of the GNU General Public License version
    5.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    5.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    5.21 - *
    5.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    5.23 - * or visit www.oracle.com if you need additional information or have any
    5.24 - * questions.
    5.25 - *
    5.26 - */
    5.27 -
    5.28 -#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
    5.29 -#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
    5.30 -
    5.31 -#include "memory/allocation.hpp"
    5.32 -#include "memory/blockOffsetTable.hpp"
    5.33 -#include "memory/threadLocalAllocBuffer.hpp"
    5.34 -#include "utilities/globalDefinitions.hpp"
    5.35 -
    5.36 -// Forward decl.
    5.37 -
    5.38 -class PLABStats;
    5.39 -
    5.40 -// A per-thread allocation buffer used during GC.
    5.41 -class ParGCAllocBuffer: public CHeapObj<mtGC> {
    5.42 -protected:
    5.43 -  char head[32];
    5.44 -  size_t _word_sz;          // in HeapWord units
    5.45 -  HeapWord* _bottom;
    5.46 -  HeapWord* _top;
    5.47 -  HeapWord* _end;       // last allocatable address + 1
    5.48 -  HeapWord* _hard_end;  // _end + AlignmentReserve
    5.49 -  bool      _retained;  // whether we hold a _retained_filler
    5.50 -  MemRegion _retained_filler;
    5.51 -  // In support of ergonomic sizing of PLAB's
    5.52 -  size_t    _allocated;     // in HeapWord units
    5.53 -  size_t    _wasted;        // in HeapWord units
    5.54 -  char tail[32];
    5.55 -  static size_t FillerHeaderSize;
    5.56 -  static size_t AlignmentReserve;
    5.57 -
    5.58 -public:
    5.59 -  // Initializes the buffer to be empty, but with the given "word_sz".
    5.60 -  // Must get initialized with "set_buf" for an allocation to succeed.
    5.61 -  ParGCAllocBuffer(size_t word_sz);
    5.62 -
    5.63 -  static const size_t min_size() {
    5.64 -    return ThreadLocalAllocBuffer::min_size();
    5.65 -  }
    5.66 -
    5.67 -  static const size_t max_size() {
    5.68 -    return ThreadLocalAllocBuffer::max_size();
    5.69 -  }
    5.70 -
    5.71 -  // If an allocation of the given "word_sz" can be satisfied within the
    5.72 -  // buffer, do the allocation, returning a pointer to the start of the
    5.73 -  // allocated block.  If the allocation request cannot be satisfied,
    5.74 -  // return NULL.
    5.75 -  HeapWord* allocate(size_t word_sz) {
    5.76 -    HeapWord* res = _top;
    5.77 -    if (pointer_delta(_end, _top) >= word_sz) {
    5.78 -      _top = _top + word_sz;
    5.79 -      return res;
    5.80 -    } else {
    5.81 -      return NULL;
    5.82 -    }
    5.83 -  }
    5.84 -
    5.85 -  // Undo the last allocation in the buffer, which is required to be of the
    5.86 -  // "obj" of the given "word_sz".
    5.87 -  void undo_allocation(HeapWord* obj, size_t word_sz) {
    5.88 -    assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
    5.89 -    assert(pointer_delta(_top, obj)     == word_sz, "Bad undo");
    5.90 -    _top = obj;
    5.91 -  }
    5.92 -
    5.93 -  // The total (word) size of the buffer, including both allocated and
     5.94 -  // unallocated space.
    5.95 -  size_t word_sz() { return _word_sz; }
    5.96 -
    5.97 -  // Should only be done if we are about to reset with a new buffer of the
    5.98 -  // given size.
    5.99 -  void set_word_size(size_t new_word_sz) {
   5.100 -    assert(new_word_sz > AlignmentReserve, "Too small");
   5.101 -    _word_sz = new_word_sz;
   5.102 -  }
   5.103 -
   5.104 -  // The number of words of unallocated space remaining in the buffer.
   5.105 -  size_t words_remaining() {
   5.106 -    assert(_end >= _top, "Negative buffer");
   5.107 -    return pointer_delta(_end, _top, HeapWordSize);
   5.108 -  }
   5.109 -
   5.110 -  bool contains(void* addr) {
   5.111 -    return (void*)_bottom <= addr && addr < (void*)_hard_end;
   5.112 -  }
   5.113 -
   5.114 -  // Sets the space of the buffer to be [buf, space+word_sz()).
   5.115 -  void set_buf(HeapWord* buf) {
   5.116 -    _bottom   = buf;
   5.117 -    _top      = _bottom;
   5.118 -    _hard_end = _bottom + word_sz();
   5.119 -    _end      = _hard_end - AlignmentReserve;
   5.120 -    assert(_end >= _top, "Negative buffer");
   5.121 -    // In support of ergonomic sizing
   5.122 -    _allocated += word_sz();
   5.123 -  }
   5.124 -
   5.125 -  // Flush the stats supporting ergonomic sizing of PLAB's
   5.126 -  void flush_stats(PLABStats* stats);
   5.127 -  void flush_stats_and_retire(PLABStats* stats, bool retain) {
   5.128 -    // We flush the stats first in order to get a reading of
   5.129 -    // unused space in the last buffer.
   5.130 -    if (ResizePLAB) {
   5.131 -      flush_stats(stats);
   5.132 -    }
   5.133 -    // Retire the last allocation buffer.
   5.134 -    retire(true, retain);
   5.135 -  }
   5.136 -
   5.137 -  // Force future allocations to fail and queries for contains()
   5.138 -  // to return false
   5.139 -  void invalidate() {
   5.140 -    assert(!_retained, "Shouldn't retain an invalidated buffer.");
   5.141 -    _end    = _hard_end;
   5.142 -    _wasted += pointer_delta(_end, _top);  // unused  space
   5.143 -    _top    = _end;      // force future allocations to fail
   5.144 -    _bottom = _end;      // force future contains() queries to return false
   5.145 -  }
   5.146 -
   5.147 -  // Fills in the unallocated portion of the buffer with a garbage object.
    5.148 -  // If "end_of_gc" is TRUE, this is after the last use in the GC.  If "retain"
   5.149 -  // is true, attempt to re-use the unused portion in the next GC.
   5.150 -  void retire(bool end_of_gc, bool retain);
   5.151 -
   5.152 -  void print() PRODUCT_RETURN;
   5.153 -};
   5.154 -
   5.155 -// PLAB stats book-keeping
   5.156 -class PLABStats VALUE_OBJ_CLASS_SPEC {
   5.157 -  size_t _allocated;      // total allocated
   5.158 -  size_t _wasted;         // of which wasted (internal fragmentation)
   5.159 -  size_t _unused;         // Unused in last buffer
   5.160 -  size_t _used;           // derived = allocated - wasted - unused
   5.161 -  size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
   5.162 -  AdaptiveWeightedAverage
   5.163 -         _filter;         // integrator with decay
   5.164 -
   5.165 - public:
   5.166 -  PLABStats(size_t desired_plab_sz_, unsigned wt) :
   5.167 -    _allocated(0),
   5.168 -    _wasted(0),
   5.169 -    _unused(0),
   5.170 -    _used(0),
   5.171 -    _desired_plab_sz(desired_plab_sz_),
   5.172 -    _filter(wt)
   5.173 -  {
   5.174 -    size_t min_sz = min_size();
   5.175 -    size_t max_sz = max_size();
   5.176 -    size_t aligned_min_sz = align_object_size(min_sz);
   5.177 -    size_t aligned_max_sz = align_object_size(max_sz);
   5.178 -    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
   5.179 -           min_sz <= max_sz,
   5.180 -           "PLAB clipping computation in adjust_desired_plab_sz()"
   5.181 -           " may be incorrect");
   5.182 -  }
   5.183 -
   5.184 -  static const size_t min_size() {
   5.185 -    return ParGCAllocBuffer::min_size();
   5.186 -  }
   5.187 -
   5.188 -  static const size_t max_size() {
   5.189 -    return ParGCAllocBuffer::max_size();
   5.190 -  }
   5.191 -
   5.192 -  size_t desired_plab_sz() {
   5.193 -    return _desired_plab_sz;
   5.194 -  }
   5.195 -
   5.196 -  void adjust_desired_plab_sz(); // filter computation, latches output to
   5.197 -                                 // _desired_plab_sz, clears sensor accumulators
   5.198 -
   5.199 -  void add_allocated(size_t v) {
   5.200 -    Atomic::add_ptr(v, &_allocated);
   5.201 -  }
   5.202 -
   5.203 -  void add_unused(size_t v) {
   5.204 -    Atomic::add_ptr(v, &_unused);
   5.205 -  }
   5.206 -
   5.207 -  void add_wasted(size_t v) {
   5.208 -    Atomic::add_ptr(v, &_wasted);
   5.209 -  }
   5.210 -};
   5.211 -
   5.212 -class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
   5.213 -  BlockOffsetArrayContigSpace _bt;
   5.214 -  BlockOffsetSharedArray*     _bsa;
   5.215 -  HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer
   5.216 -
   5.217 -  static const size_t ChunkSizeInWords;
   5.218 -  static const size_t ChunkSizeInBytes;
   5.219 -  HeapWord* allocate_slow(size_t word_sz);
   5.220 -
   5.221 -  void fill_region_with_block(MemRegion mr, bool contig);
   5.222 -
   5.223 -public:
   5.224 -  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
   5.225 -
   5.226 -  HeapWord* allocate(size_t word_sz) {
   5.227 -    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
   5.228 -    if (res != NULL) {
   5.229 -      _bt.alloc_block(res, word_sz);
   5.230 -    } else {
   5.231 -      res = allocate_slow(word_sz);
   5.232 -    }
   5.233 -    return res;
   5.234 -  }
   5.235 -
   5.236 -  void undo_allocation(HeapWord* obj, size_t word_sz);
   5.237 -
   5.238 -  void set_buf(HeapWord* buf_start) {
   5.239 -    ParGCAllocBuffer::set_buf(buf_start);
   5.240 -    _true_end = _hard_end;
   5.241 -    _bt.set_region(MemRegion(buf_start, word_sz()));
   5.242 -    _bt.initialize_threshold();
   5.243 -  }
   5.244 -
   5.245 -  void retire(bool end_of_gc, bool retain);
   5.246 -
   5.247 -  MemRegion range() {
   5.248 -    return MemRegion(_top, _true_end);
   5.249 -  }
   5.250 -};
   5.251 -
   5.252 -#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
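
The header removed above (re-created under gc_implementation/shared, though its new copy is not shown in this changeset view) declares the interface the rest of the diff leans on: ParGCAllocBuffer is a per-thread bump-pointer buffer (allocate / undo_allocation / words_remaining / set_buf), and PLABStats accumulates allocated, wasted, and unused words with atomic adds so many GC threads can flush concurrently. Note that this old flush_stats_and_retire hardwired end_of_gc to true; the updated call sites above and below imply the shared version takes it as an explicit parameter. A minimal standalone model of the bump-pointer part, with simplified types and the filler/retain machinery deliberately omitted:

    #include <cassert>
    #include <cstddef>

    typedef unsigned long HeapWord;  // stand-in for HotSpot's heap-word type

    struct MiniPlab {
      HeapWord* _bottom;
      HeapWord* _top;
      HeapWord* _end;

      void set_buf(HeapWord* buf, size_t word_sz) {
        _bottom = _top = buf;
        _end = buf + word_sz;
      }
      HeapWord* allocate(size_t word_sz) {  // fast path: bump _top
        if ((size_t)(_end - _top) >= word_sz) {
          HeapWord* res = _top;
          _top += word_sz;
          return res;
        }
        return nullptr;  // caller falls back to a slow path
      }
      void undo_allocation(HeapWord* obj, size_t word_sz) {
        assert(_top - word_sz == obj && "only the last allocation is undoable");
        _top = obj;  // roll the bump pointer back
      }
      size_t words_remaining() const { return (size_t)(_end - _top); }
    };

    int main() {
      HeapWord space[1024];
      MiniPlab plab;
      plab.set_buf(space, 1024);
      HeapWord* obj = plab.allocate(100);
      assert(obj != nullptr);
      plab.undo_allocation(obj, 100);  // e.g. another thread won the copy race
      assert(plab.words_remaining() == 1024);
      return 0;
    }
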
     6.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Aug 15 16:49:38 2012 -0700
     6.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Aug 17 15:41:04 2012 -0700
     6.3 @@ -24,11 +24,11 @@
     6.4  
     6.5  #include "precompiled.hpp"
     6.6  #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
     6.7 -#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
     6.8  #include "gc_implementation/parNew/parNewGeneration.hpp"
     6.9  #include "gc_implementation/parNew/parOopClosures.inline.hpp"
    6.10  #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
    6.11  #include "gc_implementation/shared/ageTable.hpp"
    6.12 +#include "gc_implementation/shared/parGCAllocBuffer.hpp"
    6.13  #include "gc_implementation/shared/spaceDecorator.hpp"
    6.14  #include "memory/defNewGeneration.inline.hpp"
    6.15  #include "memory/genCollectedHeap.hpp"
    6.16 @@ -453,7 +453,8 @@
    6.17      // retire the last buffer.
    6.18      par_scan_state.to_space_alloc_buffer()->
    6.19        flush_stats_and_retire(_gen.plab_stats(),
    6.20 -                             false /* !retain */);
    6.21 +                             true /* end_of_gc */,
    6.22 +                             false /* retain */);
    6.23  
    6.24      // Every thread has its own age table.  We need to merge
    6.25      // them all into one.
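
The ParNew caller above is updated for the widened flush_stats_and_retire, passing end_of_gc explicitly instead of relying on the old version hardwiring it to true (the three-argument signature is inferred from the call sites in this changeset). The ordering inside it is the point the deleted header's comment makes: flush first, so the unused tail of the final buffer is still measurable, then retire. A standalone sketch of that ordering, with illustrative types and the ResizePLAB guard elided:

    #include <cstddef>
    #include <iostream>

    struct StatsSketch {
      size_t allocated = 0, wasted = 0, unused = 0;
    };

    struct BufferSketch {
      size_t allocated = 8192, wasted = 128, remaining = 512;

      void flush_stats(StatsSketch* s) {
        s->allocated += allocated;
        s->wasted    += wasted;
        s->unused    += remaining;  // reading taken before retirement
      }
      void retire(bool end_of_gc, bool retain) {
        (void)end_of_gc; (void)retain;  // filler/retain mechanics elided
        remaining = 0;                  // unused tail becomes a garbage object
      }
      // Flush first so the unused space in the last buffer is recorded,
      // then retire it; this is the order the source comments call out.
      void flush_stats_and_retire(StatsSketch* s, bool end_of_gc, bool retain) {
        flush_stats(s);
        retire(end_of_gc, retain);
      }
    };

    int main() {
      StatsSketch stats;
      BufferSketch buf;
      buf.flush_stats_and_retire(&stats, true /* end_of_gc */, false /* retain */);
      std::cout << "unused words recorded: " << stats.unused << "\n";  // 512
      return 0;
    }
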
     7.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Aug 15 16:49:38 2012 -0700
     7.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Aug 17 15:41:04 2012 -0700
     7.3 @@ -1,5 +1,5 @@
     7.4  /*
     7.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     7.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     7.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8   *
     7.9   * This code is free software; you can redistribute it and/or modify it
    7.10 @@ -25,7 +25,7 @@
    7.11  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
    7.12  #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
    7.13  
    7.14 -#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
    7.15 +#include "gc_implementation/shared/parGCAllocBuffer.hpp"
    7.16  #include "memory/defNewGeneration.hpp"
    7.17  #include "utilities/taskqueue.hpp"
    7.18  
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Fri Aug 17 15:41:04 2012 -0700
     8.3 @@ -0,0 +1,342 @@
     8.4 +/*
     8.5 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     8.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.7 + *
     8.8 + * This code is free software; you can redistribute it and/or modify it
     8.9 + * under the terms of the GNU General Public License version 2 only, as
    8.10 + * published by the Free Software Foundation.
    8.11 + *
    8.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    8.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    8.15 + * version 2 for more details (a copy is included in the LICENSE file that
    8.16 + * accompanied this code).
    8.17 + *
    8.18 + * You should have received a copy of the GNU General Public License version
    8.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    8.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    8.21 + *
    8.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    8.23 + * or visit www.oracle.com if you need additional information or have any
    8.24 + * questions.
    8.25 + *
    8.26 + */
    8.27 +
    8.28 +#include "precompiled.hpp"
    8.29 +#include "gc_implementation/shared/parGCAllocBuffer.hpp"
    8.30 +#include "memory/sharedHeap.hpp"
    8.31 +#include "oops/arrayOop.hpp"
    8.32 +#include "oops/oop.inline.hpp"
    8.33 +
    8.34 +ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
    8.35 +  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
    8.36 +  _end(NULL), _hard_end(NULL),
    8.37 +  _retained(false), _retained_filler(),
    8.38 +  _allocated(0), _wasted(0)
    8.39 +{
    8.40 +  assert (min_size() > AlignmentReserve, "Inconsistency!");
    8.41 +  // arrayOopDesc::header_size depends on command line initialization.
    8.42 +  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
    8.43 +  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
    8.44 +}
    8.45 +
    8.46 +size_t ParGCAllocBuffer::FillerHeaderSize;
    8.47 +
    8.48 +// If the minimum object size is greater than MinObjAlignment, we can
    8.49 +// end up with a shard at the end of the buffer that's smaller than
    8.50 +// the smallest object.  We can't allow that because the buffer must
    8.51 +// look like it's full of objects when we retire it, so we make
    8.52 +// sure we have enough space for a filler int array object.
    8.53 +size_t ParGCAllocBuffer::AlignmentReserve;
    8.54 +
    8.55 +void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
    8.56 +  assert(!retain || end_of_gc, "Can only retain at GC end.");
    8.57 +  if (_retained) {
    8.58 +    // If the buffer had been retained shorten the previous filler object.
    8.59 +    assert(_retained_filler.end() <= _top, "INVARIANT");
    8.60 +    CollectedHeap::fill_with_object(_retained_filler);
    8.61 +    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    8.62 +    _wasted += _retained_filler.word_size();
    8.63 +    _retained = false;
    8.64 +  }
    8.65 +  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
    8.66 +  if (_top < _hard_end) {
    8.67 +    CollectedHeap::fill_with_object(_top, _hard_end);
    8.68 +    if (!retain) {
    8.69 +      invalidate();
    8.70 +    } else {
    8.71 +      // Is there wasted space we'd like to retain for the next GC?
    8.72 +      if (pointer_delta(_end, _top) > FillerHeaderSize) {
    8.73 +        _retained = true;
    8.74 +        _retained_filler = MemRegion(_top, FillerHeaderSize);
    8.75 +        _top = _top + FillerHeaderSize;
    8.76 +      } else {
    8.77 +        invalidate();
    8.78 +      }
    8.79 +    }
    8.80 +  }
    8.81 +}
    8.82 +
    8.83 +void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
    8.84 +  assert(ResizePLAB, "Wasted work");
    8.85 +  stats->add_allocated(_allocated);
    8.86 +  stats->add_wasted(_wasted);
    8.87 +  stats->add_unused(pointer_delta(_end, _top));
    8.88 +}
    8.89 +
    8.90 +// Compute desired plab size and latch result for later
    8.91 +// use. This should be called once at the end of parallel
    8.92 +// scavenge; it clears the sensor accumulators.
    8.93 +void PLABStats::adjust_desired_plab_sz() {
    8.94 +  assert(ResizePLAB, "Not set");
    8.95 +  if (_allocated == 0) {
    8.96 +    assert(_unused == 0, "Inconsistency in PLAB stats");
    8.97 +    _allocated = 1;
    8.98 +  }
    8.99 +  double wasted_frac    = (double)_unused/(double)_allocated;
   8.100 +  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
   8.101 +                                   TargetPLABWastePct);
   8.102 +  if (target_refills == 0) {
   8.103 +    target_refills = 1;
   8.104 +  }
   8.105 +  _used = _allocated - _wasted - _unused;
   8.106 +  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
   8.107 +  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
   8.108 +  // Take historical weighted average
   8.109 +  _filter.sample(plab_sz);
   8.110 +  // Clip from above and below, and align to object boundary
   8.111 +  plab_sz = MAX2(min_size(), (size_t)_filter.average());
   8.112 +  plab_sz = MIN2(max_size(), plab_sz);
   8.113 +  plab_sz = align_object_size(plab_sz);
   8.114 +  // Latch the result
   8.115 +  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);
   8.116 +  _desired_plab_sz = plab_sz;
   8.117 +  // Now clear the accumulators for next round:
   8.118 +  // note this needs to be fixed in the case where we
   8.119 +  // are retaining across scavenges. FIX ME !!! XXX
   8.120 +  _allocated = 0;
   8.121 +  _wasted    = 0;
   8.122 +  _unused    = 0;
   8.123 +}
   8.124 +
   8.125 +#ifndef PRODUCT
   8.126 +void ParGCAllocBuffer::print() {
   8.127 +  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p"
   8.128 +             "_retained: %c _retained_filler: [%p,%p)\n",
   8.129 +             _bottom, _top, _end, _hard_end,
   8.130 +             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
   8.131 +}
   8.132 +#endif // !PRODUCT
   8.133 +
   8.134 +const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
   8.135 +MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
   8.136 +     ((size_t)Generation::GenGrain)/HeapWordSize);
   8.137 +const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
   8.138 +MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
   8.139 +     (size_t)Generation::GenGrain);
   8.140 +
   8.141 +ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
   8.142 +                                                 BlockOffsetSharedArray* bsa) :
   8.143 +  ParGCAllocBuffer(word_sz),
   8.144 +  _bsa(bsa),
   8.145 +  _bt(bsa, MemRegion(_bottom, _hard_end)),
   8.146 +  _true_end(_hard_end)
   8.147 +{}
   8.148 +
   8.149 +// The buffer comes with its own BOT, with a shared (obviously) underlying
   8.150 +// BlockOffsetSharedArray. We manipulate this BOT in the normal way
    8.151 +// as we would for any contiguous space. However, on occasion we
   8.152 +// need to do some buffer surgery at the extremities before we
   8.153 +// start using the body of the buffer for allocations. Such surgery
   8.154 +// (as explained elsewhere) is to prevent allocation on a card that
   8.155 +// is in the process of being walked concurrently by another GC thread.
   8.156 +// When such surgery happens at a point that is far removed (to the
   8.157 +// right of the current allocation point, top), we use the "contig"
   8.158 +// parameter below to directly manipulate the shared array without
   8.159 +// modifying the _next_threshold state in the BOT.
   8.160 +void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
   8.161 +                                                     bool contig) {
   8.162 +  CollectedHeap::fill_with_object(mr);
   8.163 +  if (contig) {
   8.164 +    _bt.alloc_block(mr.start(), mr.end());
   8.165 +  } else {
   8.166 +    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
   8.167 +  }
   8.168 +}
   8.169 +
   8.170 +HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
   8.171 +  HeapWord* res = NULL;
   8.172 +  if (_true_end > _hard_end) {
   8.173 +    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
   8.174 +                                      ChunkSizeInBytes) == _hard_end,
   8.175 +           "or else _true_end should be equal to _hard_end");
   8.176 +    assert(_retained, "or else _true_end should be equal to _hard_end");
   8.177 +    assert(_retained_filler.end() <= _top, "INVARIANT");
   8.178 +    CollectedHeap::fill_with_object(_retained_filler);
   8.179 +    if (_top < _hard_end) {
   8.180 +      fill_region_with_block(MemRegion(_top, _hard_end), true);
   8.181 +    }
   8.182 +    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
   8.183 +    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
   8.184 +    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
   8.185 +    _top      = _retained_filler.end();
   8.186 +    _hard_end = next_hard_end;
   8.187 +    _end      = _hard_end - AlignmentReserve;
   8.188 +    res       = ParGCAllocBuffer::allocate(word_sz);
   8.189 +    if (res != NULL) {
   8.190 +      _bt.alloc_block(res, word_sz);
   8.191 +    }
   8.192 +  }
   8.193 +  return res;
   8.194 +}
   8.195 +
   8.196 +void
   8.197 +ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
   8.198 +  ParGCAllocBuffer::undo_allocation(obj, word_sz);
   8.199 +  // This may back us up beyond the previous threshold, so reset.
   8.200 +  _bt.set_region(MemRegion(_top, _hard_end));
   8.201 +  _bt.initialize_threshold();
   8.202 +}
   8.203 +
   8.204 +void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
   8.205 +  assert(!retain || end_of_gc, "Can only retain at GC end.");
   8.206 +  if (_retained) {
   8.207 +    // We're about to make the retained_filler into a block.
   8.208 +    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
   8.209 +                                      _retained_filler.end());
   8.210 +  }
   8.211 +  // Reset _hard_end to _true_end (and update _end)
   8.212 +  if (retain && _hard_end != NULL) {
   8.213 +    assert(_hard_end <= _true_end, "Invariant.");
   8.214 +    _hard_end = _true_end;
   8.215 +    _end      = MAX2(_top, _hard_end - AlignmentReserve);
   8.216 +    assert(_end <= _hard_end, "Invariant.");
   8.217 +  }
   8.218 +  _true_end = _hard_end;
   8.219 +  HeapWord* pre_top = _top;
   8.220 +
   8.221 +  ParGCAllocBuffer::retire(end_of_gc, retain);
   8.222 +  // Now any old _retained_filler is cut back to size, the free part is
   8.223 +  // filled with a filler object, and top is past the header of that
   8.224 +  // object.
   8.225 +
   8.226 +  if (retain && _top < _end) {
   8.227 +    assert(end_of_gc && retain, "Or else retain should be false.");
   8.228 +    // If the lab does not start on a card boundary, we don't want to
   8.229 +    // allocate onto that card, since that might lead to concurrent
   8.230 +    // allocation and card scanning, which we don't support.  So we fill
   8.231 +    // the first card with a garbage object.
   8.232 +    size_t first_card_index = _bsa->index_for(pre_top);
   8.233 +    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
   8.234 +    if (first_card_start < pre_top) {
   8.235 +      HeapWord* second_card_start =
   8.236 +        _bsa->inc_by_region_size(first_card_start);
   8.237 +
   8.238 +      // Ensure enough room to fill with the smallest block
   8.239 +      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
   8.240 +
   8.241 +      // If the end is already in the first card, don't go beyond it!
   8.242 +      // Or if the remainder is too small for a filler object, gobble it up.
   8.243 +      if (_hard_end < second_card_start ||
   8.244 +          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
   8.245 +        second_card_start = _hard_end;
   8.246 +      }
   8.247 +      if (pre_top < second_card_start) {
   8.248 +        MemRegion first_card_suffix(pre_top, second_card_start);
   8.249 +        fill_region_with_block(first_card_suffix, true);
   8.250 +      }
   8.251 +      pre_top = second_card_start;
   8.252 +      _top = pre_top;
   8.253 +      _end = MAX2(_top, _hard_end - AlignmentReserve);
   8.254 +    }
   8.255 +
   8.256 +    // If the lab does not end on a card boundary, we don't want to
   8.257 +    // allocate onto that card, since that might lead to concurrent
   8.258 +    // allocation and card scanning, which we don't support.  So we fill
   8.259 +    // the last card with a garbage object.
   8.260 +    size_t last_card_index = _bsa->index_for(_hard_end);
   8.261 +    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
   8.262 +    if (last_card_start < _hard_end) {
   8.263 +
   8.264 +      // Ensure enough room to fill with the smallest block
   8.265 +      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
   8.266 +
   8.267 +      // If the top is already in the last card, don't go back beyond it!
   8.268 +      // Or if the remainder is too small for a filler object, gobble it up.
   8.269 +      if (_top > last_card_start ||
   8.270 +          pointer_delta(last_card_start, _top) < AlignmentReserve) {
   8.271 +        last_card_start = _top;
   8.272 +      }
   8.273 +      if (last_card_start < _hard_end) {
   8.274 +        MemRegion last_card_prefix(last_card_start, _hard_end);
   8.275 +        fill_region_with_block(last_card_prefix, false);
   8.276 +      }
   8.277 +      _hard_end = last_card_start;
   8.278 +      _end      = MAX2(_top, _hard_end - AlignmentReserve);
   8.279 +      _true_end = _hard_end;
   8.280 +      assert(_end <= _hard_end, "Invariant.");
   8.281 +    }
   8.282 +
   8.283 +    // At this point:
    8.284 +    //   1) We had a filler object from the original top to hard_end.
    8.285 +    //   2) We have filled in any partial cards at the front and back.
   8.286 +    if (pre_top < _hard_end) {
   8.287 +      // Now we can reset the _bt to do allocation in the given area.
   8.288 +      MemRegion new_filler(pre_top, _hard_end);
   8.289 +      fill_region_with_block(new_filler, false);
   8.290 +      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
   8.291 +      // If there's no space left, don't retain.
   8.292 +      if (_top >= _end) {
   8.293 +        _retained = false;
   8.294 +        invalidate();
   8.295 +        return;
   8.296 +      }
   8.297 +      _retained_filler = MemRegion(pre_top, _top);
   8.298 +      _bt.set_region(MemRegion(_top, _hard_end));
   8.299 +      _bt.initialize_threshold();
   8.300 +      assert(_bt.threshold() > _top, "initialize_threshold failed!");
   8.301 +
    8.302 +      // There may be other reasons for queries into the middle of the
    8.303 +      // filler object.  When such queries are done in parallel with
    8.304 +      // allocation, bad things can happen if the query involves object
    8.305 +      // iteration.  So we ensure that such queries do not involve object
    8.306 +      // iteration by putting another filler object on the boundaries of
    8.307 +      // such queries.  One such boundary is the object spanning a
    8.308 +      // parallel card chunk boundary.
   8.309 +
   8.310 +      // "chunk_boundary" is the address of the first chunk boundary less
   8.311 +      // than "hard_end".
   8.312 +      HeapWord* chunk_boundary =
   8.313 +        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
   8.314 +      assert(chunk_boundary < _hard_end, "Or else above did not work.");
   8.315 +      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
   8.316 +             "Consequence of last card handling above.");
   8.317 +
   8.318 +      if (_top <= chunk_boundary) {
   8.319 +        assert(_true_end == _hard_end, "Invariant.");
   8.320 +        while (_top <= chunk_boundary) {
   8.321 +          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
   8.322 +                 "Consequence of last card handling above.");
   8.323 +          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
   8.324 +          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
   8.325 +          _hard_end = chunk_boundary;
   8.326 +          chunk_boundary -= ChunkSizeInWords;
   8.327 +        }
   8.328 +        _end = _hard_end - AlignmentReserve;
   8.329 +        assert(_top <= _end, "Invariant.");
   8.330 +        // Now reset the initial filler chunk so it doesn't overlap with
   8.331 +        // the one(s) inserted above.
   8.332 +        MemRegion new_filler(pre_top, _hard_end);
   8.333 +        fill_region_with_block(new_filler, false);
   8.334 +      }
   8.335 +    } else {
   8.336 +      _retained = false;
   8.337 +      invalidate();
   8.338 +    }
   8.339 +  } else {
   8.340 +    assert(!end_of_gc ||
   8.341 +           (!_retained && _true_end == _hard_end), "Checking.");
   8.342 +  }
   8.343 +  assert(_end <= _hard_end, "Invariant.");
   8.344 +  assert(_top < _end || _top == _hard_end, "Invariant");
   8.345 +}
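
The retire() path above trims a retained buffer inward so that neither end of
the retained region shares a card with space outside it; partial cards at the
front and back are given away to garbage filler objects instead. The following
self-contained C++ sketch (illustrative only, not part of this changeset)
re-derives that trimming arithmetic on plain word indices. The 512-byte card
size and the AlignmentReserve value are assumptions for illustration; the real
code obtains them from BlockOffsetSharedArray and from the minimum heap filler
size.

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Assumed constants, for illustration only; HotSpot derives the card
    // size from BlockOffsetSharedArray and AlignmentReserve from the
    // smallest fillable heap block.
    static const size_t CardSizeInWords  = 512 / sizeof(uintptr_t);
    static const size_t AlignmentReserve = 4;  // in words

    // Round a word-indexed address down to the start of its card.
    static size_t card_start(size_t addr) {
      return addr - (addr % CardSizeInWords);
    }

    // Trim [top, hard_end) inward to card boundaries, mirroring the
    // front/back trimming in ParGCAllocBufferWithBOT::retire() above.
    static void trim_to_cards(size_t& top, size_t& hard_end) {
      // Front: if top is mid-card, advance it to the next card boundary,
      // but never past hard_end, and never leave a sliver too small to
      // hold a filler block.
      size_t first = card_start(top);
      if (first < top) {
        size_t second = first + CardSizeInWords;
        second = std::max(second, top + AlignmentReserve);
        if (hard_end < second || hard_end - second < AlignmentReserve) {
          second = hard_end;  // the whole remainder stays in the first card
        }
        top = second;
      }
      // Back: if hard_end is mid-card, pull it back to its card boundary,
      // symmetric to the front case.
      size_t last = card_start(hard_end);
      if (last < hard_end) {
        last = std::min(last, hard_end - AlignmentReserve);
        if (top > last || last - top < AlignmentReserve) {
          last = top;         // nothing retainable remains
        }
        hard_end = last;
      }
      assert(top <= hard_end);
    }

Under these assumed constants (64-word cards on an 8-byte-word machine), a
buffer spanning words [100, 300) trims to [128, 256); the partial cards
[100, 128) and [256, 300) are the regions retire() would fill with garbage
blocks.
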
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Fri Aug 17 15:41:04 2012 -0700
     9.3 @@ -0,0 +1,249 @@
     9.4 +/*
     9.5 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     9.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.7 + *
     9.8 + * This code is free software; you can redistribute it and/or modify it
     9.9 + * under the terms of the GNU General Public License version 2 only, as
    9.10 + * published by the Free Software Foundation.
    9.11 + *
    9.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    9.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    9.15 + * version 2 for more details (a copy is included in the LICENSE file that
    9.16 + * accompanied this code).
    9.17 + *
    9.18 + * You should have received a copy of the GNU General Public License version
    9.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    9.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    9.21 + *
    9.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    9.23 + * or visit www.oracle.com if you need additional information or have any
    9.24 + * questions.
    9.25 + *
    9.26 + */
    9.27 +
     9.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_HPP
     9.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_HPP
    9.30 +
    9.31 +#include "memory/allocation.hpp"
    9.32 +#include "memory/blockOffsetTable.hpp"
    9.33 +#include "memory/threadLocalAllocBuffer.hpp"
    9.34 +#include "utilities/globalDefinitions.hpp"
    9.35 +
    9.36 +// Forward decl.
    9.37 +
    9.38 +class PLABStats;
    9.39 +
    9.40 +// A per-thread allocation buffer used during GC.
    9.41 +class ParGCAllocBuffer: public CHeapObj<mtGC> {
    9.42 +protected:
     9.43 +  char head[32];       // padding, presumably to reduce false sharing
    9.44 +  size_t _word_sz;          // in HeapWord units
    9.45 +  HeapWord* _bottom;
    9.46 +  HeapWord* _top;
    9.47 +  HeapWord* _end;       // last allocatable address + 1
    9.48 +  HeapWord* _hard_end;  // _end + AlignmentReserve
    9.49 +  bool      _retained;  // whether we hold a _retained_filler
    9.50 +  MemRegion _retained_filler;
     9.51 +  // In support of ergonomic sizing of PLABs
    9.52 +  size_t    _allocated;     // in HeapWord units
    9.53 +  size_t    _wasted;        // in HeapWord units
     9.54 +  char tail[32];       // padding, presumably to reduce false sharing
    9.55 +  static size_t FillerHeaderSize;
    9.56 +  static size_t AlignmentReserve;
    9.57 +
    9.58 +public:
    9.59 +  // Initializes the buffer to be empty, but with the given "word_sz".
     9.60 +  // Must be initialized with "set_buf" before an allocation can succeed.
    9.61 +  ParGCAllocBuffer(size_t word_sz);
    9.62 +
    9.63 +  static const size_t min_size() {
    9.64 +    return ThreadLocalAllocBuffer::min_size();
    9.65 +  }
    9.66 +
    9.67 +  static const size_t max_size() {
    9.68 +    return ThreadLocalAllocBuffer::max_size();
    9.69 +  }
    9.70 +
    9.71 +  // If an allocation of the given "word_sz" can be satisfied within the
    9.72 +  // buffer, do the allocation, returning a pointer to the start of the
    9.73 +  // allocated block.  If the allocation request cannot be satisfied,
    9.74 +  // return NULL.
    9.75 +  HeapWord* allocate(size_t word_sz) {
    9.76 +    HeapWord* res = _top;
    9.77 +    if (pointer_delta(_end, _top) >= word_sz) {
    9.78 +      _top = _top + word_sz;
    9.79 +      return res;
    9.80 +    } else {
    9.81 +      return NULL;
    9.82 +    }
    9.83 +  }
    9.84 +
    9.85 +  // Undo the last allocation in the buffer, which is required to be of the
    9.86 +  // "obj" of the given "word_sz".
    9.87 +  void undo_allocation(HeapWord* obj, size_t word_sz) {
    9.88 +    assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
    9.89 +    assert(pointer_delta(_top, obj)     == word_sz, "Bad undo");
    9.90 +    _top = obj;
    9.91 +  }
    9.92 +
    9.93 +  // The total (word) size of the buffer, including both allocated and
     9.94 +  // unallocated space.
    9.95 +  size_t word_sz() { return _word_sz; }
    9.96 +
    9.97 +  // Should only be done if we are about to reset with a new buffer of the
    9.98 +  // given size.
    9.99 +  void set_word_size(size_t new_word_sz) {
   9.100 +    assert(new_word_sz > AlignmentReserve, "Too small");
   9.101 +    _word_sz = new_word_sz;
   9.102 +  }
   9.103 +
   9.104 +  // The number of words of unallocated space remaining in the buffer.
   9.105 +  size_t words_remaining() {
   9.106 +    assert(_end >= _top, "Negative buffer");
   9.107 +    return pointer_delta(_end, _top, HeapWordSize);
   9.108 +  }
   9.109 +
   9.110 +  bool contains(void* addr) {
   9.111 +    return (void*)_bottom <= addr && addr < (void*)_hard_end;
   9.112 +  }
   9.113 +
    9.114 +  // Sets the space of the buffer to be [buf, buf + word_sz()).
   9.115 +  void set_buf(HeapWord* buf) {
   9.116 +    _bottom   = buf;
   9.117 +    _top      = _bottom;
   9.118 +    _hard_end = _bottom + word_sz();
   9.119 +    _end      = _hard_end - AlignmentReserve;
   9.120 +    assert(_end >= _top, "Negative buffer");
   9.121 +    // In support of ergonomic sizing
   9.122 +    _allocated += word_sz();
   9.123 +  }
   9.124 +
    9.125 +  // Flush the stats supporting ergonomic sizing of PLABs
   9.126 +  void flush_stats(PLABStats* stats);
   9.127 +  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
   9.128 +    // We flush the stats first in order to get a reading of
   9.129 +    // unused space in the last buffer.
   9.130 +    if (ResizePLAB) {
   9.131 +      flush_stats(stats);
   9.132 +    }
   9.133 +    // Retire the last allocation buffer.
   9.134 +    retire(end_of_gc, retain);
   9.135 +  }
   9.136 +
   9.137 +  // Force future allocations to fail and queries for contains()
   9.138 +  // to return false
   9.139 +  void invalidate() {
   9.140 +    assert(!_retained, "Shouldn't retain an invalidated buffer.");
   9.141 +    _end    = _hard_end;
    9.142 +    _wasted += pointer_delta(_end, _top);  // unused space
   9.143 +    _top    = _end;      // force future allocations to fail
   9.144 +    _bottom = _end;      // force future contains() queries to return false
   9.145 +  }
   9.146 +
   9.147 +  // Fills in the unallocated portion of the buffer with a garbage object.
    9.148 +  // If "end_of_gc" is true, this is after the last use in the GC.  If "retain"
   9.149 +  // is true, attempt to re-use the unused portion in the next GC.
   9.150 +  void retire(bool end_of_gc, bool retain);
   9.151 +
   9.152 +  void print() PRODUCT_RETURN;
   9.153 +};
   9.154 +
   9.155 +// PLAB stats book-keeping
   9.156 +class PLABStats VALUE_OBJ_CLASS_SPEC {
   9.157 +  size_t _allocated;      // total allocated
   9.158 +  size_t _wasted;         // of which wasted (internal fragmentation)
   9.159 +  size_t _unused;         // Unused in last buffer
   9.160 +  size_t _used;           // derived = allocated - wasted - unused
    9.161 +  size_t _desired_plab_sz; // output of filter (below), suitably trimmed and quantized
   9.162 +  AdaptiveWeightedAverage
   9.163 +         _filter;         // integrator with decay
   9.164 +
   9.165 + public:
   9.166 +  PLABStats(size_t desired_plab_sz_, unsigned wt) :
   9.167 +    _allocated(0),
   9.168 +    _wasted(0),
   9.169 +    _unused(0),
   9.170 +    _used(0),
   9.171 +    _desired_plab_sz(desired_plab_sz_),
   9.172 +    _filter(wt)
   9.173 +  {
   9.174 +    size_t min_sz = min_size();
   9.175 +    size_t max_sz = max_size();
   9.176 +    size_t aligned_min_sz = align_object_size(min_sz);
   9.177 +    size_t aligned_max_sz = align_object_size(max_sz);
   9.178 +    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
   9.179 +           min_sz <= max_sz,
   9.180 +           "PLAB clipping computation in adjust_desired_plab_sz()"
   9.181 +           " may be incorrect");
   9.182 +  }
   9.183 +
   9.184 +  static const size_t min_size() {
   9.185 +    return ParGCAllocBuffer::min_size();
   9.186 +  }
   9.187 +
   9.188 +  static const size_t max_size() {
   9.189 +    return ParGCAllocBuffer::max_size();
   9.190 +  }
   9.191 +
   9.192 +  size_t desired_plab_sz() {
   9.193 +    return _desired_plab_sz;
   9.194 +  }
   9.195 +
   9.196 +  void adjust_desired_plab_sz(); // filter computation, latches output to
   9.197 +                                 // _desired_plab_sz, clears sensor accumulators
   9.198 +
   9.199 +  void add_allocated(size_t v) {
   9.200 +    Atomic::add_ptr(v, &_allocated);
   9.201 +  }
   9.202 +
   9.203 +  void add_unused(size_t v) {
   9.204 +    Atomic::add_ptr(v, &_unused);
   9.205 +  }
   9.206 +
   9.207 +  void add_wasted(size_t v) {
   9.208 +    Atomic::add_ptr(v, &_wasted);
   9.209 +  }
   9.210 +};
   9.211 +
   9.212 +class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
   9.213 +  BlockOffsetArrayContigSpace _bt;
   9.214 +  BlockOffsetSharedArray*     _bsa;
   9.215 +  HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer
   9.216 +
   9.217 +  static const size_t ChunkSizeInWords;
   9.218 +  static const size_t ChunkSizeInBytes;
   9.219 +  HeapWord* allocate_slow(size_t word_sz);
   9.220 +
   9.221 +  void fill_region_with_block(MemRegion mr, bool contig);
   9.222 +
   9.223 +public:
   9.224 +  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
   9.225 +
   9.226 +  HeapWord* allocate(size_t word_sz) {
   9.227 +    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
   9.228 +    if (res != NULL) {
   9.229 +      _bt.alloc_block(res, word_sz);
   9.230 +    } else {
   9.231 +      res = allocate_slow(word_sz);
   9.232 +    }
   9.233 +    return res;
   9.234 +  }
   9.235 +
   9.236 +  void undo_allocation(HeapWord* obj, size_t word_sz);
   9.237 +
   9.238 +  void set_buf(HeapWord* buf_start) {
   9.239 +    ParGCAllocBuffer::set_buf(buf_start);
   9.240 +    _true_end = _hard_end;
   9.241 +    _bt.set_region(MemRegion(buf_start, word_sz()));
   9.242 +    _bt.initialize_threshold();
   9.243 +  }
   9.244 +
   9.245 +  void retire(bool end_of_gc, bool retain);
   9.246 +
   9.247 +  MemRegion range() {
   9.248 +    return MemRegion(_top, _true_end);
   9.249 +  }
   9.250 +};
   9.251 +
    9.252 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_HPP
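
Taken together, the header above defines the lifecycle a GC worker drives:
set_buf() installs a fresh chunk, allocate() serves the bump-pointer fast
path, undo_allocation() rolls back the last allocation when a copying race is
lost, and flush_stats_and_retire() closes the buffer out at the end of a
collection. The sketch below shows that lifecycle in outline; it is
illustrative only, and object_size(), copy_object(), acquire_shared_chunk(),
and pop_work() are hypothetical stand-ins for collector machinery outside
this change.

    #include "gc_implementation/shared/parGCAllocBuffer.hpp"

    // Hypothetical stand-ins for collector machinery not shown here:
    class oop_t;                                     // an object reference
    size_t    object_size(oop_t* obj);               // object size in words
    bool      copy_object(oop_t* obj, HeapWord* dest, size_t word_sz);
    HeapWord* acquire_shared_chunk(size_t word_sz);  // carve from shared space
    bool      pop_work(oop_t** obj);                 // per-thread work queue

    void worker_copy_loop(ParGCAllocBuffer& plab, PLABStats& stats) {
      oop_t* obj;
      while (pop_work(&obj)) {
        size_t word_sz = object_size(obj);
        HeapWord* dest = plab.allocate(word_sz);     // fast path: bump _top
        if (dest == NULL) {
          // Refill: retire the current buffer mid-GC (no retention), then
          // install a fresh chunk of the buffer's configured word size.
          HeapWord* chunk = acquire_shared_chunk(plab.word_sz());
          if (chunk == NULL) break;                  // shared space exhausted
          plab.retire(false /* end_of_gc */, false /* retain */);
          plab.set_buf(chunk);
          dest = plab.allocate(word_sz);
        }
        if (dest != NULL && !copy_object(obj, dest, word_sz)) {
          // Another worker won the race to copy this object; give the
          // words back so the buffer stays densely packed.
          plab.undo_allocation(dest, word_sz);
        }
      }
      // End of GC: flush the ergonomic-sizing sensors first, so the unused
      // tail of the last buffer is measured, then retire with retention.
      plab.flush_stats_and_retire(&stats, true /* end_of_gc */, true /* retain */);
    }
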
    10.1 --- a/src/share/vm/memory/tenuredGeneration.cpp	Wed Aug 15 16:49:38 2012 -0700
    10.2 +++ b/src/share/vm/memory/tenuredGeneration.cpp	Fri Aug 17 15:41:04 2012 -0700
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    10.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -23,8 +23,8 @@
   10.11   */
   10.12  
   10.13  #include "precompiled.hpp"
   10.14 -#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
   10.15  #include "gc_implementation/shared/collectorCounters.hpp"
   10.16 +#include "gc_implementation/shared/parGCAllocBuffer.hpp"
   10.17  #include "memory/allocation.inline.hpp"
   10.18  #include "memory/blockOffsetTable.inline.hpp"
   10.19  #include "memory/generation.inline.hpp"
    11.1 --- a/src/share/vm/precompiled/precompiled.hpp	Wed Aug 15 16:49:38 2012 -0700
    11.2 +++ b/src/share/vm/precompiled/precompiled.hpp	Fri Aug 17 15:41:04 2012 -0700
    11.3 @@ -1,5 +1,5 @@
    11.4  /*
    11.5 - * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
    11.6 + * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
    11.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.8   *
    11.9   * This code is free software; you can redistribute it and/or modify it
   11.10 @@ -306,7 +306,6 @@
   11.11  # include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
   11.12  # include "gc_implementation/g1/ptrQueue.hpp"
   11.13  # include "gc_implementation/g1/satbQueue.hpp"
   11.14 -# include "gc_implementation/parNew/parGCAllocBuffer.hpp"
   11.15  # include "gc_implementation/parNew/parOopClosures.hpp"
   11.16  # include "gc_implementation/parallelScavenge/objectStartArray.hpp"
   11.17  # include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
   11.18 @@ -322,6 +321,7 @@
   11.19  # include "gc_implementation/parallelScavenge/psYoungGen.hpp"
   11.20  # include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
   11.21  # include "gc_implementation/shared/gcPolicyCounters.hpp"
   11.22 +# include "gc_implementation/shared/parGCAllocBuffer.hpp"
   11.23  #endif // SERIALGC
   11.24  
   11.25  #endif // !DONT_USE_PRECOMPILED_HEADER
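
With parGCAllocBuffer.hpp relocated from gc_implementation/parNew/ to
gc_implementation/shared/, every consumer must follow the move, as
tenuredGeneration.cpp and the precompiled header do above. Any other includer,
in tree or in a local patch, changes the same single line:

    // Before this changeset:
    //   #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
    // After it:
    #include "gc_implementation/shared/parGCAllocBuffer.hpp"
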
