7045397: NPG: Add freelists to class loader arenas.

Tue, 18 Sep 2012 23:35:42 -0700

author
jmasa
date
Tue, 18 Sep 2012 23:35:42 -0700
changeset 4196
685df3c6f84b
parent 4188
d0e7716b179e
child 4197
476718ea6759

7045397: NPG: Add freelists to class loader arenas.
Reviewed-by: coleenp, stefank, jprovino, ohair

make/excludeSrc.make file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/vmGCOperations.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/binaryTreeDictionary.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/binaryTreeDictionary.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/freeBlockDictionary.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/freeBlockDictionary.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/freeList.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/freeList.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/metablock.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/metachunk.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/metaspace.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/metaspace.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/vmStructs.cpp file | annotate | diff | comparison | revisions
     1.1 --- a/make/excludeSrc.make	Fri Oct 19 11:26:17 2012 -0700
     1.2 +++ b/make/excludeSrc.make	Tue Sep 18 23:35:42 2012 -0700
     1.3 @@ -79,10 +79,10 @@
     1.4        CXXFLAGS += -DSERIALGC
     1.5        CFLAGS += -DSERIALGC
     1.6        Src_Files_EXCLUDE += \
     1.7 -	binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
     1.8 +	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
     1.9  	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
    1.10 -	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
    1.11 -	freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
    1.12 +	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
    1.13 +	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
    1.14  	concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
    1.15  	dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
    1.16  	g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Tue Sep 18 23:35:42 2012 -0700
     2.3 @@ -0,0 +1,175 @@
     2.4 +/*
     2.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
     2.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.7 + *
     2.8 + * This code is free software; you can redistribute it and/or modify it
     2.9 + * under the terms of the GNU General Public License version 2 only, as
    2.10 + * published by the Free Software Foundation.
    2.11 + *
    2.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    2.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    2.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    2.15 + * version 2 for more details (a copy is included in the LICENSE file that
    2.16 + * accompanied this code).
    2.17 + *
    2.18 + * You should have received a copy of the GNU General Public License version
    2.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    2.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    2.21 + *
    2.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    2.23 + * or visit www.oracle.com if you need additional information or have any
    2.24 + * questions.
    2.25 + *
    2.26 + */
    2.27 +
    2.28 +#include "precompiled.hpp"
    2.29 +#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
    2.30 +#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
    2.31 +#include "memory/freeBlockDictionary.hpp"
    2.32 +#include "memory/sharedHeap.hpp"
    2.33 +#include "runtime/globals.hpp"
    2.34 +#include "runtime/mutex.hpp"
    2.35 +#include "runtime/vmThread.hpp"
    2.36 +
    2.37 +template <>
    2.38 +void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
    2.39 +  if (c != NULL) {
    2.40 +    st->print("%16s", c);
    2.41 +  } else {
    2.42 +    st->print(SIZE_FORMAT_W(16), size());
    2.43 +  }
    2.44 +  st->print("\t"
    2.45 +           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
    2.46 +           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
    2.47 +           bfr_surp(),             surplus(),             desired(),             prev_sweep(),           before_sweep(),
    2.48 +           count(),               coal_births(),          coal_deaths(),          split_births(),         split_deaths());
    2.49 +}
    2.50 +
    2.51 +template <class Chunk>
    2.52 +AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
    2.53 +  init_statistics();
    2.54 +}
    2.55 +
    2.56 +template <class Chunk>
    2.57 +AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
    2.58 +  init_statistics();
    2.59 +#ifndef PRODUCT
    2.60 +  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
    2.61 +#endif
    2.62 +}
    2.63 +
    2.64 +template <class Chunk>
    2.65 +void AdaptiveFreeList<Chunk>::initialize() {
    2.66 +  FreeList<Chunk>::initialize();
    2.67 +  set_hint(0);
    2.68 +  init_statistics(true /* split_birth */);
    2.69 +}
    2.70 +
    2.71 +template <class Chunk>
    2.72 +void AdaptiveFreeList<Chunk>::reset(size_t hint) {
    2.73 +  FreeList<Chunk>::reset();
    2.74 +  set_hint(hint);
    2.75 +}
    2.76 +
    2.77 +#ifndef PRODUCT
    2.78 +template <class Chunk>
    2.79 +void AdaptiveFreeList<Chunk>::assert_proper_lock_protection_work() const {
    2.80 +  assert(protecting_lock() != NULL, "Don't call this directly");
    2.81 +  assert(ParallelGCThreads > 0, "Don't call this directly");
    2.82 +  Thread* thr = Thread::current();
    2.83 +  if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
    2.84 +    // assert that we are holding the freelist lock
    2.85 +  } else if (thr->is_GC_task_thread()) {
    2.86 +    assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
    2.87 +  } else if (thr->is_Java_thread()) {
    2.88 +    assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
    2.89 +  } else {
    2.90 +    ShouldNotReachHere();  // unaccounted thread type?
    2.91 +  }
    2.92 +}
    2.93 +#endif
    2.94 +template <class Chunk>
    2.95 +void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
    2.96 +  _allocation_stats.initialize(split_birth);
    2.97 +}
    2.98 +
    2.99 +template <class Chunk>
   2.100 +size_t AdaptiveFreeList<Chunk>::get_better_size() {
   2.101 +
   2.102 +  // A candidate chunk has been found.  If it is already under
    2.103 +  // populated and there is a hint, return the hint().  Else
   2.104 +  // return the size of this chunk.
   2.105 +  if (surplus() <= 0) {
   2.106 +    if (hint() != 0) {
   2.107 +      return hint();
   2.108 +    } else {
   2.109 +      return size();
   2.110 +    }
   2.111 +  } else {
   2.112 +    // This list has a surplus so use it.
   2.113 +    return size();
   2.114 +  }
   2.115 +}
   2.116 +
   2.117 +
   2.118 +template <class Chunk>
   2.119 +void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
   2.120 +  assert_proper_lock_protection();
   2.121 +  return_chunk_at_head(chunk, true);
   2.122 +}
   2.123 +
   2.124 +template <class Chunk>
   2.125 +void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
   2.126 +  FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
   2.127 +#ifdef ASSERT
   2.128 +  if (record_return) {
   2.129 +    increment_returned_bytes_by(size()*HeapWordSize);
   2.130 +  }
   2.131 +#endif
   2.132 +}
   2.133 +
   2.134 +template <class Chunk>
   2.135 +void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
   2.136 +  return_chunk_at_tail(chunk, true);
   2.137 +}
   2.138 +
   2.139 +template <class Chunk>
   2.140 +void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
   2.141 +  FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
   2.142 +#ifdef ASSERT
   2.143 +  if (record_return) {
   2.144 +    increment_returned_bytes_by(size()*HeapWordSize);
   2.145 +  }
   2.146 +#endif
   2.147 +}
   2.148 +
   2.149 +#ifndef PRODUCT
   2.150 +template <class Chunk>
   2.151 +void AdaptiveFreeList<Chunk>::verify_stats() const {
   2.152 +  // The +1 of the LH comparand is to allow some "looseness" in
   2.153 +  // checking: we usually call this interface when adding a block
   2.154 +  // and we'll subsequently update the stats; we cannot update the
   2.155 +  // stats beforehand because in the case of the large-block BT
   2.156 +  // dictionary for example, this might be the first block and
   2.157 +  // in that case there would be no place that we could record
   2.158 +  // the stats (which are kept in the block itself).
   2.159 +  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
   2.160 +          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
   2.161 +         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
   2.162 +             + (ssize_t)count()),                // Total Current Stock + depletion
   2.163 +         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
   2.164 +                 " violates Conservation Principle: "
   2.165 +                 "prev_sweep(" SIZE_FORMAT ")"
   2.166 +                 " + split_births(" SIZE_FORMAT ")"
   2.167 +                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
   2.168 +                 " split_deaths(" SIZE_FORMAT ")"
   2.169 +                 " coal_deaths(" SIZE_FORMAT ")"
   2.170 +                 " + count(" SSIZE_FORMAT ")",
   2.171 +                 this, size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
    2.172 +                 _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
   2.173 +                 _allocation_stats.coal_deaths(), count()));
   2.174 +}
   2.175 +#endif
   2.176 +
   2.177 +// Needs to be after the definitions have been seen.
   2.178 +template class AdaptiveFreeList<FreeChunk>;
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp	Tue Sep 18 23:35:42 2012 -0700
     3.3 @@ -0,0 +1,232 @@
     3.4 +/*
      3.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
     3.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.7 + *
     3.8 + * This code is free software; you can redistribute it and/or modify it
     3.9 + * under the terms of the GNU General Public License version 2 only, as
    3.10 + * published by the Free Software Foundation.
    3.11 + *
    3.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    3.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    3.15 + * version 2 for more details (a copy is included in the LICENSE file that
    3.16 + * accompanied this code).
    3.17 + *
    3.18 + * You should have received a copy of the GNU General Public License version
    3.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    3.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    3.21 + *
    3.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    3.23 + * or visit www.oracle.com if you need additional information or have any
    3.24 + * questions.
    3.25 + *
    3.26 + */
    3.27 +
    3.28 +#ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
    3.29 +#define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
    3.30 +
    3.31 +#include "memory/freeList.hpp"
    3.32 +#include "gc_implementation/shared/allocationStats.hpp"
    3.33 +
    3.34 +class CompactibleFreeListSpace;
    3.35 +
    3.36 +// A class for maintaining a free list of Chunk's.  The FreeList
     3.37 + // maintains the structure of the list (head, tail, etc.) plus
    3.38 +// statistics for allocations from the list.  The links between items
    3.39 +// are not part of FreeList.  The statistics are
    3.40 +// used to make decisions about coalescing Chunk's when they
    3.41 +// are swept during collection.
    3.42 +//
    3.43 +// See the corresponding .cpp file for a description of the specifics
    3.44 +// for that implementation.
    3.45 +
    3.46 +class Mutex;
    3.47 +
    3.48 +template <class Chunk>
    3.49 +class AdaptiveFreeList : public FreeList<Chunk> {
    3.50 +  friend class CompactibleFreeListSpace;
    3.51 +  friend class VMStructs;
    3.52 +  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
    3.53 +
    3.54 +  size_t        _hint;          // next larger size list with a positive surplus
    3.55 +
    3.56 +  AllocationStats _allocation_stats; // allocation-related statistics
    3.57 +
    3.58 + public:
    3.59 +
    3.60 +  AdaptiveFreeList();
    3.61 +  AdaptiveFreeList(Chunk* fc);
    3.62 +
    3.63 +  using FreeList<Chunk>::assert_proper_lock_protection;
    3.64 +#ifdef ASSERT
    3.65 +  using FreeList<Chunk>::protecting_lock;
    3.66 +#endif
    3.67 +  using FreeList<Chunk>::count;
    3.68 +  using FreeList<Chunk>::size;
    3.69 +  using FreeList<Chunk>::verify_chunk_in_free_list;
    3.70 +  using FreeList<Chunk>::getFirstNChunksFromList;
    3.71 +  using FreeList<Chunk>::print_on;
    3.72 +  void return_chunk_at_head(Chunk* fc, bool record_return);
    3.73 +  void return_chunk_at_head(Chunk* fc);
    3.74 +  void return_chunk_at_tail(Chunk* fc, bool record_return);
    3.75 +  void return_chunk_at_tail(Chunk* fc);
    3.76 +  using FreeList<Chunk>::return_chunk_at_tail;
    3.77 +  using FreeList<Chunk>::remove_chunk;
    3.78 +  using FreeList<Chunk>::prepend;
    3.79 +  using FreeList<Chunk>::print_labels_on;
    3.80 +  using FreeList<Chunk>::get_chunk_at_head;
    3.81 +
    3.82 +  // Initialize.
    3.83 +  void initialize();
    3.84 +
    3.85 +  // Reset the head, tail, hint, and count of a free list.
    3.86 +  void reset(size_t hint);
    3.87 +
    3.88 +  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
    3.89 +
    3.90 +  void print_on(outputStream* st, const char* c = NULL) const;
    3.91 +
    3.92 +  size_t hint() const {
    3.93 +    return _hint;
    3.94 +  }
    3.95 +  void set_hint(size_t v) {
    3.96 +    assert_proper_lock_protection();
    3.97 +    assert(v == 0 || size() < v, "Bad hint");
    3.98 +    _hint = v;
    3.99 +  }
   3.100 +
   3.101 +  size_t get_better_size();
   3.102 +
   3.103 +  // Accessors for statistics
   3.104 +  void init_statistics(bool split_birth = false);
   3.105 +
   3.106 +  AllocationStats* allocation_stats() {
   3.107 +    assert_proper_lock_protection();
   3.108 +    return &_allocation_stats;
   3.109 +  }
   3.110 +
   3.111 +  ssize_t desired() const {
   3.112 +    return _allocation_stats.desired();
   3.113 +  }
   3.114 +  void set_desired(ssize_t v) {
   3.115 +    assert_proper_lock_protection();
   3.116 +    _allocation_stats.set_desired(v);
   3.117 +  }
   3.118 +  void compute_desired(float inter_sweep_current,
   3.119 +                       float inter_sweep_estimate,
   3.120 +                       float intra_sweep_estimate) {
   3.121 +    assert_proper_lock_protection();
   3.122 +    _allocation_stats.compute_desired(count(),
   3.123 +                                      inter_sweep_current,
   3.124 +                                      inter_sweep_estimate,
   3.125 +                                      intra_sweep_estimate);
   3.126 +  }
   3.127 +  ssize_t coal_desired() const {
   3.128 +    return _allocation_stats.coal_desired();
   3.129 +  }
   3.130 +  void set_coal_desired(ssize_t v) {
   3.131 +    assert_proper_lock_protection();
   3.132 +    _allocation_stats.set_coal_desired(v);
   3.133 +  }
   3.134 +
   3.135 +  ssize_t surplus() const {
   3.136 +    return _allocation_stats.surplus();
   3.137 +  }
   3.138 +  void set_surplus(ssize_t v) {
   3.139 +    assert_proper_lock_protection();
   3.140 +    _allocation_stats.set_surplus(v);
   3.141 +  }
   3.142 +  void increment_surplus() {
   3.143 +    assert_proper_lock_protection();
   3.144 +    _allocation_stats.increment_surplus();
   3.145 +  }
   3.146 +  void decrement_surplus() {
   3.147 +    assert_proper_lock_protection();
   3.148 +    _allocation_stats.decrement_surplus();
   3.149 +  }
   3.150 +
   3.151 +  ssize_t bfr_surp() const {
   3.152 +    return _allocation_stats.bfr_surp();
   3.153 +  }
   3.154 +  void set_bfr_surp(ssize_t v) {
   3.155 +    assert_proper_lock_protection();
   3.156 +    _allocation_stats.set_bfr_surp(v);
   3.157 +  }
   3.158 +  ssize_t prev_sweep() const {
   3.159 +    return _allocation_stats.prev_sweep();
   3.160 +  }
   3.161 +  void set_prev_sweep(ssize_t v) {
   3.162 +    assert_proper_lock_protection();
   3.163 +    _allocation_stats.set_prev_sweep(v);
   3.164 +  }
   3.165 +  ssize_t before_sweep() const {
   3.166 +    return _allocation_stats.before_sweep();
   3.167 +  }
   3.168 +  void set_before_sweep(ssize_t v) {
   3.169 +    assert_proper_lock_protection();
   3.170 +    _allocation_stats.set_before_sweep(v);
   3.171 +  }
   3.172 +
   3.173 +  ssize_t coal_births() const {
   3.174 +    return _allocation_stats.coal_births();
   3.175 +  }
   3.176 +  void set_coal_births(ssize_t v) {
   3.177 +    assert_proper_lock_protection();
   3.178 +    _allocation_stats.set_coal_births(v);
   3.179 +  }
   3.180 +  void increment_coal_births() {
   3.181 +    assert_proper_lock_protection();
   3.182 +    _allocation_stats.increment_coal_births();
   3.183 +  }
   3.184 +
   3.185 +  ssize_t coal_deaths() const {
   3.186 +    return _allocation_stats.coal_deaths();
   3.187 +  }
   3.188 +  void set_coal_deaths(ssize_t v) {
   3.189 +    assert_proper_lock_protection();
   3.190 +    _allocation_stats.set_coal_deaths(v);
   3.191 +  }
   3.192 +  void increment_coal_deaths() {
   3.193 +    assert_proper_lock_protection();
   3.194 +    _allocation_stats.increment_coal_deaths();
   3.195 +  }
   3.196 +
   3.197 +  ssize_t split_births() const {
   3.198 +    return _allocation_stats.split_births();
   3.199 +  }
   3.200 +  void set_split_births(ssize_t v) {
   3.201 +    assert_proper_lock_protection();
   3.202 +    _allocation_stats.set_split_births(v);
   3.203 +  }
   3.204 +  void increment_split_births() {
   3.205 +    assert_proper_lock_protection();
   3.206 +    _allocation_stats.increment_split_births();
   3.207 +  }
   3.208 +
   3.209 +  ssize_t split_deaths() const {
   3.210 +    return _allocation_stats.split_deaths();
   3.211 +  }
   3.212 +  void set_split_deaths(ssize_t v) {
   3.213 +    assert_proper_lock_protection();
   3.214 +    _allocation_stats.set_split_deaths(v);
   3.215 +  }
   3.216 +  void increment_split_deaths() {
   3.217 +    assert_proper_lock_protection();
   3.218 +    _allocation_stats.increment_split_deaths();
   3.219 +  }
   3.220 +
   3.221 +#ifndef PRODUCT
   3.222 +  // For debugging.  The "_returned_bytes" in all the lists are summed
   3.223 +  // and compared with the total number of bytes swept during a
   3.224 +  // collection.
   3.225 +  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
   3.226 +  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
   3.227 +  void increment_returned_bytes_by(size_t v) {
   3.228 +    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
   3.229 +  }
   3.230 +  // Stats verification
   3.231 +  void verify_stats() const;
   3.232 +#endif  // NOT PRODUCT
   3.233 +};
   3.234 +
   3.235 +#endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
     4.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Oct 19 11:26:17 2012 -0700
     4.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Sep 18 23:35:42 2012 -0700
     4.3 @@ -91,7 +91,7 @@
     4.4    _collector(NULL)
     4.5  {
     4.6    assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
     4.7 -    "FreeChunk is larger than expected");
     4.8 +         "FreeChunk is larger than expected");
     4.9    _bt.set_space(this);
    4.10    initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    4.11    // We have all of "mr", all of which we place in the dictionary
    4.12 @@ -101,14 +101,14 @@
    4.13    // implementation, namely, the simple binary tree (splaying
    4.14    // temporarily disabled).
    4.15    switch (dictionaryChoice) {
    4.16 +    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
    4.17 +      _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
    4.18 +      break;
    4.19      case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
    4.20      case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
    4.21      default:
    4.22        warning("dictionaryChoice: selected option not understood; using"
    4.23                " default BinaryTreeDictionary implementation instead.");
    4.24 -    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
    4.25 -      _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
    4.26 -      break;
    4.27    }
    4.28    assert(_dictionary != NULL, "CMS dictionary initialization");
    4.29    // The indexed free lists are initially all empty and are lazily
    4.30 @@ -453,7 +453,7 @@
    4.31    reportIndexedFreeListStatistics();
    4.32    gclog_or_tty->print_cr("Layout of Indexed Freelists");
    4.33    gclog_or_tty->print_cr("---------------------------");
    4.34 -  FreeList<FreeChunk>::print_labels_on(st, "size");
    4.35 +  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
    4.36    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    4.37      _indexedFreeList[i].print_on(gclog_or_tty);
    4.38      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
    4.39 @@ -1319,7 +1319,7 @@
    4.40    size_t currSize = numWords + MinChunkSize;
    4.41    assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
    4.42    for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
    4.43 -    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
    4.44 +    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
    4.45      if (fl->head()) {
    4.46        ret = getFromListGreater(fl, numWords);
    4.47        assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
    4.48 @@ -1702,7 +1702,9 @@
    4.49    _dictionary->return_chunk(chunk);
    4.50  #ifndef PRODUCT
    4.51    if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
    4.52 -    TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
    4.53 +    TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
    4.54 +    TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
    4.55 +    tl->verify_stats();
    4.56    }
    4.57  #endif // PRODUCT
    4.58  }
    4.59 @@ -1745,7 +1747,7 @@
    4.60    {
    4.61      MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
    4.62      ec = dictionary()->find_largest_dict();  // get largest block
    4.63 -    if (ec != NULL && ec->end() == chunk) {
    4.64 +    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
    4.65        // It's a coterminal block - we can coalesce.
    4.66        size_t old_size = ec->size();
    4.67        coalDeath(old_size);
    4.68 @@ -1850,11 +1852,11 @@
    4.69       the excess is >= MIN_CHUNK. */
    4.70    size_t start = align_object_size(numWords + MinChunkSize);
    4.71    if (start < IndexSetSize) {
    4.72 -    FreeList<FreeChunk>* it   = _indexedFreeList;
    4.73 +    AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
    4.74      size_t    hint = _indexedFreeList[start].hint();
    4.75      while (hint < IndexSetSize) {
    4.76        assert(hint % MinObjAlignment == 0, "hint should be aligned");
    4.77 -      FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
    4.78 +      AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
    4.79        if (fl->surplus() > 0 && fl->head() != NULL) {
    4.80          // Found a list with surplus, reset original hint
    4.81          // and split out a free chunk which is returned.
    4.82 @@ -1873,7 +1875,7 @@
    4.83  }
    4.84  
    4.85  /* Requires fl->size >= numWords + MinChunkSize */
    4.86 -FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
    4.87 +FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
    4.88    size_t numWords) {
    4.89    FreeChunk *curr = fl->head();
    4.90    size_t oldNumWords = curr->size();
    4.91 @@ -2155,7 +2157,7 @@
    4.92    assert_locked();
    4.93    size_t i;
    4.94    for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    4.95 -    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
    4.96 +    AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
    4.97      if (PrintFLSStatistics > 1) {
    4.98        gclog_or_tty->print("size[%d] : ", i);
    4.99      }
   4.100 @@ -2174,7 +2176,7 @@
   4.101    assert_locked();
   4.102    size_t i;
   4.103    for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   4.104 -    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.105 +    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.106      fl->set_surplus(fl->count() -
   4.107                      (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
   4.108    }
   4.109 @@ -2185,7 +2187,7 @@
   4.110    size_t i;
   4.111    size_t h = IndexSetSize;
   4.112    for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   4.113 -    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.114 +    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.115      fl->set_hint(h);
   4.116      if (fl->surplus() > 0) {
   4.117        h = i;
   4.118 @@ -2197,7 +2199,7 @@
   4.119    assert_locked();
   4.120    size_t i;
   4.121    for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   4.122 -    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.123 +    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.124      fl->set_prev_sweep(fl->count());
   4.125      fl->set_coal_births(0);
   4.126      fl->set_coal_deaths(0);
   4.127 @@ -2224,7 +2226,7 @@
   4.128  
   4.129  bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
   4.130    if (size < SmallForDictionary) {
   4.131 -    FreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.132 +    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.133      return (fl->coal_desired() < 0) ||
   4.134             ((int)fl->count() > fl->coal_desired());
   4.135    } else {
   4.136 @@ -2234,14 +2236,14 @@
   4.137  
   4.138  void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
   4.139    assert(size < SmallForDictionary, "Size too large for indexed list");
   4.140 -  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.141 +  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.142    fl->increment_coal_births();
   4.143    fl->increment_surplus();
   4.144  }
   4.145  
   4.146  void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
   4.147    assert(size < SmallForDictionary, "Size too large for indexed list");
   4.148 -  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.149 +  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.150    fl->increment_coal_deaths();
   4.151    fl->decrement_surplus();
   4.152  }
   4.153 @@ -2250,7 +2252,7 @@
   4.154    if (size  < SmallForDictionary) {
   4.155      smallCoalBirth(size);
   4.156    } else {
   4.157 -    dictionary()->dict_census_udpate(size,
   4.158 +    dictionary()->dict_census_update(size,
   4.159                                     false /* split */,
   4.160                                     true /* birth */);
   4.161    }
   4.162 @@ -2260,7 +2262,7 @@
   4.163    if(size  < SmallForDictionary) {
   4.164      smallCoalDeath(size);
   4.165    } else {
   4.166 -    dictionary()->dict_census_udpate(size,
   4.167 +    dictionary()->dict_census_update(size,
   4.168                                     false /* split */,
   4.169                                     false /* birth */);
   4.170    }
   4.171 @@ -2268,14 +2270,14 @@
   4.172  
   4.173  void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
   4.174    assert(size < SmallForDictionary, "Size too large for indexed list");
   4.175 -  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.176 +  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.177    fl->increment_split_births();
   4.178    fl->increment_surplus();
   4.179  }
   4.180  
   4.181  void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
   4.182    assert(size < SmallForDictionary, "Size too large for indexed list");
   4.183 -  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.184 +  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   4.185    fl->increment_split_deaths();
   4.186    fl->decrement_surplus();
   4.187  }
   4.188 @@ -2284,7 +2286,7 @@
   4.189    if (size  < SmallForDictionary) {
   4.190      smallSplitBirth(size);
   4.191    } else {
   4.192 -    dictionary()->dict_census_udpate(size,
   4.193 +    dictionary()->dict_census_update(size,
   4.194                                     true /* split */,
   4.195                                     true /* birth */);
   4.196    }
   4.197 @@ -2294,7 +2296,7 @@
   4.198    if (size  < SmallForDictionary) {
   4.199      smallSplitDeath(size);
   4.200    } else {
   4.201 -    dictionary()->dict_census_udpate(size,
   4.202 +    dictionary()->dict_census_update(size,
   4.203                                     true /* split */,
   4.204                                     false /* birth */);
   4.205    }
   4.206 @@ -2517,10 +2519,10 @@
   4.207  
   4.208  #ifndef PRODUCT
   4.209  void CompactibleFreeListSpace::check_free_list_consistency() const {
   4.210 -  assert(_dictionary->min_size() <= IndexSetSize,
   4.211 +  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
   4.212      "Some sizes can't be allocated without recourse to"
   4.213      " linear allocation buffers");
   4.214 -  assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
   4.215 +  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
   4.216      "else MIN_TREE_CHUNK_SIZE is wrong");
   4.217    assert(IndexSetStart != 0, "IndexSetStart not initialized");
   4.218    assert(IndexSetStride != 0, "IndexSetStride not initialized");
   4.219 @@ -2529,15 +2531,15 @@
   4.220  
   4.221  void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
   4.222    assert_lock_strong(&_freelistLock);
   4.223 -  FreeList<FreeChunk> total;
   4.224 +  AdaptiveFreeList<FreeChunk> total;
   4.225    gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
   4.226 -  FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
   4.227 +  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
   4.228    size_t total_free = 0;
   4.229    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   4.230 -    const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.231 +    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
   4.232      total_free += fl->count() * fl->size();
   4.233      if (i % (40*IndexSetStride) == 0) {
   4.234 -      FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
   4.235 +      AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
   4.236      }
   4.237      fl->print_on(gclog_or_tty);
   4.238      total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
   4.239 @@ -2620,7 +2622,7 @@
   4.240      res = _cfls->getChunkFromDictionaryExact(word_sz);
   4.241      if (res == NULL) return NULL;
   4.242    } else {
   4.243 -    FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
   4.244 +    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
   4.245      if (fl->count() == 0) {
   4.246        // Attempt to refill this local free list.
   4.247        get_from_global_pool(word_sz, fl);
   4.248 @@ -2640,7 +2642,7 @@
   4.249  
   4.250  // Get a chunk of blocks of the right size and update related
   4.251  // book-keeping stats
   4.252 -void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
   4.253 +void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
   4.254    // Get the #blocks we want to claim
   4.255    size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   4.256    assert(n_blks > 0, "Error");
   4.257 @@ -2722,7 +2724,7 @@
   4.258          if (num_retire > 0) {
   4.259            _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
   4.260            // Reset this list.
   4.261 -          _indexedFreeList[i] = FreeList<FreeChunk>();
   4.262 +          _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
   4.263            _indexedFreeList[i].set_size(i);
   4.264          }
   4.265        }
   4.266 @@ -2736,7 +2738,7 @@
   4.267    }
   4.268  }
   4.269  
   4.270 -void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
   4.271 +void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
   4.272    assert(fl->count() == 0, "Precondition.");
   4.273    assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
   4.274           "Precondition");
   4.275 @@ -2752,12 +2754,12 @@
   4.276           (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
   4.277           (CMSSplitIndexedFreeListBlocks || k <= 1);
   4.278           k++, cur_sz = k * word_sz) {
   4.279 -      FreeList<FreeChunk> fl_for_cur_sz;  // Empty.
   4.280 +      AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
   4.281        fl_for_cur_sz.set_size(cur_sz);
   4.282        {
   4.283          MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
   4.284                          Mutex::_no_safepoint_check_flag);
   4.285 -        FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
   4.286 +        AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
   4.287          if (gfl->count() != 0) {
   4.288            // nn is the number of chunks of size cur_sz that
   4.289            // we'd need to split k-ways each, in order to create
   4.290 @@ -2832,12 +2834,11 @@
   4.291      MutexLockerEx x(parDictionaryAllocLock(),
   4.292                      Mutex::_no_safepoint_check_flag);
   4.293      while (n > 0) {
   4.294 -      fc = dictionary()->get_chunk(MAX2(n * word_sz,
   4.295 -                                  _dictionary->min_size()),
   4.296 +      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
   4.297                                    FreeBlockDictionary<FreeChunk>::atLeast);
   4.298        if (fc != NULL) {
   4.299          _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
   4.300 -        dictionary()->dict_census_udpate(fc->size(),
   4.301 +        dictionary()->dict_census_update(fc->size(),
   4.302                                         true /*split*/,
   4.303                                         false /*birth*/);
   4.304          break;
   4.305 @@ -2890,7 +2891,7 @@
   4.306        fc->set_size(prefix_size);
   4.307        if (rem >= IndexSetSize) {
   4.308          returnChunkToDictionary(rem_fc);
   4.309 -        dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
   4.310 +        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
   4.311          rem_fc = NULL;
   4.312        }
   4.313        // Otherwise, return it to the small list below.
     5.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Oct 19 11:26:17 2012 -0700
     5.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Sep 18 23:35:42 2012 -0700
     5.3 @@ -25,6 +25,7 @@
     5.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
     5.5  #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
     5.6  
     5.7 +#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
     5.8  #include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
     5.9  #include "memory/binaryTreeDictionary.hpp"
    5.10  #include "memory/blockOffsetTable.inline.hpp"
    5.11 @@ -38,6 +39,7 @@
    5.12  class CompactibleFreeListSpace;
    5.13  class BlkClosure;
    5.14  class BlkClosureCareful;
    5.15 +class FreeChunk;
    5.16  class UpwardsObjectClosure;
    5.17  class ObjectClosureCareful;
    5.18  class Klass;
    5.19 @@ -131,7 +133,7 @@
    5.20    FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
    5.21    FreeBlockDictionary<FreeChunk>* _dictionary;    // ptr to dictionary for large size blocks
    5.22  
    5.23 -  FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
    5.24 +  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
    5.25                                         // indexed array for small size blocks
    5.26    // allocation stategy
    5.27    bool       _fitStrategy;      // Use best fit strategy.
    5.28 @@ -168,7 +170,7 @@
    5.29    // If the count of "fl" is negative, it's absolute value indicates a
    5.30    // number of free chunks that had been previously "borrowed" from global
    5.31    // list of size "word_sz", and must now be decremented.
    5.32 -  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
    5.33 +  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
    5.34  
    5.35    // Allocation helper functions
    5.36    // Allocate using a strategy that takes from the indexed free lists
    5.37 @@ -214,7 +216,7 @@
    5.38    // and return it.  The split off remainder is returned to
    5.39    // the free lists.  The old name for getFromListGreater
    5.40    // was lookInListGreater.
    5.41 -  FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
    5.42 +  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
    5.43    // Get a chunk in the indexed free list or dictionary,
    5.44    // by considering a larger chunk and splitting it.
    5.45    FreeChunk* getChunkFromGreater(size_t numWords);
    5.46 @@ -621,7 +623,7 @@
    5.47    CompactibleFreeListSpace* _cfls;
    5.48  
    5.49    // Our local free lists.
    5.50 -  FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
    5.51 +  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
    5.52  
    5.53    // Initialized from a command-line arg.
    5.54  
    5.55 @@ -634,7 +636,7 @@
    5.56    size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
    5.57  
    5.58    // Internal work method
    5.59 -  void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);
    5.60 +  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
    5.61  
    5.62  public:
    5.63    CFLS_LAB(CompactibleFreeListSpace* cfls);
     6.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Oct 19 11:26:17 2012 -0700
     6.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Sep 18 23:35:42 2012 -0700
     6.3 @@ -9143,7 +9143,7 @@
     6.4      size_t shrinkable_size_in_bytes = chunk_at_end->size();
     6.5      size_t aligned_shrinkable_size_in_bytes =
     6.6        align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
     6.7 -    assert(unallocated_start <= chunk_at_end->end(),
     6.8 +    assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
     6.9        "Inconsistent chunk at end of space");
    6.10      size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
    6.11      size_t word_size_before = heap_word_size(_virtual_space.committed_size());
    6.12 @@ -9210,7 +9210,7 @@
    6.13  
    6.14      assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
    6.15        "Inconsistency at end of space");
    6.16 -    assert(chunk_at_end->end() == _cmsSpace->end(),
    6.17 +    assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
    6.18        "Shrinking is inconsistent");
    6.19      return;
    6.20    }
     7.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Fri Oct 19 11:26:17 2012 -0700
     7.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Tue Sep 18 23:35:42 2012 -0700
     7.3 @@ -133,7 +133,7 @@
     7.4    }
     7.5  
     7.6    // Return the address past the end of this chunk
     7.7 -  HeapWord* end() const { return ((HeapWord*) this) + size(); }
     7.8 +  uintptr_t* end() const { return ((uintptr_t*) this) + size(); }
     7.9  
    7.10    // debugging
    7.11    void verify()             const PRODUCT_RETURN;
     8.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Fri Oct 19 11:26:17 2012 -0700
     8.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Tue Sep 18 23:35:42 2012 -0700
     8.3 @@ -25,6 +25,8 @@
     8.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
     8.5  #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
     8.6  
     8.7 +typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
     8.8 +
     8.9  #define VM_STRUCTS_CMS(nonstatic_field, \
    8.10                     volatile_nonstatic_field, \
    8.11                     static_field) \
    8.12 @@ -38,14 +40,8 @@
    8.13    nonstatic_field(CMSCollector,                _markBitMap,                                   CMSBitMap)                             \
    8.14    nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace,                                   CompactibleFreeListSpace*)             \
    8.15       static_field(ConcurrentMarkSweepThread,   _collector,                                    CMSCollector*)                         \
    8.16 -  volatile_nonstatic_field(FreeChunk,          _size,                                         size_t)                                \
    8.17 -  nonstatic_field(FreeChunk,                   _next,                                         FreeChunk*)                            \
    8.18 -  nonstatic_field(FreeChunk,                   _prev,                                         FreeChunk*)                            \
    8.19    nonstatic_field(LinearAllocBlock,            _word_size,                                    size_t)                                \
    8.20 -  nonstatic_field(FreeList<FreeChunk>,         _size,                                         size_t)                                \
    8.21 -  nonstatic_field(FreeList<FreeChunk>,         _count,                                        ssize_t)                               \
    8.22 -  nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size,                                 size_t)                                \
    8.23 -  nonstatic_field(CompactibleFreeListSpace,    _dictionary,                                   FreeBlockDictionary<FreeChunk>*)       \
    8.24 +  nonstatic_field(AFLBinaryTreeDictionary,     _total_size,                                   size_t)                                \
    8.25 -  nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           FreeList<FreeChunk>)                   \
    8.25 +  nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           AdaptiveFreeList<FreeChunk>)           \
    8.26    nonstatic_field(CompactibleFreeListSpace,    _smallLinearAllocBlock,                        LinearAllocBlock)
    8.27  
    8.28 @@ -60,19 +56,16 @@
    8.29   declare_toplevel_type(CMSCollector)                                     \
    8.30   declare_toplevel_type(CMSBitMap)                                        \
    8.31   declare_toplevel_type(FreeChunk)                                        \
    8.32 +  declare_toplevel_type(Metablock)                                        \
    8.33   declare_toplevel_type(ConcurrentMarkSweepThread*)                       \
    8.34   declare_toplevel_type(ConcurrentMarkSweepGeneration*)                   \
    8.35   declare_toplevel_type(SurrogateLockerThread*)                           \
    8.36   declare_toplevel_type(CompactibleFreeListSpace*)                        \
    8.37   declare_toplevel_type(CMSCollector*)                                    \
    8.38 -  declare_toplevel_type(FreeChunk*)                                       \
    8.39 -  declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*)                 \
    8.40 -  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*)                  \
    8.41 -  declare_toplevel_type(FreeList<FreeChunk>*)                             \
    8.42 -  declare_toplevel_type(FreeList<FreeChunk>)                              \
    8.43 +  declare_toplevel_type(AFLBinaryTreeDictionary*)                         \
    8.44   declare_toplevel_type(LinearAllocBlock)                                 \
    8.45   declare_toplevel_type(FreeBlockDictionary<FreeChunk>)                   \
    8.46 -            declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)
    8.47 +            declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>)
    8.49  
    8.50  #define VM_INT_CONSTANTS_CMS(declare_constant)                            \
    8.51    declare_constant(Generation::ConcurrentMarkSweep)                       \
     9.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Oct 19 11:26:17 2012 -0700
     9.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Tue Sep 18 23:35:42 2012 -0700
     9.3 @@ -191,7 +191,7 @@
     9.4  class VM_CollectForMetadataAllocation: public VM_GC_Operation {
     9.5   private:
     9.6    MetaWord*                _result;
     9.7 -  size_t      _size;                       // size of object to be allocated
     9.8 +  size_t                   _size;     // size of object to be allocated
     9.9    Metaspace::MetadataType  _mdtype;
    9.10    ClassLoaderData*         _loader_data;
    9.11   public:
    10.1 --- a/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Oct 19 11:26:17 2012 -0700
    10.2 +++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Tue Sep 18 23:35:42 2012 -0700
     10.3 @@ -25,9 +25,14 @@
     10.4  #include "precompiled.hpp"
     10.5  #include "gc_implementation/shared/allocationStats.hpp"
     10.6  #include "memory/binaryTreeDictionary.hpp"
     10.7 +#include "memory/freeList.hpp"
     10.8 +#include "memory/freeBlockDictionary.hpp"
     10.9 +#include "memory/metablock.hpp"
    10.10 +#include "memory/metachunk.hpp"
    10.11  #include "runtime/globals.hpp"
    10.12  #include "utilities/ostream.hpp"
    10.13  #ifndef SERIALGC
    10.14 +#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
   10.16  #include "gc_implementation/shared/spaceDecorator.hpp"
   10.17  #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
   10.18  #endif // SERIALGC
   10.19 @@ -37,15 +43,18 @@
   10.20  // This is currently used in the Concurrent Mark&Sweep implementation.
   10.21  ////////////////////////////////////////////////////////////////////////////////
   10.22  
   10.23 -template <class Chunk>
   10.24 -TreeChunk<Chunk>* TreeChunk<Chunk>::as_TreeChunk(Chunk* fc) {
   10.25 +template <class Chunk_t, template <class> class FreeList_t>
   10.26 +size_t TreeChunk<Chunk_t, FreeList_t>::_min_tree_chunk_size = sizeof(TreeChunk<Chunk_t,  FreeList_t>)/HeapWordSize;
   10.27 +
   10.28 +template <class Chunk_t, template <class> class FreeList_t>
   10.29 +TreeChunk<Chunk_t, FreeList_t>* TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(Chunk_t* fc) {
   10.30    // Do some assertion checking here.
   10.31 -  return (TreeChunk<Chunk>*) fc;
   10.32 +  return (TreeChunk<Chunk_t, FreeList_t>*) fc;
   10.33  }
   10.34  
   10.35 -template <class Chunk>
   10.36 -void TreeChunk<Chunk>::verify_tree_chunk_list() const {
   10.37 -  TreeChunk<Chunk>* nextTC = (TreeChunk<Chunk>*)next();
   10.38 +template <class Chunk_t, template <class> class FreeList_t>
   10.39 +void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
   10.40 +  TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
   10.41    if (prev() != NULL) { // interior list node shouldn'r have tree fields
   10.42      guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
   10.43                embedded_list()->right()  == NULL, "should be clear");
   10.44 @@ -57,53 +66,113 @@
   10.45    }
   10.46  }
   10.47  
   10.48 +template <class Chunk_t, template <class> class FreeList_t>
   10.49 +TreeList<Chunk_t, FreeList_t>::TreeList() {}
   10.50  
   10.51 -template <class Chunk>
   10.52 -TreeList<Chunk>* TreeList<Chunk>::as_TreeList(TreeChunk<Chunk>* tc) {
   10.53 +template <class Chunk_t, template <class> class FreeList_t>
   10.54 +TreeList<Chunk_t, FreeList_t>*
   10.55 +TreeList<Chunk_t, FreeList_t>::as_TreeList(TreeChunk<Chunk_t,FreeList_t>* tc) {
   10.56    // This first free chunk in the list will be the tree list.
   10.57 -  assert(tc->size() >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "Chunk is too small for a TreeChunk");
   10.58 -  TreeList<Chunk>* tl = tc->embedded_list();
   10.59 +  assert((tc->size() >= (TreeChunk<Chunk_t, FreeList_t>::min_size())),
   10.60 +    "Chunk is too small for a TreeChunk");
   10.61 +  TreeList<Chunk_t, FreeList_t>* tl = tc->embedded_list();
   10.62 +  tl->initialize();
   10.63    tc->set_list(tl);
   10.64 -#ifdef ASSERT
   10.65 -  tl->set_protecting_lock(NULL);
   10.66 -#endif
   10.67 -  tl->set_hint(0);
   10.68    tl->set_size(tc->size());
   10.69    tl->link_head(tc);
   10.70    tl->link_tail(tc);
   10.71    tl->set_count(1);
   10.72 -  tl->init_statistics(true /* split_birth */);
   10.73 -  tl->set_parent(NULL);
   10.74 -  tl->set_left(NULL);
   10.75 -  tl->set_right(NULL);
   10.76 +
   10.77    return tl;
   10.78  }
   10.79  
   10.80 -template <class Chunk>
   10.81 -TreeList<Chunk>* TreeList<Chunk>::as_TreeList(HeapWord* addr, size_t size) {
   10.82 -  TreeChunk<Chunk>* tc = (TreeChunk<Chunk>*) addr;
   10.83 -  assert(size >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "Chunk is too small for a TreeChunk");
   10.84 -  // The space in the heap will have been mangled initially but
   10.85 -  // is not remangled when a free chunk is returned to the free list
   10.86 +
   10.87 +template <class Chunk_t, template <class> class FreeList_t>
   10.88 +Chunk_t*
   10.89 +get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
   10.90 +  FreeBlockDictionary<Chunk_t>::verify_par_locked();
   10.91 +  Chunk_t* res = get_chunk_from_tree(size, dither);
   10.92 +  assert(res == NULL || res->is_free(),
   10.93 +         "Should be returning a free chunk");
   10.94 +  assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
   10.95 +         res->size() == size, "Not correct size");
   10.96 +  return res;
   10.97 +}
   10.98 +
   10.99 +template <class Chunk_t, template <class> class FreeList_t>
  10.100 +TreeList<Chunk_t, FreeList_t>*
  10.101 +TreeList<Chunk_t, FreeList_t>::as_TreeList(HeapWord* addr, size_t size) {
  10.102 +  TreeChunk<Chunk_t, FreeList_t>* tc = (TreeChunk<Chunk_t, FreeList_t>*) addr;
  10.103 +  assert((size >= TreeChunk<Chunk_t, FreeList_t>::min_size()),
  10.104 +    "Chunk is too small for a TreeChunk");
  10.105 +  // The space will have been mangled initially but
  10.106 +  // is not remangled when a Chunk_t is returned to the free list
  10.107    // (since it is used to maintain the chunk on the free list).
  10.108 -  assert((ZapUnusedHeapArea &&
  10.109 -          SpaceMangler::is_mangled((HeapWord*) tc->size_addr()) &&
  10.110 -          SpaceMangler::is_mangled((HeapWord*) tc->prev_addr()) &&
  10.111 -          SpaceMangler::is_mangled((HeapWord*) tc->next_addr())) ||
  10.112 -          (tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL),
  10.113 -    "Space should be clear or mangled");
  10.114 +  tc->assert_is_mangled();
  10.115    tc->set_size(size);
  10.116    tc->link_prev(NULL);
  10.117    tc->link_next(NULL);
  10.118 -  TreeList<Chunk>* tl = TreeList<Chunk>::as_TreeList(tc);
  10.119 +  TreeList<Chunk_t, FreeList_t>* tl = TreeList<Chunk_t, FreeList_t>::as_TreeList(tc);
  10.120    return tl;
  10.121  }
  10.122  
  10.123 -template <class Chunk>
  10.124 -TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc) {
  10.125  
  10.126 -  TreeList<Chunk>* retTL = this;
  10.127 -  Chunk* list = head();
  10.128 +#ifndef SERIALGC
  10.129 +// Specialize for AdaptiveFreeList which tries to avoid
  10.130 +// splitting a chunk of a size that is under populated in favor of
  10.131 +// an over populated size.  The general get_better_list() just returns
  10.132 +// the current list.
  10.133 +template <>
  10.134 +TreeList<FreeChunk, AdaptiveFreeList>*
  10.135 +TreeList<FreeChunk, AdaptiveFreeList>::get_better_list(
  10.136 +  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList>* dictionary) {
  10.137 +  // A candidate chunk has been found.  If it is already under
  10.138 +  // populated, get a chunk associated with the hint for this
  10.139 +  // chunk.
  10.140 +
  10.141 +  TreeList<FreeChunk, ::AdaptiveFreeList>* curTL = this;
  10.142 +  if (surplus() <= 0) {
  10.143 +    /* Use the hint to find a size with a surplus, and reset the hint. */
  10.144 +    TreeList<FreeChunk, ::AdaptiveFreeList>* hintTL = this;
  10.145 +    while (hintTL->hint() != 0) {
  10.146 +      assert(hintTL->hint() > hintTL->size(),
  10.147 +        "hint points in the wrong direction");
  10.148 +      hintTL = dictionary->find_list(hintTL->hint());
  10.149 +      assert(curTL != hintTL, "Infinite loop");
  10.150 +      if (hintTL == NULL ||
  10.151 +          hintTL == curTL /* Should not happen but protect against it */ ) {
  10.152 +        // No useful hint.  Set the hint to NULL and go on.
  10.153 +        curTL->set_hint(0);
  10.154 +        break;
  10.155 +      }
  10.156 +      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
  10.157 +      if (hintTL->surplus() > 0) {
  10.158 +        // The hint led to a list that has a surplus.  Use it.
  10.159 +        // Set the hint for the candidate to an overpopulated
  10.160 +        // size.
  10.161 +        curTL->set_hint(hintTL->size());
  10.162 +        // Change the candidate.
  10.163 +        curTL = hintTL;
  10.164 +        break;
  10.165 +      }
  10.166 +    }
  10.167 +  }
  10.168 +  return curTL;
  10.169 +}
  10.170 +#endif // SERIALGC
  10.171 +
  10.172 +template <class Chunk_t, template <class> class FreeList_t>
  10.173 +TreeList<Chunk_t, FreeList_t>*
  10.174 +TreeList<Chunk_t, FreeList_t>::get_better_list(
  10.175 +  BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary) {
  10.176 +  return this;
  10.177 +}
  10.178 +
  10.179 +template <class Chunk_t, template <class> class FreeList_t>
  10.180 +TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc) {
  10.181 +
  10.182 +  TreeList<Chunk_t, FreeList_t>* retTL = this;
  10.183 +  Chunk_t* list = head();
  10.184    assert(!list || list != list->next(), "Chunk on list twice");
  10.185    assert(tc != NULL, "Chunk being removed is NULL");
  10.186    assert(parent() == NULL || this == parent()->left() ||
  10.187 @@ -112,13 +181,13 @@
  10.188    assert(head() == NULL || head()->prev() == NULL, "list invariant");
  10.189    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  10.190  
  10.191 -  Chunk* prevFC = tc->prev();
  10.192 -  TreeChunk<Chunk>* nextTC = TreeChunk<Chunk>::as_TreeChunk(tc->next());
  10.193 +  Chunk_t* prevFC = tc->prev();
  10.194 +  TreeChunk<Chunk_t, FreeList_t>* nextTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(tc->next());
  10.195    assert(list != NULL, "should have at least the target chunk");
  10.196  
  10.197    // Is this the first item on the list?
  10.198    if (tc == list) {
  10.199 -    // The "getChunk..." functions for a TreeList<Chunk> will not return the
  10.200 +    // The "getChunk..." functions for a TreeList<Chunk_t, FreeList_t> will not return the
  10.201      // first chunk in the list unless it is the last chunk in the list
  10.202      // because the first chunk is also acting as the tree node.
  10.203      // When coalescing happens, however, the first chunk in the a tree
  10.204 @@ -127,8 +196,8 @@
  10.205      // allocated when the sweeper yields (giving up the free list lock)
  10.206      // to allow mutator activity.  If this chunk is the first in the
  10.207      // list and is not the last in the list, do the work to copy the
  10.208 -    // TreeList<Chunk> from the first chunk to the next chunk and update all
  10.209 -    // the TreeList<Chunk> pointers in the chunks in the list.
  10.210 +    // TreeList<Chunk_t, FreeList_t> from the first chunk to the next chunk and update all
  10.211 +    // the TreeList<Chunk_t, FreeList_t> pointers in the chunks in the list.
  10.212      if (nextTC == NULL) {
  10.213        assert(prevFC == NULL, "Not last chunk in the list");
  10.214        set_tail(NULL);
  10.215 @@ -141,11 +210,11 @@
  10.216        // This can be slow for a long list.  Consider having
  10.217        // an option that does not allow the first chunk on the
  10.218        // list to be coalesced.
  10.219 -      for (TreeChunk<Chunk>* curTC = nextTC; curTC != NULL;
  10.220 -          curTC = TreeChunk<Chunk>::as_TreeChunk(curTC->next())) {
  10.221 +      for (TreeChunk<Chunk_t, FreeList_t>* curTC = nextTC; curTC != NULL;
  10.222 +          curTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(curTC->next())) {
  10.223          curTC->set_list(retTL);
  10.224        }
  10.225 -      // Fix the parent to point to the new TreeList<Chunk>.
  10.226 +      // Fix the parent to point to the new TreeList<Chunk_t, FreeList_t>.
  10.227        if (retTL->parent() != NULL) {
  10.228          if (this == retTL->parent()->left()) {
  10.229            retTL->parent()->set_left(retTL);
  10.230 @@ -176,9 +245,9 @@
  10.231      prevFC->link_after(nextTC);
  10.232    }
  10.233  
  10.234 -  // Below this point the embeded TreeList<Chunk> being used for the
  10.235 +  // Below this point the embeded TreeList<Chunk_t, FreeList_t> being used for the
  10.236    // tree node may have changed. Don't use "this"
  10.237 -  // TreeList<Chunk>*.
  10.238 +  // TreeList<Chunk_t, FreeList_t>*.
  10.239    // chunk should still be a free chunk (bit set in _prev)
  10.240    assert(!retTL->head() || retTL->size() == retTL->head()->size(),
  10.241      "Wrong sized chunk in list");
  10.242 @@ -188,7 +257,7 @@
  10.243      tc->set_list(NULL);
  10.244      bool prev_found = false;
  10.245      bool next_found = false;
  10.246 -    for (Chunk* curFC = retTL->head();
  10.247 +    for (Chunk_t* curFC = retTL->head();
  10.248           curFC != NULL; curFC = curFC->next()) {
  10.249        assert(curFC != tc, "Chunk is still in list");
  10.250        if (curFC == prevFC) {
  10.251 @@ -215,8 +284,8 @@
  10.252    return retTL;
  10.253  }
  10.254  
  10.255 -template <class Chunk>
  10.256 -void TreeList<Chunk>::return_chunk_at_tail(TreeChunk<Chunk>* chunk) {
  10.257 +template <class Chunk_t, template <class> class FreeList_t>
  10.258 +void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* chunk) {
  10.259    assert(chunk != NULL, "returning NULL chunk");
  10.260    assert(chunk->list() == this, "list should be set for chunk");
  10.261    assert(tail() != NULL, "The tree list is embedded in the first chunk");
  10.262 @@ -225,12 +294,12 @@
  10.263    assert(head() == NULL || head()->prev() == NULL, "list invariant");
  10.264    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  10.265  
  10.266 -  Chunk* fc = tail();
  10.267 +  Chunk_t* fc = tail();
  10.268    fc->link_after(chunk);
  10.269    link_tail(chunk);
  10.270  
  10.271    assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
  10.272 -  increment_count();
  10.273 +  FreeList_t<Chunk_t>::increment_count();
  10.274    debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
  10.275    assert(head() == NULL || head()->prev() == NULL, "list invariant");
  10.276    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  10.277 @@ -238,10 +307,10 @@
  10.278  
  10.279  // Add this chunk at the head of the list.  "At the head of the list"
  10.280  // is defined to be after the chunk pointer to by head().  This is
  10.281 -// because the TreeList<Chunk> is embedded in the first TreeChunk<Chunk> in the
  10.282 -// list.  See the definition of TreeChunk<Chunk>.
  10.283 -template <class Chunk>
  10.284 -void TreeList<Chunk>::return_chunk_at_head(TreeChunk<Chunk>* chunk) {
  10.285 +// because the TreeList<Chunk_t, FreeList_t> is embedded in the first TreeChunk<Chunk_t, FreeList_t> in the
  10.286 +// list.  See the definition of TreeChunk<Chunk_t, FreeList_t>.
  10.287 +template <class Chunk_t, template <class> class FreeList_t>
  10.288 +void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* chunk) {
  10.289    assert(chunk->list() == this, "list should be set for chunk");
  10.290    assert(head() != NULL, "The tree list is embedded in the first chunk");
  10.291    assert(chunk != NULL, "returning NULL chunk");
  10.292 @@ -249,7 +318,7 @@
  10.293    assert(head() == NULL || head()->prev() == NULL, "list invariant");
  10.294    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  10.295  
  10.296 -  Chunk* fc = head()->next();
  10.297 +  Chunk_t* fc = head()->next();
  10.298    if (fc != NULL) {
  10.299      chunk->link_after(fc);
  10.300    } else {
  10.301 @@ -258,28 +327,38 @@
  10.302    }
  10.303    head()->link_after(chunk);
  10.304    assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
  10.305 -  increment_count();
  10.306 +  FreeList_t<Chunk_t>::increment_count();
  10.307    debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
  10.308    assert(head() == NULL || head()->prev() == NULL, "list invariant");
  10.309    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  10.310  }
  10.311  
  10.312 -template <class Chunk>
  10.313 -TreeChunk<Chunk>* TreeList<Chunk>::head_as_TreeChunk() {
  10.314 -  assert(head() == NULL || TreeChunk<Chunk>::as_TreeChunk(head())->list() == this,
  10.315 -    "Wrong type of chunk?");
  10.316 -  return TreeChunk<Chunk>::as_TreeChunk(head());
  10.317 +template <class Chunk_t, template <class> class FreeList_t>
  10.318 +void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
  10.319 +  assert((ZapUnusedHeapArea &&
  10.320 +          SpaceMangler::is_mangled((HeapWord*) Chunk_t::size_addr()) &&
  10.321 +          SpaceMangler::is_mangled((HeapWord*) Chunk_t::prev_addr()) &&
  10.322 +          SpaceMangler::is_mangled((HeapWord*) Chunk_t::next_addr())) ||
  10.323 +          (size() == 0 && prev() == NULL && next() == NULL),
  10.324 +    "Space should be clear or mangled");
  10.325  }
  10.326  
  10.327 -template <class Chunk>
  10.328 -TreeChunk<Chunk>* TreeList<Chunk>::first_available() {
  10.329 +template <class Chunk_t, template <class> class FreeList_t>
  10.330 +TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::head_as_TreeChunk() {
  10.331 +  assert(head() == NULL || (TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head())->list() == this),
  10.332 +    "Wrong type of chunk?");
  10.333 +  return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head());
  10.334 +}
  10.335 +
  10.336 +template <class Chunk_t, template <class> class FreeList_t>
  10.337 +TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::first_available() {
  10.338    assert(head() != NULL, "The head of the list cannot be NULL");
  10.339 -  Chunk* fc = head()->next();
  10.340 -  TreeChunk<Chunk>* retTC;
  10.341 +  Chunk_t* fc = head()->next();
  10.342 +  TreeChunk<Chunk_t, FreeList_t>* retTC;
  10.343    if (fc == NULL) {
  10.344      retTC = head_as_TreeChunk();
  10.345    } else {
  10.346 -    retTC = TreeChunk<Chunk>::as_TreeChunk(fc);
  10.347 +    retTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(fc);
  10.348    }
  10.349    assert(retTC->list() == this, "Wrong type of chunk.");
  10.350    return retTC;
  10.351 @@ -288,41 +367,32 @@
  10.352  // Returns the block with the largest heap address amongst
  10.353  // those in the list for this size; potentially slow and expensive,
  10.354  // use with caution!
  10.355 -template <class Chunk>
  10.356 -TreeChunk<Chunk>* TreeList<Chunk>::largest_address() {
  10.357 +template <class Chunk_t, template <class> class FreeList_t>
  10.358 +TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::largest_address() {
  10.359    assert(head() != NULL, "The head of the list cannot be NULL");
  10.360 -  Chunk* fc = head()->next();
  10.361 -  TreeChunk<Chunk>* retTC;
  10.362 +  Chunk_t* fc = head()->next();
  10.363 +  TreeChunk<Chunk_t, FreeList_t>* retTC;
  10.364    if (fc == NULL) {
  10.365      retTC = head_as_TreeChunk();
  10.366    } else {
  10.367      // walk down the list and return the one with the highest
  10.368      // heap address among chunks of this size.
  10.369 -    Chunk* last = fc;
  10.370 +    Chunk_t* last = fc;
  10.371      while (fc->next() != NULL) {
  10.372        if ((HeapWord*)last < (HeapWord*)fc) {
  10.373          last = fc;
  10.374        }
  10.375        fc = fc->next();
  10.376      }
  10.377 -    retTC = TreeChunk<Chunk>::as_TreeChunk(last);
  10.378 +    retTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(last);
  10.379    }
  10.380    assert(retTC->list() == this, "Wrong type of chunk.");
  10.381    return retTC;
  10.382  }
  10.383  
  10.384 -template <class Chunk>
  10.385 -BinaryTreeDictionary<Chunk>::BinaryTreeDictionary(bool adaptive_freelists, bool splay) :
  10.386 -  _splay(splay), _adaptive_freelists(adaptive_freelists),
  10.387 -  _total_size(0), _total_free_blocks(0), _root(0) {}
  10.388 -
  10.389 -template <class Chunk>
  10.390 -BinaryTreeDictionary<Chunk>::BinaryTreeDictionary(MemRegion mr,
  10.391 -                                           bool adaptive_freelists,
  10.392 -                                           bool splay):
  10.393 -  _adaptive_freelists(adaptive_freelists), _splay(splay)
  10.394 -{
  10.395 -  assert(mr.word_size() >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "minimum chunk size");
  10.396 +template <class Chunk_t, template <class> class FreeList_t>
  10.397 +BinaryTreeDictionary<Chunk_t, FreeList_t>::BinaryTreeDictionary(MemRegion mr) {
  10.398 +  assert((mr.byte_size() > min_size()), "minimum chunk size");
  10.399  
  10.400    reset(mr);
  10.401    assert(root()->left() == NULL, "reset check failed");
  10.402 @@ -333,52 +403,48 @@
  10.403    assert(total_free_blocks() == 1, "reset check failed");
  10.404  }
  10.405  
  10.406 -template <class Chunk>
  10.407 -void BinaryTreeDictionary<Chunk>::inc_total_size(size_t inc) {
  10.408 +template <class Chunk_t, template <class> class FreeList_t>
  10.409 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::inc_total_size(size_t inc) {
  10.410    _total_size = _total_size + inc;
  10.411  }
  10.412  
  10.413 -template <class Chunk>
  10.414 -void BinaryTreeDictionary<Chunk>::dec_total_size(size_t dec) {
  10.415 +template <class Chunk_t, template <class> class FreeList_t>
  10.416 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::dec_total_size(size_t dec) {
  10.417    _total_size = _total_size - dec;
  10.418  }
  10.419  
  10.420 -template <class Chunk>
  10.421 -void BinaryTreeDictionary<Chunk>::reset(MemRegion mr) {
  10.422 -  assert(mr.word_size() >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "minimum chunk size");
  10.423 -  set_root(TreeList<Chunk>::as_TreeList(mr.start(), mr.word_size()));
  10.424 +template <class Chunk_t, template <class> class FreeList_t>
  10.425 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(MemRegion mr) {
  10.426 +  assert((mr.byte_size() > min_size()), "minimum chunk size");
  10.427 +  set_root(TreeList<Chunk_t, FreeList_t>::as_TreeList(mr.start(), mr.word_size()));
  10.428    set_total_size(mr.word_size());
  10.429    set_total_free_blocks(1);
  10.430  }
  10.431  
  10.432 -template <class Chunk>
  10.433 -void BinaryTreeDictionary<Chunk>::reset(HeapWord* addr, size_t byte_size) {
  10.434 +template <class Chunk_t, template <class> class FreeList_t>
  10.435 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(HeapWord* addr, size_t byte_size) {
  10.436    MemRegion mr(addr, heap_word_size(byte_size));
  10.437    reset(mr);
  10.438  }
  10.439  
  10.440 -template <class Chunk>
  10.441 -void BinaryTreeDictionary<Chunk>::reset() {
  10.442 +template <class Chunk_t, template <class> class FreeList_t>
  10.443 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset() {
  10.444    set_root(NULL);
  10.445    set_total_size(0);
  10.446    set_total_free_blocks(0);
  10.447  }
  10.448  
  10.449  // Get a free block of size at least size from tree, or NULL.
  10.450 -// If a splay step is requested, the removal algorithm (only) incorporates
  10.451 -// a splay step as follows:
  10.452 -// . the search proceeds down the tree looking for a possible
  10.453 -//   match. At the (closest) matching location, an appropriate splay step is applied
  10.454 -//   (zig, zig-zig or zig-zag). A chunk of the appropriate size is then returned
  10.455 -//   if available, and if it's the last chunk, the node is deleted. A deteleted
  10.456 -//   node is replaced in place by its tree successor.
  10.457 -template <class Chunk>
  10.458 -TreeChunk<Chunk>*
  10.459 -BinaryTreeDictionary<Chunk>::get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay)
  10.460 +template <class Chunk_t, template <class> class FreeList_t>
  10.461 +TreeChunk<Chunk_t, FreeList_t>*
  10.462 +BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk_from_tree(
  10.463 +                              size_t size,
  10.464 +                              enum FreeBlockDictionary<Chunk_t>::Dither dither)
  10.465  {
  10.466 -  TreeList<Chunk> *curTL, *prevTL;
  10.467 -  TreeChunk<Chunk>* retTC = NULL;
  10.468 -  assert(size >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "minimum chunk size");
  10.469 +  TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
  10.470 +  TreeChunk<Chunk_t, FreeList_t>* retTC = NULL;
  10.471 +
  10.472 +  assert((size >= min_size()), "minimum chunk size");
  10.473    if (FLSVerifyDictionary) {
  10.474      verify_tree();
  10.475    }
  10.476 @@ -398,7 +464,7 @@
  10.477    }
  10.478    if (curTL == NULL) { // couldn't find exact match
  10.479  
  10.480 -    if (dither == FreeBlockDictionary<Chunk>::exactly) return NULL;
  10.481 +    if (dither == FreeBlockDictionary<Chunk_t>::exactly) return NULL;
  10.482  
  10.483      // try and find the next larger size by walking back up the search path
  10.484      for (curTL = prevTL; curTL != NULL;) {
  10.485 @@ -410,46 +476,9 @@
  10.486    }
  10.487    if (curTL != NULL) {
  10.488      assert(curTL->size() >= size, "size inconsistency");
  10.489 -    if (adaptive_freelists()) {
  10.490  
  10.491 -      // A candidate chunk has been found.  If it is already under
  10.492 -      // populated, get a chunk associated with the hint for this
  10.493 -      // chunk.
  10.494 -      if (curTL->surplus() <= 0) {
  10.495 -        /* Use the hint to find a size with a surplus, and reset the hint. */
  10.496 -        TreeList<Chunk>* hintTL = curTL;
  10.497 -        while (hintTL->hint() != 0) {
  10.498 -          assert(hintTL->hint() == 0 || hintTL->hint() > hintTL->size(),
  10.499 -            "hint points in the wrong direction");
  10.500 -          hintTL = find_list(hintTL->hint());
  10.501 -          assert(curTL != hintTL, "Infinite loop");
  10.502 -          if (hintTL == NULL ||
  10.503 -              hintTL == curTL /* Should not happen but protect against it */ ) {
  10.504 -            // No useful hint.  Set the hint to NULL and go on.
  10.505 -            curTL->set_hint(0);
  10.506 -            break;
  10.507 -          }
  10.508 -          assert(hintTL->size() > size, "hint is inconsistent");
  10.509 -          if (hintTL->surplus() > 0) {
  10.510 -            // The hint led to a list that has a surplus.  Use it.
  10.511 -            // Set the hint for the candidate to an overpopulated
  10.512 -            // size.
  10.513 -            curTL->set_hint(hintTL->size());
  10.514 -            // Change the candidate.
  10.515 -            curTL = hintTL;
  10.516 -            break;
  10.517 -          }
  10.518 -          // The evm code reset the hint of the candidate as
  10.519 -          // at an interim point.  Why?  Seems like this leaves
  10.520 -          // the hint pointing to a list that didn't work.
  10.521 -          // curTL->set_hint(hintTL->size());
  10.522 -        }
  10.523 -      }
  10.524 -    }
  10.525 -    // don't waste time splaying if chunk's singleton
  10.526 -    if (splay && curTL->head()->next() != NULL) {
  10.527 -      semi_splay_step(curTL);
  10.528 -    }
  10.529 +    curTL = curTL->get_better_list(this);
  10.530 +
  10.531      retTC = curTL->first_available();
  10.532      assert((retTC != NULL) && (curTL->count() > 0),
  10.533        "A list in the binary tree should not be NULL");
  10.534 @@ -465,9 +494,9 @@
  10.535    return retTC;
  10.536  }
  10.537  
  10.538 -template <class Chunk>
  10.539 -TreeList<Chunk>* BinaryTreeDictionary<Chunk>::find_list(size_t size) const {
  10.540 -  TreeList<Chunk>* curTL;
  10.541 +template <class Chunk_t, template <class> class FreeList_t>
  10.542 +TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_list(size_t size) const {
  10.543 +  TreeList<Chunk_t, FreeList_t>* curTL;
  10.544    for (curTL = root(); curTL != NULL;) {
  10.545      if (curTL->size() == size) {        // exact match
  10.546        break;
  10.547 @@ -484,10 +513,10 @@
  10.548  }
  10.549  
  10.550  
  10.551 -template <class Chunk>
  10.552 -bool BinaryTreeDictionary<Chunk>::verify_chunk_in_free_list(Chunk* tc) const {
  10.553 +template <class Chunk_t, template <class> class FreeList_t>
  10.554 +bool BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_chunk_in_free_list(Chunk_t* tc) const {
  10.555    size_t size = tc->size();
  10.556 -  TreeList<Chunk>* tl = find_list(size);
  10.557 +  TreeList<Chunk_t, FreeList_t>* tl = find_list(size);
  10.558    if (tl == NULL) {
  10.559      return false;
  10.560    } else {
  10.561 @@ -495,9 +524,9 @@
  10.562    }
  10.563  }
  10.564  
  10.565 -template <class Chunk>
  10.566 -Chunk* BinaryTreeDictionary<Chunk>::find_largest_dict() const {
  10.567 -  TreeList<Chunk> *curTL = root();
  10.568 +template <class Chunk_t, template <class> class FreeList_t>
  10.569 +Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_largest_dict() const {
  10.570 +  TreeList<Chunk_t, FreeList_t> *curTL = root();
  10.571    if (curTL != NULL) {
  10.572      while(curTL->right() != NULL) curTL = curTL->right();
  10.573      return curTL->largest_address();
  10.574 @@ -510,15 +539,15 @@
  10.575  // chunk in a list on a tree node, just unlink it.
  10.576  // If it is the last chunk in the list (the next link is NULL),
  10.577  // remove the node and repair the tree.
  10.578 -template <class Chunk>
  10.579 -TreeChunk<Chunk>*
  10.580 -BinaryTreeDictionary<Chunk>::remove_chunk_from_tree(TreeChunk<Chunk>* tc) {
  10.581 +template <class Chunk_t, template <class> class FreeList_t>
  10.582 +TreeChunk<Chunk_t, FreeList_t>*
  10.583 +BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc) {
  10.584    assert(tc != NULL, "Should not call with a NULL chunk");
  10.585    assert(tc->is_free(), "Header is not marked correctly");
  10.586  
  10.587 -  TreeList<Chunk> *newTL, *parentTL;
  10.588 -  TreeChunk<Chunk>* retTC;
  10.589 -  TreeList<Chunk>* tl = tc->list();
  10.590 +  TreeList<Chunk_t, FreeList_t> *newTL, *parentTL;
  10.591 +  TreeChunk<Chunk_t, FreeList_t>* retTC;
  10.592 +  TreeList<Chunk_t, FreeList_t>* tl = tc->list();
  10.593    debug_only(
  10.594      bool removing_only_chunk = false;
  10.595      if (tl == _root) {
  10.596 @@ -538,8 +567,8 @@
  10.597  
  10.598    retTC = tc;
  10.599    // Removing this chunk can have the side effect of changing the node
  10.600 -  // (TreeList<Chunk>*) in the tree.  If the node is the root, update it.
  10.601 -  TreeList<Chunk>* replacementTL = tl->remove_chunk_replace_if_needed(tc);
  10.602 +  // (TreeList<Chunk_t, FreeList_t>*) in the tree.  If the node is the root, update it.
  10.603 +  TreeList<Chunk_t, FreeList_t>* replacementTL = tl->remove_chunk_replace_if_needed(tc);
  10.604    assert(tc->is_free(), "Chunk should still be free");
  10.605    assert(replacementTL->parent() == NULL ||
  10.606           replacementTL == replacementTL->parent()->left() ||
  10.607 @@ -549,17 +578,18 @@
  10.608      assert(replacementTL->parent() == NULL, "Incorrectly replacing root");
  10.609      set_root(replacementTL);
  10.610    }
  10.611 -  debug_only(
  10.612 +#ifdef ASSERT
  10.613      if (tl != replacementTL) {
  10.614        assert(replacementTL->head() != NULL,
  10.615          "If the tree list was replaced, it should not be a NULL list");
  10.616 -      TreeList<Chunk>* rhl = replacementTL->head_as_TreeChunk()->list();
  10.617 -      TreeList<Chunk>* rtl = TreeChunk<Chunk>::as_TreeChunk(replacementTL->tail())->list();
  10.618 +      TreeList<Chunk_t, FreeList_t>* rhl = replacementTL->head_as_TreeChunk()->list();
  10.619 +      TreeList<Chunk_t, FreeList_t>* rtl =
  10.620 +        TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(replacementTL->tail())->list();
  10.621        assert(rhl == replacementTL, "Broken head");
  10.622        assert(rtl == replacementTL, "Broken tail");
  10.623        assert(replacementTL->size() == tc->size(),  "Broken size");
  10.624      }
  10.625 -  )
  10.626 +#endif
  10.627  
  10.628    // Does the tree need to be repaired?
  10.629    if (replacementTL->count() == 0) {
  10.630 @@ -574,7 +604,7 @@
  10.631      } else if (replacementTL->right() == NULL) {
  10.632        // right is NULL
  10.633        newTL = replacementTL->left();
  10.634 -      debug_only(replacementTL->clearLeft();)
  10.635 +      debug_only(replacementTL->clear_left();)
  10.636      } else {  // we have both children, so, by patriarchal convention,
  10.637                // my replacement is least node in right sub-tree
  10.638        complicated_splice = true;
  10.639 @@ -623,7 +653,7 @@
  10.640        newTL->set_right(replacementTL->right());
  10.641        debug_only(
  10.642          replacementTL->clear_right();
  10.643 -        replacementTL->clearLeft();
  10.644 +        replacementTL->clear_left();
  10.645        )
  10.646      }
  10.647      assert(replacementTL->right() == NULL &&
  10.648 @@ -644,21 +674,21 @@
  10.649      verify_tree();
  10.650    }
  10.651    assert(!removing_only_chunk || _root == NULL, "root should be NULL");
  10.652 -  return TreeChunk<Chunk>::as_TreeChunk(retTC);
  10.653 +  return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(retTC);
  10.654  }
  10.655  
  10.656  // Remove the leftmost node (lm) in the tree and return it.
  10.657  // If lm has a right child, link it to the left node of
  10.658  // the parent of lm.
  10.659 -template <class Chunk>
  10.660 -TreeList<Chunk>* BinaryTreeDictionary<Chunk>::remove_tree_minimum(TreeList<Chunk>* tl) {
  10.661 +template <class Chunk_t, template <class> class FreeList_t>
  10.662 +TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl) {
  10.663    assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
  10.664    // locate the subtree minimum by walking down left branches
  10.665 -  TreeList<Chunk>* curTL = tl;
  10.666 +  TreeList<Chunk_t, FreeList_t>* curTL = tl;
  10.667    for (; curTL->left() != NULL; curTL = curTL->left());
  10.668    // obviously curTL now has at most one child, a right child
  10.669    if (curTL != root()) {  // Should this test just be removed?
  10.670 -    TreeList<Chunk>* parentTL = curTL->parent();
  10.671 +    TreeList<Chunk_t, FreeList_t>* parentTL = curTL->parent();
  10.672      if (parentTL->left() == curTL) { // curTL is a left child
  10.673        parentTL->set_left(curTL->right());
  10.674      } else {
  10.675 @@ -685,31 +715,14 @@
  10.676    return curTL;
  10.677  }
  10.678  
  10.679 -// Based on a simplification of the algorithm by Sleator and Tarjan (JACM 1985).
  10.680 -// The simplifications are the following:
  10.681 -// . we splay only when we delete (not when we insert)
  10.682 -// . we apply a single spay step per deletion/access
  10.683 -// By doing such partial splaying, we reduce the amount of restructuring,
  10.684 -// while getting a reasonably efficient search tree (we think).
  10.685 -// [Measurements will be needed to (in)validate this expectation.]
  10.686 -
  10.687 -template <class Chunk>
  10.688 -void BinaryTreeDictionary<Chunk>::semi_splay_step(TreeList<Chunk>* tc) {
  10.689 -  // apply a semi-splay step at the given node:
  10.690 -  // . if root, norting needs to be done
  10.691 -  // . if child of root, splay once
  10.692 -  // . else zig-zig or sig-zag depending on path from grandparent
  10.693 -  if (root() == tc) return;
  10.694 -  warning("*** Splaying not yet implemented; "
  10.695 -          "tree operations may be inefficient ***");
  10.696 -}
  10.697 -
  10.698 -template <class Chunk>
  10.699 -void BinaryTreeDictionary<Chunk>::insert_chunk_in_tree(Chunk* fc) {
  10.700 -  TreeList<Chunk> *curTL, *prevTL;
  10.701 +template <class Chunk_t, template <class> class FreeList_t>
  10.702 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::insert_chunk_in_tree(Chunk_t* fc) {
  10.703 +  TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
  10.704    size_t size = fc->size();
  10.705  
  10.706 -  assert(size >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "too small to be a TreeList<Chunk>");
  10.707 +  assert((size >= min_size()),
  10.708 +    err_msg(SIZE_FORMAT " is too small to be a TreeChunk<Chunk_t, FreeList_t> " SIZE_FORMAT,
  10.709 +      size, min_size()));
  10.710    if (FLSVerifyDictionary) {
  10.711      verify_tree();
  10.712    }
  10.713 @@ -729,9 +742,9 @@
  10.714        curTL = curTL->right();
  10.715      }
  10.716    }
  10.717 -  TreeChunk<Chunk>* tc = TreeChunk<Chunk>::as_TreeChunk(fc);
  10.718 +  TreeChunk<Chunk_t, FreeList_t>* tc = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(fc);
  10.719    // This chunk is being returned to the binary tree.  Its embedded
  10.720 -  // TreeList<Chunk> should be unused at this point.
  10.721 +  // TreeList<Chunk_t, FreeList_t> should be unused at this point.
  10.722    tc->initialize();
  10.723    if (curTL != NULL) {          // exact match
  10.724      tc->set_list(curTL);
  10.725 @@ -739,8 +752,8 @@
  10.726    } else {                     // need a new node in tree
  10.727      tc->clear_next();
  10.728      tc->link_prev(NULL);
  10.729 -    TreeList<Chunk>* newTL = TreeList<Chunk>::as_TreeList(tc);
  10.730 -    assert(((TreeChunk<Chunk>*)tc)->list() == newTL,
  10.731 +    TreeList<Chunk_t, FreeList_t>* newTL = TreeList<Chunk_t, FreeList_t>::as_TreeList(tc);
  10.732 +    assert(((TreeChunk<Chunk_t, FreeList_t>*)tc)->list() == newTL,
  10.733        "List was not initialized correctly");
  10.734      if (prevTL == NULL) {      // we are the only tree node
  10.735        assert(root() == NULL, "control point invariant");
  10.736 @@ -768,30 +781,30 @@
  10.737    }
  10.738  }
  10.739  
  10.740 -template <class Chunk>
  10.741 -size_t BinaryTreeDictionary<Chunk>::max_chunk_size() const {
  10.742 -  FreeBlockDictionary<Chunk>::verify_par_locked();
  10.743 -  TreeList<Chunk>* tc = root();
  10.744 +template <class Chunk_t, template <class> class FreeList_t>
  10.745 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::max_chunk_size() const {
  10.746 +  FreeBlockDictionary<Chunk_t>::verify_par_locked();
  10.747 +  TreeList<Chunk_t, FreeList_t>* tc = root();
  10.748    if (tc == NULL) return 0;
  10.749    for (; tc->right() != NULL; tc = tc->right());
  10.750    return tc->size();
  10.751  }
  10.752  
  10.753 -template <class Chunk>
  10.754 -size_t BinaryTreeDictionary<Chunk>::total_list_length(TreeList<Chunk>* tl) const {
  10.755 +template <class Chunk_t, template <class> class FreeList_t>
  10.756 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const {
  10.757    size_t res;
  10.758    res = tl->count();
  10.759  #ifdef ASSERT
  10.760    size_t cnt;
  10.761 -  Chunk* tc = tl->head();
  10.762 +  Chunk_t* tc = tl->head();
  10.763    for (cnt = 0; tc != NULL; tc = tc->next(), cnt++);
  10.764    assert(res == cnt, "The count is not being maintained correctly");
  10.765  #endif
  10.766    return res;
  10.767  }
  10.768  
  10.769 -template <class Chunk>
  10.770 -size_t BinaryTreeDictionary<Chunk>::total_size_in_tree(TreeList<Chunk>* tl) const {
  10.771 +template <class Chunk_t, template <class> class FreeList_t>
  10.772 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
  10.773    if (tl == NULL)
  10.774      return 0;
  10.775    return (tl->size() * total_list_length(tl)) +
  10.776 @@ -799,8 +812,8 @@
  10.777           total_size_in_tree(tl->right());
  10.778  }
  10.779  
  10.780 -template <class Chunk>
  10.781 -double BinaryTreeDictionary<Chunk>::sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const {
  10.782 +template <class Chunk_t, template <class> class FreeList_t>
  10.783 +double BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const {
  10.784    if (tl == NULL) {
  10.785      return 0.0;
  10.786    }
  10.787 @@ -811,8 +824,8 @@
  10.788    return curr;
  10.789  }
  10.790  
  10.791 -template <class Chunk>
  10.792 -size_t BinaryTreeDictionary<Chunk>::total_free_blocks_in_tree(TreeList<Chunk>* tl) const {
  10.793 +template <class Chunk_t, template <class> class FreeList_t>
  10.794 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
  10.795    if (tl == NULL)
  10.796      return 0;
  10.797    return total_list_length(tl) +
  10.798 @@ -820,28 +833,28 @@
  10.799           total_free_blocks_in_tree(tl->right());
  10.800  }
  10.801  
  10.802 -template <class Chunk>
  10.803 -size_t BinaryTreeDictionary<Chunk>::num_free_blocks() const {
  10.804 +template <class Chunk_t, template <class> class FreeList_t>
  10.805 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::num_free_blocks() const {
  10.806    assert(total_free_blocks_in_tree(root()) == total_free_blocks(),
  10.807           "_total_free_blocks inconsistency");
  10.808    return total_free_blocks();
  10.809  }
  10.810  
  10.811 -template <class Chunk>
  10.812 -size_t BinaryTreeDictionary<Chunk>::tree_height_helper(TreeList<Chunk>* tl) const {
  10.813 +template <class Chunk_t, template <class> class FreeList_t>
  10.814 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
  10.815    if (tl == NULL)
  10.816      return 0;
  10.817    return 1 + MAX2(tree_height_helper(tl->left()),
  10.818                    tree_height_helper(tl->right()));
  10.819  }
  10.820  
  10.821 -template <class Chunk>
  10.822 -size_t BinaryTreeDictionary<Chunk>::treeHeight() const {
  10.823 +template <class Chunk_t, template <class> class FreeList_t>
  10.824 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height() const {
  10.825    return tree_height_helper(root());
  10.826  }
  10.827  
  10.828 -template <class Chunk>
  10.829 -size_t BinaryTreeDictionary<Chunk>::total_nodes_helper(TreeList<Chunk>* tl) const {
  10.830 +template <class Chunk_t, template <class> class FreeList_t>
  10.831 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
  10.832    if (tl == NULL) {
  10.833      return 0;
  10.834    }
  10.835 @@ -849,14 +862,18 @@
  10.836      total_nodes_helper(tl->right());
  10.837  }
  10.838  
  10.839 -template <class Chunk>
  10.840 -size_t BinaryTreeDictionary<Chunk>::total_nodes_in_tree(TreeList<Chunk>* tl) const {
  10.841 +template <class Chunk_t, template <class> class FreeList_t>
  10.842 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
  10.843    return total_nodes_helper(root());
  10.844  }
  10.845  
  10.846 -template <class Chunk>
  10.847 -void BinaryTreeDictionary<Chunk>::dict_census_udpate(size_t size, bool split, bool birth){
  10.848 -  TreeList<Chunk>* nd = find_list(size);
  10.849 +template <class Chunk_t, template <class> class FreeList_t>
  10.850 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::dict_census_update(size_t size, bool split, bool birth){}
  10.851 +
  10.852 +#ifndef SERIALGC
  10.853 +template <>
  10.854 +void BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::dict_census_update(size_t size, bool split, bool birth){
  10.855 +  TreeList<FreeChunk, AdaptiveFreeList>* nd = find_list(size);
  10.856    if (nd) {
  10.857      if (split) {
  10.858        if (birth) {
  10.859 @@ -882,16 +899,26 @@
  10.860    //   This is a birth associated with a LinAB.  The chunk
  10.861    //     for the LinAB is not in the dictionary.
  10.862  }
  10.863 +#endif // SERIALGC
  10.864  
  10.865 -template <class Chunk>
  10.866 -bool BinaryTreeDictionary<Chunk>::coal_dict_over_populated(size_t size) {
  10.867 +template <class Chunk_t, template <class> class FreeList_t>
  10.868 +bool BinaryTreeDictionary<Chunk_t, FreeList_t>::coal_dict_over_populated(size_t size) {
  10.869 +  // For the general type of freelists, encourage coalescing by
  10.870 +  // returning true.
  10.871 +  return true;
  10.872 +}
  10.873 +
  10.874 +#ifndef SERIALGC
  10.875 +template <>
  10.876 +bool BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::coal_dict_over_populated(size_t size) {
  10.877    if (FLSAlwaysCoalesceLarge) return true;
  10.878  
  10.879 -  TreeList<Chunk>* list_of_size = find_list(size);
  10.880 +  TreeList<FreeChunk, AdaptiveFreeList>* list_of_size = find_list(size);
  10.881    // None of requested size implies overpopulated.
  10.882    return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
  10.883           list_of_size->count() > list_of_size->coal_desired();
  10.884  }
  10.885 +#endif  // SERIALGC
  10.886  
  10.887  // Closures for walking the binary tree.
  10.888  //   do_list() walks the free list in a node applying the closure
  10.889 @@ -899,19 +926,18 @@
  10.890  //   do_tree() walks the nodes in the binary tree applying do_list()
  10.891  //     to each list at each node.
  10.892  
  10.893 -template <class Chunk>
  10.894 +template <class Chunk_t, template <class> class FreeList_t>
  10.895  class TreeCensusClosure : public StackObj {
  10.896   protected:
  10.897 -  virtual void do_list(FreeList<Chunk>* fl) = 0;
  10.898 +  virtual void do_list(FreeList_t<Chunk_t>* fl) = 0;
  10.899   public:
  10.900 -  virtual void do_tree(TreeList<Chunk>* tl) = 0;
  10.901 +  virtual void do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
  10.902  };
  10.903  
  10.904 -template <class Chunk>
  10.905 -class AscendTreeCensusClosure : public TreeCensusClosure<Chunk> {
  10.906 -  using TreeCensusClosure<Chunk>::do_list;
  10.907 +template <class Chunk_t, template <class> class FreeList_t>
  10.908 +class AscendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
  10.909   public:
  10.910 -  void do_tree(TreeList<Chunk>* tl) {
  10.911 +  void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
  10.912      if (tl != NULL) {
  10.913        do_tree(tl->left());
  10.914        do_list(tl);
  10.915 @@ -920,11 +946,10 @@
  10.916    }
  10.917  };
  10.918  
  10.919 -template <class Chunk>
  10.920 -class DescendTreeCensusClosure : public TreeCensusClosure<Chunk> {
  10.921 -  using TreeCensusClosure<Chunk>::do_list;
  10.922 +template <class Chunk_t, template <class> class FreeList_t>
  10.923 +class DescendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
  10.924   public:
  10.925 -  void do_tree(TreeList<Chunk>* tl) {
  10.926 +  void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
  10.927      if (tl != NULL) {
  10.928        do_tree(tl->right());
  10.929        do_list(tl);
  10.930 @@ -935,8 +960,8 @@
  10.931  
  10.932  // For each list in the tree, calculate the desired, desired
  10.933  // coalesce, count before sweep, and surplus before sweep.
  10.934 -template <class Chunk>
  10.935 -class BeginSweepClosure : public AscendTreeCensusClosure<Chunk> {
  10.936 +template <class Chunk_t, template <class> class FreeList_t>
  10.937 +class BeginSweepClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
  10.938    double _percentage;
  10.939    float _inter_sweep_current;
  10.940    float _inter_sweep_estimate;
  10.941 @@ -951,32 +976,36 @@
  10.942     _inter_sweep_estimate(inter_sweep_estimate),
  10.943     _intra_sweep_estimate(intra_sweep_estimate) { }
  10.944  
  10.945 -  void do_list(FreeList<Chunk>* fl) {
  10.946 +  void do_list(FreeList<Chunk_t>* fl) {}
  10.947 +
  10.948 +#ifndef SERIALGC
  10.949 +  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
  10.950      double coalSurplusPercent = _percentage;
  10.951      fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
  10.952      fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
  10.953      fl->set_before_sweep(fl->count());
  10.954      fl->set_bfr_surp(fl->surplus());
  10.955    }
  10.956 +#endif // SERIALGC
  10.957  };
  10.958  
  10.959  // Used to search the tree until a condition is met.
  10.960  // Similar to TreeCensusClosure but searches the
  10.961  // tree and returns promptly when found.
  10.962  
  10.963 -template <class Chunk>
  10.964 +template <class Chunk_t, template <class> class FreeList_t>
  10.965  class TreeSearchClosure : public StackObj {
  10.966   protected:
  10.967 -  virtual bool do_list(FreeList<Chunk>* fl) = 0;
  10.968 +  virtual bool do_list(FreeList_t<Chunk_t>* fl) = 0;
  10.969   public:
  10.970 -  virtual bool do_tree(TreeList<Chunk>* tl) = 0;
  10.971 +  virtual bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
  10.972  };
  10.973  
  10.974  #if 0 //  Don't need this yet but here for symmetry.
  10.975 -template <class Chunk>
  10.976 -class AscendTreeSearchClosure : public TreeSearchClosure {
  10.977 +template <class Chunk_t, template <class> class FreeList_t>
  10.978 +class AscendTreeSearchClosure : public TreeSearchClosure<Chunk_t> {
  10.979   public:
  10.980 -  bool do_tree(TreeList<Chunk>* tl) {
  10.981 +  bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
  10.982      if (tl != NULL) {
  10.983        if (do_tree(tl->left())) return true;
  10.984        if (do_list(tl)) return true;
  10.985 @@ -987,11 +1016,10 @@
  10.986  };
  10.987  #endif
  10.988  
  10.989 -template <class Chunk>
  10.990 -class DescendTreeSearchClosure : public TreeSearchClosure<Chunk> {
  10.991 -  using TreeSearchClosure<Chunk>::do_list;
  10.992 +template <class Chunk_t, template <class> class FreeList_t>
  10.993 +class DescendTreeSearchClosure : public TreeSearchClosure<Chunk_t, FreeList_t> {
  10.994   public:
  10.995 -  bool do_tree(TreeList<Chunk>* tl) {
  10.996 +  bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
  10.997      if (tl != NULL) {
  10.998        if (do_tree(tl->right())) return true;
  10.999        if (do_list(tl)) return true;
 10.1000 @@ -1003,17 +1031,17 @@
 10.1001  
 10.1002  // Searches the tree for a chunk that ends at the
 10.1003  // specified address.
 10.1004 -template <class Chunk>
 10.1005 -class EndTreeSearchClosure : public DescendTreeSearchClosure<Chunk> {
 10.1006 +template <class Chunk_t, template <class> class FreeList_t>
 10.1007 +class EndTreeSearchClosure : public DescendTreeSearchClosure<Chunk_t, FreeList_t> {
 10.1008    HeapWord* _target;
 10.1009 -  Chunk* _found;
 10.1010 +  Chunk_t* _found;
 10.1011  
 10.1012   public:
 10.1013    EndTreeSearchClosure(HeapWord* target) : _target(target), _found(NULL) {}
 10.1014 -  bool do_list(FreeList<Chunk>* fl) {
 10.1015 -    Chunk* item = fl->head();
 10.1016 +  bool do_list(FreeList_t<Chunk_t>* fl) {
 10.1017 +    Chunk_t* item = fl->head();
 10.1018      while (item != NULL) {
 10.1019 -      if (item->end() == _target) {
 10.1020 +      if (item->end() == (uintptr_t*) _target) {
 10.1021          _found = item;
 10.1022          return true;
 10.1023        }
 10.1024 @@ -1021,22 +1049,22 @@
 10.1025      }
 10.1026      return false;
 10.1027    }
 10.1028 -  Chunk* found() { return _found; }
 10.1029 +  Chunk_t* found() { return _found; }
 10.1030  };
 10.1031  
 10.1032 -template <class Chunk>
 10.1033 -Chunk* BinaryTreeDictionary<Chunk>::find_chunk_ends_at(HeapWord* target) const {
 10.1034 -  EndTreeSearchClosure<Chunk> etsc(target);
 10.1035 +template <class Chunk_t, template <class> class FreeList_t>
 10.1036 +Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_chunk_ends_at(HeapWord* target) const {
 10.1037 +  EndTreeSearchClosure<Chunk_t, FreeList_t> etsc(target);
 10.1038    bool found_target = etsc.do_tree(root());
 10.1039    assert(found_target || etsc.found() == NULL, "Consistency check");
 10.1040    assert(!found_target || etsc.found() != NULL, "Consistency check");
 10.1041    return etsc.found();
 10.1042  }
 10.1043  
 10.1044 -template <class Chunk>
 10.1045 -void BinaryTreeDictionary<Chunk>::begin_sweep_dict_census(double coalSurplusPercent,
 10.1046 +template <class Chunk_t, template <class> class FreeList_t>
 10.1047 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::begin_sweep_dict_census(double coalSurplusPercent,
 10.1048    float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
 10.1049 -  BeginSweepClosure<Chunk> bsc(coalSurplusPercent, inter_sweep_current,
 10.1050 +  BeginSweepClosure<Chunk_t, FreeList_t> bsc(coalSurplusPercent, inter_sweep_current,
 10.1051                                              inter_sweep_estimate,
 10.1052                                              intra_sweep_estimate);
 10.1053    bsc.do_tree(root());
 10.1054 @@ -1045,84 +1073,91 @@
 10.1055  // Closures and methods for calculating total bytes returned to the
 10.1056  // free lists in the tree.
 10.1057  #ifndef PRODUCT
 10.1058 -template <class Chunk>
 10.1059 -class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure<Chunk> {
 10.1060 +template <class Chunk_t, template <class> class FreeList_t>
 10.1061 +class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1062     public:
 10.1063 -  void do_list(FreeList<Chunk>* fl) {
 10.1064 +  void do_list(FreeList_t<Chunk_t>* fl) {
 10.1065      fl->set_returned_bytes(0);
 10.1066    }
 10.1067  };
 10.1068  
 10.1069 -template <class Chunk>
 10.1070 -void BinaryTreeDictionary<Chunk>::initialize_dict_returned_bytes() {
 10.1071 -  InitializeDictReturnedBytesClosure<Chunk> idrb;
 10.1072 +template <class Chunk_t, template <class> class FreeList_t>
 10.1073 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::initialize_dict_returned_bytes() {
 10.1074 +  InitializeDictReturnedBytesClosure<Chunk_t, FreeList_t> idrb;
 10.1075    idrb.do_tree(root());
 10.1076  }
 10.1077  
 10.1078 -template <class Chunk>
 10.1079 -class ReturnedBytesClosure : public AscendTreeCensusClosure<Chunk> {
 10.1080 +template <class Chunk_t, template <class> class FreeList_t>
 10.1081 +class ReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1082    size_t _dict_returned_bytes;
 10.1083   public:
 10.1084    ReturnedBytesClosure() { _dict_returned_bytes = 0; }
 10.1085 -  void do_list(FreeList<Chunk>* fl) {
 10.1086 +  void do_list(FreeList_t<Chunk_t>* fl) {
 10.1087      _dict_returned_bytes += fl->returned_bytes();
 10.1088    }
 10.1089    size_t dict_returned_bytes() { return _dict_returned_bytes; }
 10.1090  };
 10.1091  
 10.1092 -template <class Chunk>
 10.1093 -size_t BinaryTreeDictionary<Chunk>::sum_dict_returned_bytes() {
 10.1094 -  ReturnedBytesClosure<Chunk> rbc;
 10.1095 +template <class Chunk_t, template <class> class FreeList_t>
 10.1096 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_dict_returned_bytes() {
 10.1097 +  ReturnedBytesClosure<Chunk_t, FreeList_t> rbc;
 10.1098    rbc.do_tree(root());
 10.1099  
 10.1100    return rbc.dict_returned_bytes();
 10.1101  }
 10.1102  
 10.1103  // Count the number of entries in the tree.
 10.1104 -template <class Chunk>
 10.1105 -class treeCountClosure : public DescendTreeCensusClosure<Chunk> {
 10.1106 +template <class Chunk_t, template <class> class FreeList_t>
 10.1107 +class treeCountClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1108   public:
 10.1109    uint count;
 10.1110    treeCountClosure(uint c) { count = c; }
 10.1111 -  void do_list(FreeList<Chunk>* fl) {
 10.1112 +  void do_list(FreeList_t<Chunk_t>* fl) {
 10.1113      count++;
 10.1114    }
 10.1115  };
 10.1116  
 10.1117 -template <class Chunk>
 10.1118 -size_t BinaryTreeDictionary<Chunk>::total_count() {
 10.1119 -  treeCountClosure<Chunk> ctc(0);
 10.1120 +template <class Chunk_t, template <class> class FreeList_t>
 10.1121 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_count() {
 10.1122 +  treeCountClosure<Chunk_t, FreeList_t> ctc(0);
 10.1123    ctc.do_tree(root());
 10.1124    return ctc.count;
 10.1125  }
 10.1126  #endif // PRODUCT
 10.1127  
 10.1128  // Calculate surpluses for the lists in the tree.
 10.1129 -template <class Chunk>
 10.1130 -class setTreeSurplusClosure : public AscendTreeCensusClosure<Chunk> {
 10.1131 +template <class Chunk_t, template <class> class FreeList_t>
 10.1132 +class setTreeSurplusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1133    double percentage;
 10.1134   public:
 10.1135    setTreeSurplusClosure(double v) { percentage = v; }
 10.1136 -  void do_list(FreeList<Chunk>* fl) {
 10.1137 +  void do_list(FreeList<Chunk_t>* fl) {}
 10.1138 +
 10.1139 +#ifndef SERIALGC
 10.1140 +  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
 10.1141      double splitSurplusPercent = percentage;
 10.1142      fl->set_surplus(fl->count() -
 10.1143                     (ssize_t)((double)fl->desired() * splitSurplusPercent));
 10.1144    }
 10.1145 +#endif // SERIALGC
 10.1146  };
 10.1147  
 10.1148 -template <class Chunk>
 10.1149 -void BinaryTreeDictionary<Chunk>::set_tree_surplus(double splitSurplusPercent) {
 10.1150 -  setTreeSurplusClosure<Chunk> sts(splitSurplusPercent);
 10.1151 +template <class Chunk_t, template <class> class FreeList_t>
 10.1152 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_surplus(double splitSurplusPercent) {
 10.1153 +  setTreeSurplusClosure<Chunk_t, FreeList_t> sts(splitSurplusPercent);
 10.1154    sts.do_tree(root());
 10.1155  }
 10.1156  
 10.1157  // Set hints for the lists in the tree.
 10.1158 -template <class Chunk>
 10.1159 -class setTreeHintsClosure : public DescendTreeCensusClosure<Chunk> {
 10.1160 +template <class Chunk_t, template <class> class FreeList_t>
 10.1161 +class setTreeHintsClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1162    size_t hint;
 10.1163   public:
 10.1164    setTreeHintsClosure(size_t v) { hint = v; }
 10.1165 -  void do_list(FreeList<Chunk>* fl) {
 10.1166 +  void do_list(FreeList<Chunk_t>* fl) {}
 10.1167 +
 10.1168 +#ifndef SERIALGC
 10.1169 +  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
 10.1170      fl->set_hint(hint);
 10.1171      assert(fl->hint() == 0 || fl->hint() > fl->size(),
 10.1172        "Current hint is inconsistent");
 10.1173 @@ -1130,35 +1165,40 @@
 10.1174        hint = fl->size();
 10.1175      }
 10.1176    }
 10.1177 +#endif // SERIALGC
 10.1178  };
 10.1179  
 10.1180 -template <class Chunk>
 10.1181 -void BinaryTreeDictionary<Chunk>::set_tree_hints(void) {
 10.1182 -  setTreeHintsClosure<Chunk> sth(0);
 10.1183 +template <class Chunk_t, template <class> class FreeList_t>
 10.1184 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_hints(void) {
 10.1185 +  setTreeHintsClosure<Chunk_t, FreeList_t> sth(0);
 10.1186    sth.do_tree(root());
 10.1187  }
 10.1188  
 10.1189  // Save count before previous sweep and splits and coalesces.
 10.1190 -template <class Chunk>
 10.1191 -class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk> {
 10.1192 -  void do_list(FreeList<Chunk>* fl) {
 10.1193 +template <class Chunk_t, template <class> class FreeList_t>
 10.1194 +class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1195 +  void do_list(FreeList<Chunk_t>* fl) {}
 10.1196 +
 10.1197 +#ifndef SERIALGC
 10.1198 +  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
 10.1199      fl->set_prev_sweep(fl->count());
 10.1200      fl->set_coal_births(0);
 10.1201      fl->set_coal_deaths(0);
 10.1202      fl->set_split_births(0);
 10.1203      fl->set_split_deaths(0);
 10.1204    }
 10.1205 +#endif  // SERIALGC
 10.1206  };
 10.1207  
 10.1208 -template <class Chunk>
 10.1209 -void BinaryTreeDictionary<Chunk>::clear_tree_census(void) {
 10.1210 -  clearTreeCensusClosure<Chunk> ctc;
 10.1211 +template <class Chunk_t, template <class> class FreeList_t>
 10.1212 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::clear_tree_census(void) {
 10.1213 +  clearTreeCensusClosure<Chunk_t, FreeList_t> ctc;
 10.1214    ctc.do_tree(root());
 10.1215  }
 10.1216  
 10.1217  // Do reporting and post sweep clean up.
 10.1218 -template <class Chunk>
 10.1219 -void BinaryTreeDictionary<Chunk>::end_sweep_dict_census(double splitSurplusPercent) {
 10.1220 +template <class Chunk_t, template <class> class FreeList_t>
 10.1221 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::end_sweep_dict_census(double splitSurplusPercent) {
 10.1222    // Does walking the tree 3 times hurt?
 10.1223    set_tree_surplus(splitSurplusPercent);
 10.1224    set_tree_hints();
 10.1225 @@ -1169,9 +1209,9 @@
 10.1226  }
 10.1227  
 10.1228  // Print summary statistics
 10.1229 -template <class Chunk>
 10.1230 -void BinaryTreeDictionary<Chunk>::report_statistics() const {
 10.1231 -  FreeBlockDictionary<Chunk>::verify_par_locked();
 10.1232 +template <class Chunk_t, template <class> class FreeList_t>
 10.1233 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
 10.1234 +  FreeBlockDictionary<Chunk_t>::verify_par_locked();
 10.1235    gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
 10.1236           "------------------------------------\n");
 10.1237    size_t total_size = total_chunk_size(debug_only(NULL));
 10.1238 @@ -1182,36 +1222,47 @@
 10.1239    if (free_blocks > 0) {
 10.1240      gclog_or_tty->print("Av.  Block  Size: %d\n", total_size/free_blocks);
 10.1241    }
 10.1242 -  gclog_or_tty->print("Tree      Height: %d\n", treeHeight());
 10.1243 +  gclog_or_tty->print("Tree      Height: %d\n", tree_height());
 10.1244  }
 10.1245  
 10.1246  // Print census information - counts, births, deaths, etc.
 10.1247  // for each list in the tree.  Also print some summary
 10.1248  // information.
 10.1249 -template <class Chunk>
 10.1250 -class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk> {
 10.1251 +template <class Chunk_t, template <class> class FreeList_t>
 10.1252 +class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1253    int _print_line;
 10.1254    size_t _total_free;
 10.1255 -  FreeList<Chunk> _total;
 10.1256 +  FreeList_t<Chunk_t> _total;
 10.1257  
 10.1258   public:
 10.1259    PrintTreeCensusClosure() {
 10.1260      _print_line = 0;
 10.1261      _total_free = 0;
 10.1262    }
 10.1263 -  FreeList<Chunk>* total() { return &_total; }
 10.1264 +  FreeList_t<Chunk_t>* total() { return &_total; }
 10.1265    size_t total_free() { return _total_free; }
 10.1266 -  void do_list(FreeList<Chunk>* fl) {
 10.1267 +  void do_list(FreeList<Chunk_t>* fl) {
 10.1268      if (++_print_line >= 40) {
 10.1269 -      FreeList<Chunk>::print_labels_on(gclog_or_tty, "size");
 10.1270 +      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
 10.1271        _print_line = 0;
 10.1272      }
 10.1273      fl->print_on(gclog_or_tty);
 10.1274      _total_free +=            fl->count()            * fl->size()        ;
 10.1275      total()->set_count(      total()->count()       + fl->count()      );
 10.1276 -    total()->set_bfr_surp(    total()->bfr_surp()     + fl->bfr_surp()    );
 10.1277 +  }
 10.1278 +
 10.1279 +#ifndef SERIALGC
 10.1280 +  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
 10.1281 +    if (++_print_line >= 40) {
 10.1282 +      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
 10.1283 +      _print_line = 0;
 10.1284 +    }
 10.1285 +    fl->print_on(gclog_or_tty);
 10.1286 +    _total_free +=           fl->count()             * fl->size()        ;
 10.1287 +    total()->set_count(      total()->count()        + fl->count()      );
 10.1288 +    total()->set_bfr_surp(   total()->bfr_surp()     + fl->bfr_surp()    );
 10.1289      total()->set_surplus(    total()->split_deaths() + fl->surplus()    );
 10.1290 -    total()->set_desired(    total()->desired()     + fl->desired()    );
 10.1291 +    total()->set_desired(    total()->desired()      + fl->desired()    );
 10.1292      total()->set_prev_sweep(  total()->prev_sweep()   + fl->prev_sweep()  );
 10.1293      total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
 10.1294      total()->set_coal_births( total()->coal_births()  + fl->coal_births() );
 10.1295 @@ -1219,18 +1270,32 @@
 10.1296      total()->set_split_births(total()->split_births() + fl->split_births());
 10.1297      total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
 10.1298    }
 10.1299 +#endif  // SERIALGC
 10.1300  };
 10.1301  
 10.1302 -template <class Chunk>
 10.1303 -void BinaryTreeDictionary<Chunk>::print_dict_census(void) const {
 10.1304 +template <class Chunk_t, template <class> class FreeList_t>
 10.1305 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_dict_census(void) const {
 10.1306  
 10.1307    gclog_or_tty->print("\nBinaryTree\n");
 10.1308 -  FreeList<Chunk>::print_labels_on(gclog_or_tty, "size");
 10.1309 -  PrintTreeCensusClosure<Chunk> ptc;
 10.1310 +  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
 10.1311 +  PrintTreeCensusClosure<Chunk_t, FreeList_t> ptc;
 10.1312    ptc.do_tree(root());
 10.1313  
 10.1314 -  FreeList<Chunk>* total = ptc.total();
 10.1315 -  FreeList<Chunk>::print_labels_on(gclog_or_tty, " ");
 10.1316 +  FreeList_t<Chunk_t>* total = ptc.total();
 10.1317 +  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, " ");
 10.1318 +}
 10.1319 +
 10.1320 +#ifndef SERIALGC
 10.1321 +template <>
 10.1322 +void BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::print_dict_census(void) const {
 10.1323 +
 10.1324 +  gclog_or_tty->print("\nBinaryTree\n");
 10.1325 +  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
 10.1326 +  PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList> ptc;
 10.1327 +  ptc.do_tree(root());
 10.1328 +
 10.1329 +  AdaptiveFreeList<FreeChunk>* total = ptc.total();
 10.1330 +  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, " ");
 10.1331    total->print_on(gclog_or_tty, "TOTAL\t");
 10.1332    gclog_or_tty->print(
 10.1333                "total_free(words): " SIZE_FORMAT_W(16)
 10.1334 @@ -1242,9 +1307,10 @@
 10.1335               (double)(total->desired() - total->count())
 10.1336               /(total->desired() != 0 ? (double)total->desired() : 1.0));
 10.1337  }
 10.1338 +#endif  // SERIALGC
 10.1339  
 10.1340 -template <class Chunk>
 10.1341 -class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk> {
 10.1342 +template <class Chunk_t, template <class> class FreeList_t>
 10.1343 +class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 10.1344    outputStream* _st;
 10.1345    int _print_line;
 10.1346  
 10.1347 @@ -1253,14 +1319,14 @@
 10.1348      _st = st;
 10.1349      _print_line = 0;
 10.1350    }
 10.1351 -  void do_list(FreeList<Chunk>* fl) {
 10.1352 +  void do_list(FreeList_t<Chunk_t>* fl) {
 10.1353      if (++_print_line >= 40) {
 10.1354 -      FreeList<Chunk>::print_labels_on(_st, "size");
 10.1355 +      FreeList_t<Chunk_t>::print_labels_on(_st, "size");
 10.1356        _print_line = 0;
 10.1357      }
 10.1358      fl->print_on(gclog_or_tty);
 10.1359      size_t sz = fl->size();
 10.1360 -    for (Chunk* fc = fl->head(); fc != NULL;
 10.1361 +    for (Chunk_t* fc = fl->head(); fc != NULL;
 10.1362           fc = fc->next()) {
 10.1363        _st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 10.1364                      fc, (HeapWord*)fc + sz,
 10.1365 @@ -1269,11 +1335,11 @@
 10.1366    }
 10.1367  };
 10.1368  
 10.1369 -template <class Chunk>
 10.1370 -void BinaryTreeDictionary<Chunk>::print_free_lists(outputStream* st) const {
 10.1371 +template <class Chunk_t, template <class> class FreeList_t>
 10.1372 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_free_lists(outputStream* st) const {
 10.1373  
 10.1374 -  FreeList<Chunk>::print_labels_on(st, "size");
 10.1375 -  PrintFreeListsClosure<Chunk> pflc(st);
 10.1376 +  FreeList_t<Chunk_t>::print_labels_on(st, "size");
 10.1377 +  PrintFreeListsClosure<Chunk_t, FreeList_t> pflc(st);
 10.1378    pflc.do_tree(root());
 10.1379  }
 10.1380  
 10.1381 @@ -1281,18 +1347,18 @@
 10.1382  // . _root has no parent
 10.1383  // . parent and child point to each other
 10.1384  // . each node's key correctly related to that of its child(ren)
 10.1385 -template <class Chunk>
 10.1386 -void BinaryTreeDictionary<Chunk>::verify_tree() const {
 10.1387 +template <class Chunk_t, template <class> class FreeList_t>
 10.1388 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
 10.1389    guarantee(root() == NULL || total_free_blocks() == 0 ||
 10.1390      total_size() != 0, "_total_size should't be 0?");
 10.1391    guarantee(root() == NULL || root()->parent() == NULL, "_root shouldn't have parent");
 10.1392    verify_tree_helper(root());
 10.1393  }
 10.1394  
 10.1395 -template <class Chunk>
 10.1396 -size_t BinaryTreeDictionary<Chunk>::verify_prev_free_ptrs(TreeList<Chunk>* tl) {
 10.1397 +template <class Chunk_t, template <class> class FreeList_t>
 10.1398 +size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl) {
 10.1399    size_t ct = 0;
 10.1400 -  for (Chunk* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
 10.1401 +  for (Chunk_t* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
 10.1402      ct++;
 10.1403      assert(curFC->prev() == NULL || curFC->prev()->is_free(),
 10.1404        "Chunk should be free");
 10.1405 @@ -1303,8 +1369,8 @@
 10.1406  // Note: this helper is recursive rather than iterative, so use with
 10.1407  // caution on very deep trees; and watch out for stack overflow errors;
 10.1408  // In general, to be used only for debugging.
 10.1409 -template <class Chunk>
 10.1410 -void BinaryTreeDictionary<Chunk>::verify_tree_helper(TreeList<Chunk>* tl) const {
 10.1411 +template <class Chunk_t, template <class> class FreeList_t>
 10.1412 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
 10.1413    if (tl == NULL)
 10.1414      return;
 10.1415    guarantee(tl->size() != 0, "A list must has a size");
 10.1416 @@ -1332,15 +1398,25 @@
 10.1417    verify_tree_helper(tl->right());
 10.1418  }
 10.1419  
 10.1420 -template <class Chunk>
 10.1421 -void BinaryTreeDictionary<Chunk>::verify() const {
 10.1422 +template <class Chunk_t, template <class> class FreeList_t>
 10.1423 +void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify() const {
 10.1424    verify_tree();
 10.1425    guarantee(total_size() == total_size_in_tree(root()), "Total Size inconsistency");
 10.1426  }
 10.1427  
 10.1428 +template class TreeList<Metablock, FreeList>;
 10.1429 +template class BinaryTreeDictionary<Metablock, FreeList>;
 10.1430 +template class TreeChunk<Metablock, FreeList>;
 10.1431 +
 10.1432 +template class TreeList<Metachunk, FreeList>;
 10.1433 +template class BinaryTreeDictionary<Metachunk, FreeList>;
 10.1434 +template class TreeChunk<Metachunk, FreeList>;
 10.1435 +
 10.1436 +
 10.1437  #ifndef SERIALGC
 10.1438  // Explicitly instantiate these types for FreeChunk.
 10.1439 -template class BinaryTreeDictionary<FreeChunk>;
 10.1440 -template class TreeChunk<FreeChunk>;
 10.1441 -template class TreeList<FreeChunk>;
 10.1442 +template class TreeList<FreeChunk, AdaptiveFreeList>;
 10.1443 +template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>;
 10.1444 +template class TreeChunk<FreeChunk, AdaptiveFreeList>;
 10.1445 +
 10.1446  #endif // SERIALGC
    11.1 --- a/src/share/vm/memory/binaryTreeDictionary.hpp	Fri Oct 19 11:26:17 2012 -0700
    11.2 +++ b/src/share/vm/memory/binaryTreeDictionary.hpp	Tue Sep 18 23:35:42 2012 -0700
    11.3 @@ -37,77 +37,78 @@
    11.4  // A TreeList is a FreeList which can be used to maintain a
    11.5  // binary tree of free lists.
    11.6  
    11.7 -template <class Chunk> class TreeChunk;
    11.8 -template <class Chunk> class BinaryTreeDictionary;
    11.9 -template <class Chunk> class AscendTreeCensusClosure;
   11.10 -template <class Chunk> class DescendTreeCensusClosure;
   11.11 -template <class Chunk> class DescendTreeSearchClosure;
   11.12 +template <class Chunk_t, template <class> class FreeList_t> class TreeChunk;
   11.13 +template <class Chunk_t, template <class> class FreeList_t> class BinaryTreeDictionary;
   11.14 +template <class Chunk_t, template <class> class FreeList_t> class AscendTreeCensusClosure;
   11.15 +template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
   11.16 +template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;
   11.17  
   11.18 -template <class Chunk>
   11.19 -class TreeList: public FreeList<Chunk> {
   11.20 -  friend class TreeChunk<Chunk>;
   11.21 -  friend class BinaryTreeDictionary<Chunk>;
   11.22 -  friend class AscendTreeCensusClosure<Chunk>;
   11.23 -  friend class DescendTreeCensusClosure<Chunk>;
   11.24 -  friend class DescendTreeSearchClosure<Chunk>;
   11.25 +template <class Chunk_t, template <class> class FreeList_t>
   11.26 +class TreeList : public FreeList_t<Chunk_t> {
   11.27 +  friend class TreeChunk<Chunk_t, FreeList_t>;
   11.28 +  friend class BinaryTreeDictionary<Chunk_t, FreeList_t>;
   11.29 +  friend class AscendTreeCensusClosure<Chunk_t, FreeList_t>;
   11.30 +  friend class DescendTreeCensusClosure<Chunk_t, FreeList_t>;
   11.31 +  friend class DescendTreeSearchClosure<Chunk_t, FreeList_t>;
   11.32  
   11.33 -  TreeList<Chunk>* _parent;
   11.34 -  TreeList<Chunk>* _left;
   11.35 -  TreeList<Chunk>* _right;
   11.36 +  TreeList<Chunk_t, FreeList_t>* _parent;
   11.37 +  TreeList<Chunk_t, FreeList_t>* _left;
   11.38 +  TreeList<Chunk_t, FreeList_t>* _right;
   11.39  
   11.40   protected:
   11.41 -  TreeList<Chunk>* parent() const { return _parent; }
   11.42 -  TreeList<Chunk>* left()   const { return _left;   }
   11.43 -  TreeList<Chunk>* right()  const { return _right;  }
   11.44  
   11.45 -  // Explicitly import these names into our namespace to fix name lookup with templates
   11.46 -  using FreeList<Chunk>::head;
   11.47 -  using FreeList<Chunk>::set_head;
   11.48 +  TreeList<Chunk_t, FreeList_t>* parent() const { return _parent; }
   11.49 +  TreeList<Chunk_t, FreeList_t>* left()   const { return _left;   }
   11.50 +  TreeList<Chunk_t, FreeList_t>* right()  const { return _right;  }
   11.51  
   11.52 -  using FreeList<Chunk>::tail;
   11.53 -  using FreeList<Chunk>::set_tail;
   11.54 -  using FreeList<Chunk>::link_tail;
   11.55 +  // Wrapper on call to base class, to get the template to compile.
   11.56 +  Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
   11.57 +  Chunk_t* tail() const { return FreeList_t<Chunk_t>::tail(); }
   11.58 +  void set_head(Chunk_t* head) { FreeList_t<Chunk_t>::set_head(head); }
   11.59 +  void set_tail(Chunk_t* tail) { FreeList_t<Chunk_t>::set_tail(tail); }
   11.60  
   11.61 -  using FreeList<Chunk>::increment_count;
   11.62 -  NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
   11.63 -  using FreeList<Chunk>::verify_chunk_in_free_list;
   11.64 -  using FreeList<Chunk>::size;
   11.65 +  size_t size() const { return FreeList_t<Chunk_t>::size(); }
   11.66  
   11.67    // Accessors for links in tree.
   11.68  
   11.69 -  void set_left(TreeList<Chunk>* tl) {
   11.70 +  void set_left(TreeList<Chunk_t, FreeList_t>* tl) {
   11.71      _left   = tl;
   11.72      if (tl != NULL)
   11.73        tl->set_parent(this);
   11.74    }
   11.75 -  void set_right(TreeList<Chunk>* tl) {
   11.76 +  void set_right(TreeList<Chunk_t, FreeList_t>* tl) {
   11.77      _right  = tl;
   11.78      if (tl != NULL)
   11.79        tl->set_parent(this);
   11.80    }
   11.81 -  void set_parent(TreeList<Chunk>* tl)  { _parent = tl;   }
   11.82 +  void set_parent(TreeList<Chunk_t, FreeList_t>* tl)  { _parent = tl;   }
   11.83  
   11.84 -  void clearLeft()               { _left = NULL;   }
   11.85 +  void clear_left()               { _left = NULL;   }
   11.86    void clear_right()              { _right = NULL;  }
   11.87    void clear_parent()             { _parent = NULL; }
   11.88 -  void initialize()              { clearLeft(); clear_right(), clear_parent(); }
   11.89 +  void initialize()               { clear_left(); clear_right(), clear_parent(); FreeList_t<Chunk_t>::initialize(); }
   11.90  
   11.91    // For constructing a TreeList from a Tree chunk or
   11.92    // address and size.
   11.93 -  static TreeList<Chunk>* as_TreeList(TreeChunk<Chunk>* tc);
   11.94 -  static TreeList<Chunk>* as_TreeList(HeapWord* addr, size_t size);
   11.95 +  TreeList();
   11.96 +  static TreeList<Chunk_t, FreeList_t>*
   11.97 +          as_TreeList(TreeChunk<Chunk_t, FreeList_t>* tc);
   11.98 +  static TreeList<Chunk_t, FreeList_t>* as_TreeList(HeapWord* addr, size_t size);
   11.99  
  11.100    // Returns the head of the free list as a pointer to a TreeChunk.
  11.101 -  TreeChunk<Chunk>* head_as_TreeChunk();
  11.102 +  TreeChunk<Chunk_t, FreeList_t>* head_as_TreeChunk();
  11.103  
  11.104    // Returns the first available chunk in the free list as a pointer
  11.105    // to a TreeChunk.
  11.106 -  TreeChunk<Chunk>* first_available();
  11.107 +  TreeChunk<Chunk_t, FreeList_t>* first_available();
  11.108  
  11.109    // Returns the block with the largest heap address amongst
  11.110    // those in the list for this size; potentially slow and expensive,
  11.111    // use with caution!
  11.112 -  TreeChunk<Chunk>* largest_address();
  11.113 +  TreeChunk<Chunk_t, FreeList_t>* largest_address();
  11.114 +
  11.115 +  TreeList<Chunk_t, FreeList_t>* get_better_list(
  11.116 +    BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary);
  11.117  
  11.118    // remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
  11.119    // If "tc" is the first chunk in the list, it is also the
  11.120 @@ -115,10 +116,10 @@
  11.121    // returns the possibly replaced TreeList* for the node in
  11.122    // the tree.  It also updates the parent of the original
  11.123    // node to point to the new node.
  11.124 -  TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
  11.125 +  TreeList<Chunk_t, FreeList_t>* remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc);
  11.126    // See FreeList.
  11.127 -  void return_chunk_at_head(TreeChunk<Chunk>* tc);
  11.128 -  void return_chunk_at_tail(TreeChunk<Chunk>* tc);
  11.129 +  void return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* tc);
  11.130 +  void return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* tc);
  11.131  };
  11.132  
  11.133  // A TreeChunk is a subclass of a Chunk that additionally
  11.134 @@ -134,52 +135,54 @@
  11.135  // on the free list for a node in the tree and is only removed if
  11.136  // it is the last chunk on the free list.
  11.137  
  11.138 -template <class Chunk>
  11.139 -class TreeChunk : public Chunk {
  11.140 -  friend class TreeList<Chunk>;
  11.141 -  TreeList<Chunk>* _list;
  11.142 -  TreeList<Chunk> _embedded_list;  // if non-null, this chunk is on _list
  11.143 +template <class Chunk_t, template <class> class FreeList_t>
  11.144 +class TreeChunk : public Chunk_t {
  11.145 +  friend class TreeList<Chunk_t, FreeList_t>;
  11.146 +  TreeList<Chunk_t, FreeList_t>* _list;
  11.147 +  TreeList<Chunk_t, FreeList_t> _embedded_list;  // if non-null, this chunk is on _list
  11.148 +
  11.149 +  static size_t _min_tree_chunk_size;
  11.150 +
  11.151   protected:
  11.152 -  TreeList<Chunk>* embedded_list() const { return (TreeList<Chunk>*) &_embedded_list; }
  11.153 -  void set_embedded_list(TreeList<Chunk>* v) { _embedded_list = *v; }
  11.154 +  TreeList<Chunk_t, FreeList_t>* embedded_list() const { return (TreeList<Chunk_t, FreeList_t>*) &_embedded_list; }
  11.155 +  void set_embedded_list(TreeList<Chunk_t, FreeList_t>* v) { _embedded_list = *v; }
  11.156   public:
  11.157 -  TreeList<Chunk>* list() { return _list; }
  11.158 -  void set_list(TreeList<Chunk>* v) { _list = v; }
  11.159 -  static TreeChunk<Chunk>* as_TreeChunk(Chunk* fc);
  11.160 +  TreeList<Chunk_t, FreeList_t>* list() { return _list; }
  11.161 +  void set_list(TreeList<Chunk_t, FreeList_t>* v) { _list = v; }
  11.162 +  static TreeChunk<Chunk_t, FreeList_t>* as_TreeChunk(Chunk_t* fc);
  11.163    // Initialize fields in a TreeChunk that should be
  11.164    // initialized when the TreeChunk is being added to
  11.165    // a free list in the tree.
  11.166    void initialize() { embedded_list()->initialize(); }
  11.167  
  11.168 -  Chunk* next() const { return Chunk::next(); }
  11.169 -  Chunk* prev() const { return Chunk::prev(); }
  11.170 -  size_t size() const volatile { return Chunk::size(); }
  11.171 +  Chunk_t* next() const { return Chunk_t::next(); }
  11.172 +  Chunk_t* prev() const { return Chunk_t::prev(); }
  11.173 +  size_t size() const volatile { return Chunk_t::size(); }
  11.174 +
  11.175 +  static size_t min_size() {
  11.176 +    return _min_tree_chunk_size;
  11.177 +  }
  11.178  
  11.179    // debugging
  11.180    void verify_tree_chunk_list() const;
  11.181 +  void assert_is_mangled() const;
  11.182  };
  11.183  
  11.184  
  11.185 -template <class Chunk>
  11.186 -class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
  11.187 +template <class Chunk_t, template <class> class FreeList_t>
  11.188 +class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
  11.189    friend class VMStructs;
  11.190 -  bool       _splay;
  11.191 -  bool       _adaptive_freelists;
  11.192    size_t     _total_size;
  11.193    size_t     _total_free_blocks;
  11.194 -  TreeList<Chunk>* _root;
  11.195 +  TreeList<Chunk_t, FreeList_t>* _root;
  11.196  
  11.197    // private accessors
  11.198 -  bool splay() const { return _splay; }
  11.199 -  void set_splay(bool v) { _splay = v; }
  11.200    void set_total_size(size_t v) { _total_size = v; }
  11.201    virtual void inc_total_size(size_t v);
  11.202    virtual void dec_total_size(size_t v);
  11.203 -  size_t total_free_blocks() const { return _total_free_blocks; }
  11.204    void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
  11.205 -  TreeList<Chunk>* root() const { return _root; }
  11.206 -  void set_root(TreeList<Chunk>* v) { _root = v; }
  11.207 -  bool adaptive_freelists() { return _adaptive_freelists; }
  11.208 +  TreeList<Chunk_t, FreeList_t>* root() const { return _root; }
  11.209 +  void set_root(TreeList<Chunk_t, FreeList_t>* v) { _root = v; }
  11.210  
  11.211    // This field is added and can be set to point to the
  11.212    // the Mutex used to synchronize access to the
  11.213 @@ -191,54 +194,55 @@
  11.214    // return it.  If the chunk
  11.215    // is the last chunk of that size, remove the node for that size
  11.216    // from the tree.
  11.217 -  TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
  11.218 +  TreeChunk<Chunk_t, FreeList_t>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither);
  11.219 +  // Remove this chunk from the tree.  If the removal results
  11.220 +  // in an empty list in the tree, remove the empty list.
  11.221 +  TreeChunk<Chunk_t, FreeList_t>* remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc);
   11.222 +  // Remove the node in the tree starting at tl that has the
  11.223 +  // minimum value and return it.  Repair the tree as needed.
  11.224 +  TreeList<Chunk_t, FreeList_t>* remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl);
  11.225 +  // Add this free chunk to the tree.
  11.226 +  void       insert_chunk_in_tree(Chunk_t* freeChunk);
  11.227 + public:
  11.228 +
  11.229    // Return a list of the specified size or NULL from the tree.
  11.230    // The list is not removed from the tree.
  11.231 -  TreeList<Chunk>* find_list (size_t size) const;
  11.232 -  // Remove this chunk from the tree.  If the removal results
  11.233 -  // in an empty list in the tree, remove the empty list.
  11.234 -  TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
  11.235 -  // Remove the node in the trees starting at tl that has the
  11.236 -  // minimum value and return it.  Repair the tree as needed.
  11.237 -  TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
  11.238 -  void       semi_splay_step(TreeList<Chunk>* tl);
  11.239 -  // Add this free chunk to the tree.
  11.240 -  void       insert_chunk_in_tree(Chunk* freeChunk);
  11.241 - public:
  11.242 -
  11.243 -  static const size_t min_tree_chunk_size  = sizeof(TreeChunk<Chunk>)/HeapWordSize;
  11.244 +  TreeList<Chunk_t, FreeList_t>* find_list (size_t size) const;
  11.245  
  11.246    void       verify_tree() const;
  11.247    // verify that the given chunk is in the tree.
  11.248 -  bool       verify_chunk_in_free_list(Chunk* tc) const;
  11.249 +  bool       verify_chunk_in_free_list(Chunk_t* tc) const;
  11.250   private:
  11.251 -  void          verify_tree_helper(TreeList<Chunk>* tl) const;
  11.252 -  static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);
  11.253 +  void          verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
  11.254 +  static size_t verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl);
  11.255  
  11.256    // Returns the total number of chunks in the list.
  11.257 -  size_t     total_list_length(TreeList<Chunk>* tl) const;
  11.258 +  size_t     total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const;
  11.259    // Returns the total number of words in the chunks in the tree
  11.260    // starting at "tl".
  11.261 -  size_t     total_size_in_tree(TreeList<Chunk>* tl) const;
  11.262 +  size_t     total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
  11.263    // Returns the sum of the square of the size of each block
  11.264    // in the tree starting at "tl".
  11.265 -  double     sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
  11.266 +  double     sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const;
  11.267    // Returns the total number of free blocks in the tree starting
  11.268    // at "tl".
  11.269 -  size_t     total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
  11.270 -  size_t     num_free_blocks() const;
  11.271 -  size_t     treeHeight() const;
  11.272 -  size_t     tree_height_helper(TreeList<Chunk>* tl) const;
  11.273 -  size_t     total_nodes_in_tree(TreeList<Chunk>* tl) const;
  11.274 -  size_t     total_nodes_helper(TreeList<Chunk>* tl) const;
  11.275 +  size_t     total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
  11.276 +  size_t     num_free_blocks()  const;
  11.277 +  size_t     tree_height() const;
  11.278 +  size_t     tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
  11.279 +  size_t     total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
  11.280 +  size_t     total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
  11.281  
  11.282   public:
  11.283    // Constructor
  11.284 -  BinaryTreeDictionary(bool adaptive_freelists, bool splay = false);
  11.285 -  BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);
  11.286 +  BinaryTreeDictionary() :
  11.287 +    _total_size(0), _total_free_blocks(0), _root(0) {}
  11.288 +
  11.289 +  BinaryTreeDictionary(MemRegion mr);
  11.290  
  11.291    // Public accessors
  11.292    size_t total_size() const { return _total_size; }
  11.293 +  size_t total_free_blocks() const { return _total_free_blocks; }
  11.294  
  11.295    // Reset the dictionary to the initial conditions with
  11.296    // a single free chunk.
  11.297 @@ -249,23 +253,24 @@
  11.298  
  11.299    // Return a chunk of size "size" or greater from
  11.300    // the tree.
  11.301 -  // want a better dynamic splay strategy for the future.
  11.302 -  Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
  11.303 -    FreeBlockDictionary<Chunk>::verify_par_locked();
  11.304 -    Chunk* res = get_chunk_from_tree(size, dither, splay());
  11.305 +  Chunk_t* get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
  11.306 +    FreeBlockDictionary<Chunk_t>::verify_par_locked();
  11.307 +    Chunk_t* res = get_chunk_from_tree(size, dither);
  11.308      assert(res == NULL || res->is_free(),
  11.309             "Should be returning a free chunk");
  11.310 +    assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
  11.311 +           res->size() == size, "Not correct size");
  11.312      return res;
  11.313    }
  11.314  
  11.315 -  void return_chunk(Chunk* chunk) {
  11.316 -    FreeBlockDictionary<Chunk>::verify_par_locked();
  11.317 +  void return_chunk(Chunk_t* chunk) {
  11.318 +    FreeBlockDictionary<Chunk_t>::verify_par_locked();
  11.319      insert_chunk_in_tree(chunk);
  11.320    }
  11.321  
  11.322 -  void remove_chunk(Chunk* chunk) {
  11.323 -    FreeBlockDictionary<Chunk>::verify_par_locked();
  11.324 -    remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
  11.325 +  void remove_chunk(Chunk_t* chunk) {
  11.326 +    FreeBlockDictionary<Chunk_t>::verify_par_locked();
  11.327 +    remove_chunk_from_tree((TreeChunk<Chunk_t, FreeList_t>*)chunk);
  11.328      assert(chunk->is_free(), "Should still be a free chunk");
  11.329    }
  11.330  
  11.331 @@ -281,19 +286,19 @@
  11.332    }
  11.333  
  11.334    size_t     min_size() const {
  11.335 -    return min_tree_chunk_size;
  11.336 +    return TreeChunk<Chunk_t, FreeList_t>::min_size();
  11.337    }
  11.338  
  11.339    double     sum_of_squared_block_sizes() const {
  11.340      return sum_of_squared_block_sizes(root());
  11.341    }
  11.342  
  11.343 -  Chunk* find_chunk_ends_at(HeapWord* target) const;
  11.344 +  Chunk_t* find_chunk_ends_at(HeapWord* target) const;
  11.345  
  11.346    // Find the list with size "size" in the binary tree and update
  11.347    // the statistics in the list according to "split" (chunk was
  11.348    // split or coalesce) and "birth" (chunk was added or removed).
  11.349 -  void       dict_census_udpate(size_t size, bool split, bool birth);
  11.350 +  void       dict_census_update(size_t size, bool split, bool birth);
  11.351    // Return true if the dictionary is overpopulated (more chunks of
  11.352    // this size than desired) for size "size".
  11.353    bool       coal_dict_over_populated(size_t size);
  11.354 @@ -307,7 +312,7 @@
  11.355    // statistics for the sweep.
  11.356    void       end_sweep_dict_census(double splitSurplusPercent);
  11.357    // Return the largest free chunk in the tree.
  11.358 -  Chunk* find_largest_dict() const;
  11.359 +  Chunk_t* find_largest_dict() const;
  11.360    // Accessors for statistics
  11.361    void       set_tree_surplus(double splitSurplusPercent);
  11.362    void       set_tree_hints(void);
    12.1 --- a/src/share/vm/memory/freeBlockDictionary.cpp	Fri Oct 19 11:26:17 2012 -0700
    12.2 +++ b/src/share/vm/memory/freeBlockDictionary.cpp	Tue Sep 18 23:35:42 2012 -0700
    12.3 @@ -27,6 +27,8 @@
    12.4  #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
    12.5  #endif // SERIALGC
    12.6  #include "memory/freeBlockDictionary.hpp"
    12.7 +#include "memory/metablock.hpp"
    12.8 +#include "memory/metachunk.hpp"
    12.9  #ifdef TARGET_OS_FAMILY_linux
   12.10  # include "thread_linux.inline.hpp"
   12.11  #endif
   12.12 @@ -62,6 +64,9 @@
   12.13  }
   12.14  #endif
   12.15  
   12.16 +template class FreeBlockDictionary<Metablock>;
   12.17 +template class FreeBlockDictionary<Metachunk>;
   12.18 +
   12.19  #ifndef SERIALGC
   12.20  // Explicitly instantiate for FreeChunk
   12.21  template class FreeBlockDictionary<FreeChunk>;
    13.1 --- a/src/share/vm/memory/freeBlockDictionary.hpp	Fri Oct 19 11:26:17 2012 -0700
    13.2 +++ b/src/share/vm/memory/freeBlockDictionary.hpp	Tue Sep 18 23:35:42 2012 -0700
    13.3 @@ -66,7 +66,7 @@
    13.4    virtual void       reset(HeapWord* addr, size_t size) = 0;
    13.5    virtual void       reset() = 0;
    13.6  
    13.7 -  virtual void       dict_census_udpate(size_t size, bool split, bool birth) = 0;
    13.8 +  virtual void       dict_census_update(size_t size, bool split, bool birth) = 0;
    13.9    virtual bool       coal_dict_over_populated(size_t size) = 0;
   13.10    virtual void       begin_sweep_dict_census(double coalSurplusPercent,
   13.11                         float inter_sweep_current, float inter_sweep_estimate,
    14.1 --- a/src/share/vm/memory/freeList.cpp	Fri Oct 19 11:26:17 2012 -0700
    14.2 +++ b/src/share/vm/memory/freeList.cpp	Tue Sep 18 23:35:42 2012 -0700
    14.3 @@ -25,6 +25,8 @@
    14.4  #include "precompiled.hpp"
    14.5  #include "memory/freeBlockDictionary.hpp"
    14.6  #include "memory/freeList.hpp"
    14.7 +#include "memory/metablock.hpp"
    14.8 +#include "memory/metachunk.hpp"
    14.9  #include "memory/sharedHeap.hpp"
   14.10  #include "runtime/globals.hpp"
   14.11  #include "runtime/mutex.hpp"
   14.12 @@ -49,8 +51,6 @@
   14.13  {
   14.14    _size         = 0;
   14.15    _count        = 0;
   14.16 -  _hint         = 0;
   14.17 -  init_statistics();
   14.18  }
   14.19  
   14.20  template <class Chunk>
   14.21 @@ -62,34 +62,50 @@
   14.22  {
   14.23    _size         = fc->size();
   14.24    _count        = 1;
   14.25 -  _hint         = 0;
   14.26 -  init_statistics();
   14.27 -#ifndef PRODUCT
   14.28 -  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
   14.29 -#endif
   14.30  }
   14.31  
   14.32  template <class Chunk>
   14.33 -void FreeList<Chunk>::reset(size_t hint) {
   14.34 +void FreeList<Chunk>::link_head(Chunk* v) {
   14.35 +  assert_proper_lock_protection();
   14.36 +  set_head(v);
   14.37 +  // If this method is not used (just set the head instead),
   14.38 +  // this check can be avoided.
   14.39 +  if (v != NULL) {
   14.40 +    v->link_prev(NULL);
   14.41 +  }
   14.42 +}
   14.43 +
   14.44 +
   14.45 +
   14.46 +template <class Chunk>
   14.47 +void FreeList<Chunk>::reset() {
   14.48 +  // Don't set the _size to 0 because this method is
    14.49 +  // used with an existing list that has a size but which has
   14.50 +  // been emptied.
   14.51 +  // Don't clear the _protecting_lock of an existing list.
   14.52    set_count(0);
   14.53    set_head(NULL);
   14.54    set_tail(NULL);
   14.55 -  set_hint(hint);
   14.56  }
   14.57  
   14.58  template <class Chunk>
   14.59 -void FreeList<Chunk>::init_statistics(bool split_birth) {
   14.60 -  _allocation_stats.initialize(split_birth);
   14.61 +void FreeList<Chunk>::initialize() {
   14.62 +#ifdef ASSERT
   14.63 +  // Needed early because it might be checked in other initializing code.
   14.64 +  set_protecting_lock(NULL);
   14.65 +#endif
   14.66 +  reset();
   14.67 +  set_size(0);
   14.68  }
   14.69  
   14.70 -template <class Chunk>
   14.71 -Chunk* FreeList<Chunk>::get_chunk_at_head() {
   14.72 +template <class Chunk_t>
   14.73 +Chunk_t* FreeList<Chunk_t>::get_chunk_at_head() {
   14.74    assert_proper_lock_protection();
   14.75    assert(head() == NULL || head()->prev() == NULL, "list invariant");
   14.76    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   14.77 -  Chunk* fc = head();
   14.78 +  Chunk_t* fc = head();
   14.79    if (fc != NULL) {
   14.80 -    Chunk* nextFC = fc->next();
   14.81 +    Chunk_t* nextFC = fc->next();
   14.82      if (nextFC != NULL) {
   14.83        // The chunk fc being removed has a "next".  Set the "next" to the
   14.84        // "prev" of fc.
   14.85 @@ -197,11 +213,6 @@
   14.86      link_tail(chunk);
   14.87    }
   14.88    increment_count(); // of # of chunks in list
   14.89 -  DEBUG_ONLY(
   14.90 -    if (record_return) {
   14.91 -      increment_returned_bytes_by(size()*HeapWordSize);
   14.92 -    }
   14.93 -  )
   14.94    assert(head() == NULL || head()->prev() == NULL, "list invariant");
   14.95    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   14.96    assert(head() == NULL || head()->size() == size(), "wrong item on list");
   14.97 @@ -233,11 +244,6 @@
   14.98    }
   14.99    link_tail(chunk);
  14.100    increment_count();  // of # of chunks in list
  14.101 -  DEBUG_ONLY(
  14.102 -    if (record_return) {
  14.103 -      increment_returned_bytes_by(size()*HeapWordSize);
  14.104 -    }
  14.105 -  )
  14.106    assert(head() == NULL || head()->prev() == NULL, "list invariant");
  14.107    assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  14.108    assert(head() == NULL || head()->size() == size(), "wrong item on list");
  14.109 @@ -273,7 +279,7 @@
  14.110    }
  14.111  }
  14.112  
  14.113 -// verify_chunk_in_free_list() is used to verify that an item is in this free list.
   14.114 +// verify_chunk_in_free_list() is used to verify that an item is in this free list.
  14.115  // It is used as a debugging aid.
  14.116  template <class Chunk>
  14.117  bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
  14.118 @@ -294,40 +300,14 @@
  14.119  
  14.120  #ifndef PRODUCT
  14.121  template <class Chunk>
  14.122 -void FreeList<Chunk>::verify_stats() const {
  14.123 -  // The +1 of the LH comparand is to allow some "looseness" in
  14.124 -  // checking: we usually call this interface when adding a block
  14.125 -  // and we'll subsequently update the stats; we cannot update the
  14.126 -  // stats beforehand because in the case of the large-block BT
  14.127 -  // dictionary for example, this might be the first block and
  14.128 -  // in that case there would be no place that we could record
  14.129 -  // the stats (which are kept in the block itself).
  14.130 -  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
  14.131 -          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
  14.132 -         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
  14.133 -             + (ssize_t)count()),                // Total Current Stock + depletion
  14.134 -         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
  14.135 -                 " violates Conservation Principle: "
  14.136 -                 "prev_sweep(" SIZE_FORMAT ")"
  14.137 -                 " + split_births(" SIZE_FORMAT ")"
  14.138 -                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
  14.139 -                 " split_deaths(" SIZE_FORMAT ")"
  14.140 -                 " coal_deaths(" SIZE_FORMAT ")"
  14.141 -                 " + count(" SSIZE_FORMAT ")",
  14.142 -                 this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
  14.143 -                 _allocation_stats.split_births(), _allocation_stats.split_deaths(),
  14.144 -                 _allocation_stats.coal_deaths(), count()));
  14.145 -}
  14.146 -
  14.147 -template <class Chunk>
  14.148  void FreeList<Chunk>::assert_proper_lock_protection_work() const {
  14.149 -  assert(_protecting_lock != NULL, "Don't call this directly");
  14.150 +  assert(protecting_lock() != NULL, "Don't call this directly");
  14.151    assert(ParallelGCThreads > 0, "Don't call this directly");
  14.152    Thread* thr = Thread::current();
  14.153    if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
  14.154      // assert that we are holding the freelist lock
  14.155    } else if (thr->is_GC_task_thread()) {
  14.156 -    assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
  14.157 +    assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
  14.158    } else if (thr->is_Java_thread()) {
  14.159      assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
  14.160    } else {
  14.161 @@ -350,21 +330,17 @@
  14.162  // to the call is a non-null string, it is printed in the first column;
  14.163  // otherwise, if the argument is null (the default), then the size of the
  14.164  // (free list) block is printed in the first column.
  14.165 -template <class Chunk>
  14.166 -void FreeList<Chunk>::print_on(outputStream* st, const char* c) const {
  14.167 +template <class Chunk_t>
  14.168 +void FreeList<Chunk_t>::print_on(outputStream* st, const char* c) const {
  14.169    if (c != NULL) {
  14.170      st->print("%16s", c);
  14.171    } else {
  14.172      st->print(SIZE_FORMAT_W(16), size());
  14.173    }
  14.174 -  st->print("\t"
  14.175 -           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
  14.176 -           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
  14.177 -           bfr_surp(),             surplus(),             desired(),             prev_sweep(),           before_sweep(),
  14.178 -           count(),               coal_births(),          coal_deaths(),          split_births(),         split_deaths());
  14.179  }
  14.180  
  14.181 +template class FreeList<Metablock>;
  14.182 +template class FreeList<Metachunk>;
  14.183  #ifndef SERIALGC
  14.184 -// Needs to be after the definitions have been seen.
  14.185  template class FreeList<FreeChunk>;
  14.186  #endif // SERIALGC
    15.1 --- a/src/share/vm/memory/freeList.hpp	Fri Oct 19 11:26:17 2012 -0700
    15.2 +++ b/src/share/vm/memory/freeList.hpp	Tue Sep 18 23:35:42 2012 -0700
    15.3 @@ -40,23 +40,19 @@
    15.4  // for that implementation.
    15.5  
    15.6  class Mutex;
    15.7 -template <class Chunk> class TreeList;
    15.8 -template <class Chunk> class PrintTreeCensusClosure;
    15.9  
   15.10 -template <class Chunk>
   15.11 +template <class Chunk_t>
   15.12  class FreeList VALUE_OBJ_CLASS_SPEC {
   15.13    friend class CompactibleFreeListSpace;
   15.14    friend class VMStructs;
   15.15 -  friend class PrintTreeCensusClosure<Chunk>;
   15.16  
   15.17   private:
   15.18 -  Chunk*        _head;          // Head of list of free chunks
   15.19 -  Chunk*        _tail;          // Tail of list of free chunks
   15.20 +  Chunk_t*      _head;          // Head of list of free chunks
   15.21 +  Chunk_t*      _tail;          // Tail of list of free chunks
   15.22    size_t        _size;          // Size in Heap words of each chunk
   15.23    ssize_t       _count;         // Number of entries in list
   15.24 -  size_t        _hint;          // next larger size list with a positive surplus
   15.25  
   15.26 -  AllocationStats _allocation_stats; // allocation-related statistics
   15.27 + protected:
   15.28  
   15.29  #ifdef ASSERT
   15.30    Mutex*        _protecting_lock;
   15.31 @@ -71,10 +67,6 @@
   15.32  #endif
   15.33    }
   15.34  
   15.35 -  // Initialize the allocation statistics.
   15.36 - protected:
   15.37 -  void init_statistics(bool split_birth = false);
   15.38 -  void set_count(ssize_t v) { _count = v;}
   15.39    void increment_count()    {
   15.40      _count++;
   15.41    }
   15.42 @@ -89,52 +81,48 @@
   15.43    // Construct a list without any entries.
   15.44    FreeList();
   15.45    // Construct a list with "fc" as the first (and lone) entry in the list.
   15.46 -  FreeList(Chunk* fc);
   15.47 +  FreeList(Chunk_t* fc);
   15.48  
   15.49 -  // Reset the head, tail, hint, and count of a free list.
   15.50 -  void reset(size_t hint);
   15.51 +  // Do initialization
   15.52 +  void initialize();
   15.53 +
   15.54 +  // Reset the head, tail, and count of a free list.
   15.55 +  void reset();
   15.56  
   15.57    // Declare the current free list to be protected by the given lock.
   15.58  #ifdef ASSERT
   15.59 -  void set_protecting_lock(Mutex* protecting_lock) {
   15.60 -    _protecting_lock = protecting_lock;
   15.61 +  Mutex* protecting_lock() const { return _protecting_lock; }
   15.62 +  void set_protecting_lock(Mutex* v) {
   15.63 +    _protecting_lock = v;
   15.64    }
   15.65  #endif
   15.66  
   15.67    // Accessors.
   15.68 -  Chunk* head() const {
   15.69 +  Chunk_t* head() const {
   15.70      assert_proper_lock_protection();
   15.71      return _head;
   15.72    }
   15.73 -  void set_head(Chunk* v) {
   15.74 +  void set_head(Chunk_t* v) {
   15.75      assert_proper_lock_protection();
   15.76      _head = v;
   15.77      assert(!_head || _head->size() == _size, "bad chunk size");
   15.78    }
   15.79    // Set the head of the list and set the prev field of non-null
   15.80    // values to NULL.
   15.81 -  void link_head(Chunk* v) {
   15.82 -    assert_proper_lock_protection();
   15.83 -    set_head(v);
   15.84 -    // If this method is not used (just set the head instead),
   15.85 -    // this check can be avoided.
   15.86 -    if (v != NULL) {
   15.87 -      v->link_prev(NULL);
   15.88 -    }
   15.89 -  }
   15.90 +  void link_head(Chunk_t* v);
   15.91  
   15.92 -  Chunk* tail() const {
   15.93 +  Chunk_t* tail() const {
   15.94      assert_proper_lock_protection();
   15.95      return _tail;
   15.96    }
   15.97 -  void set_tail(Chunk* v) {
   15.98 +  void set_tail(Chunk_t* v) {
   15.99      assert_proper_lock_protection();
  15.100      _tail = v;
  15.101      assert(!_tail || _tail->size() == _size, "bad chunk size");
  15.102    }
  15.103    // Set the tail of the list and set the next field of non-null
  15.104    // values to NULL.
  15.105 -  void link_tail(Chunk* v) {
  15.106 +  void link_tail(Chunk_t* v) {
  15.107      assert_proper_lock_protection();
  15.108      set_tail(v);
  15.109      if (v != NULL) {
  15.110 @@ -152,174 +140,45 @@
  15.111      assert_proper_lock_protection();
  15.112      _size = v;
  15.113    }
  15.114 -  ssize_t count() const {
  15.115 -    return _count;
  15.116 -  }
  15.117 -  size_t hint() const {
  15.118 -    return _hint;
  15.119 -  }
  15.120 -  void set_hint(size_t v) {
  15.121 -    assert_proper_lock_protection();
  15.122 -    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
  15.123 -  }
  15.124 +  ssize_t count() const { return _count; }
  15.125 +  void set_count(ssize_t v) { _count = v;}
  15.126  
  15.127 -  // Accessors for statistics
  15.128 -  AllocationStats* allocation_stats() {
  15.129 -    assert_proper_lock_protection();
  15.130 -    return &_allocation_stats;
  15.131 -  }
  15.132 +  size_t get_better_size() { return size(); }
  15.133  
  15.134 -  ssize_t desired() const {
  15.135 -    return _allocation_stats.desired();
  15.136 -  }
  15.137 -  void set_desired(ssize_t v) {
  15.138 -    assert_proper_lock_protection();
  15.139 -    _allocation_stats.set_desired(v);
  15.140 -  }
  15.141 -  void compute_desired(float inter_sweep_current,
  15.142 -                       float inter_sweep_estimate,
  15.143 -                       float intra_sweep_estimate) {
  15.144 -    assert_proper_lock_protection();
  15.145 -    _allocation_stats.compute_desired(_count,
  15.146 -                                      inter_sweep_current,
  15.147 -                                      inter_sweep_estimate,
  15.148 -                                      intra_sweep_estimate);
  15.149 -  }
  15.150 -  ssize_t coal_desired() const {
  15.151 -    return _allocation_stats.coal_desired();
  15.152 -  }
  15.153 -  void set_coal_desired(ssize_t v) {
  15.154 -    assert_proper_lock_protection();
  15.155 -    _allocation_stats.set_coal_desired(v);
  15.156 -  }
  15.157 -
  15.158 -  ssize_t surplus() const {
  15.159 -    return _allocation_stats.surplus();
  15.160 -  }
  15.161 -  void set_surplus(ssize_t v) {
  15.162 -    assert_proper_lock_protection();
  15.163 -    _allocation_stats.set_surplus(v);
  15.164 -  }
  15.165 -  void increment_surplus() {
  15.166 -    assert_proper_lock_protection();
  15.167 -    _allocation_stats.increment_surplus();
  15.168 -  }
  15.169 -  void decrement_surplus() {
  15.170 -    assert_proper_lock_protection();
  15.171 -    _allocation_stats.decrement_surplus();
  15.172 -  }
  15.173 -
  15.174 -  ssize_t bfr_surp() const {
  15.175 -    return _allocation_stats.bfr_surp();
  15.176 -  }
  15.177 -  void set_bfr_surp(ssize_t v) {
  15.178 -    assert_proper_lock_protection();
  15.179 -    _allocation_stats.set_bfr_surp(v);
  15.180 -  }
  15.181 -  ssize_t prev_sweep() const {
  15.182 -    return _allocation_stats.prev_sweep();
  15.183 -  }
  15.184 -  void set_prev_sweep(ssize_t v) {
  15.185 -    assert_proper_lock_protection();
  15.186 -    _allocation_stats.set_prev_sweep(v);
  15.187 -  }
  15.188 -  ssize_t before_sweep() const {
  15.189 -    return _allocation_stats.before_sweep();
  15.190 -  }
  15.191 -  void set_before_sweep(ssize_t v) {
  15.192 -    assert_proper_lock_protection();
  15.193 -    _allocation_stats.set_before_sweep(v);
  15.194 -  }
  15.195 -
  15.196 -  ssize_t coal_births() const {
  15.197 -    return _allocation_stats.coal_births();
  15.198 -  }
  15.199 -  void set_coal_births(ssize_t v) {
  15.200 -    assert_proper_lock_protection();
  15.201 -    _allocation_stats.set_coal_births(v);
  15.202 -  }
  15.203 -  void increment_coal_births() {
  15.204 -    assert_proper_lock_protection();
  15.205 -    _allocation_stats.increment_coal_births();
  15.206 -  }
  15.207 -
  15.208 -  ssize_t coal_deaths() const {
  15.209 -    return _allocation_stats.coal_deaths();
  15.210 -  }
  15.211 -  void set_coal_deaths(ssize_t v) {
  15.212 -    assert_proper_lock_protection();
  15.213 -    _allocation_stats.set_coal_deaths(v);
  15.214 -  }
  15.215 -  void increment_coal_deaths() {
  15.216 -    assert_proper_lock_protection();
  15.217 -    _allocation_stats.increment_coal_deaths();
  15.218 -  }
  15.219 -
  15.220 -  ssize_t split_births() const {
  15.221 -    return _allocation_stats.split_births();
  15.222 -  }
  15.223 -  void set_split_births(ssize_t v) {
  15.224 -    assert_proper_lock_protection();
  15.225 -    _allocation_stats.set_split_births(v);
  15.226 -  }
  15.227 -  void increment_split_births() {
  15.228 -    assert_proper_lock_protection();
  15.229 -    _allocation_stats.increment_split_births();
  15.230 -  }
  15.231 -
  15.232 -  ssize_t split_deaths() const {
  15.233 -    return _allocation_stats.split_deaths();
  15.234 -  }
  15.235 -  void set_split_deaths(ssize_t v) {
  15.236 -    assert_proper_lock_protection();
  15.237 -    _allocation_stats.set_split_deaths(v);
  15.238 -  }
  15.239 -  void increment_split_deaths() {
  15.240 -    assert_proper_lock_protection();
  15.241 -    _allocation_stats.increment_split_deaths();
  15.242 -  }
  15.243 -
  15.244 -  NOT_PRODUCT(
  15.245 -    // For debugging.  The "_returned_bytes" in all the lists are summed
  15.246 -    // and compared with the total number of bytes swept during a
  15.247 -    // collection.
  15.248 -    size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
  15.249 -    void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
  15.250 -    void increment_returned_bytes_by(size_t v) {
  15.251 -      _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
  15.252 -    }
  15.253 -  )
  15.254 +  size_t returned_bytes() const { ShouldNotReachHere(); return 0; }
  15.255 +  void set_returned_bytes(size_t v) {}
  15.256 +  void increment_returned_bytes_by(size_t v) {}
  15.257  
  15.258    // Unlink head of list and return it.  Returns NULL if
  15.259    // the list is empty.
  15.260 -  Chunk* get_chunk_at_head();
  15.261 +  Chunk_t* get_chunk_at_head();
  15.262  
  15.263    // Remove the first "n" or "count", whichever is smaller, chunks from the
  15.264    // list, setting "fl", which is required to be empty, to point to them.
  15.265 -  void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
  15.266 +  void getFirstNChunksFromList(size_t n, FreeList<Chunk_t>* fl);
  15.267  
  15.268    // Unlink this chunk from it's free list
  15.269 -  void remove_chunk(Chunk* fc);
  15.270 +  void remove_chunk(Chunk_t* fc);
  15.271  
  15.272    // Add this chunk to this free list.
  15.273 -  void return_chunk_at_head(Chunk* fc);
  15.274 -  void return_chunk_at_tail(Chunk* fc);
  15.275 +  void return_chunk_at_head(Chunk_t* fc);
  15.276 +  void return_chunk_at_tail(Chunk_t* fc);
  15.277  
  15.278    // Similar to returnChunk* but also records some diagnostic
  15.279    // information.
  15.280 -  void return_chunk_at_head(Chunk* fc, bool record_return);
  15.281 -  void return_chunk_at_tail(Chunk* fc, bool record_return);
  15.282 +  void return_chunk_at_head(Chunk_t* fc, bool record_return);
  15.283 +  void return_chunk_at_tail(Chunk_t* fc, bool record_return);
  15.284  
  15.285    // Prepend "fl" (whose size is required to be the same as that of "this")
  15.286    // to the front of "this" list.
  15.287 -  void prepend(FreeList<Chunk>* fl);
  15.288 +  void prepend(FreeList<Chunk_t>* fl);
  15.289  
  15.290    // Verify that the chunk is in the list.
  15.291    // found.  Return NULL if "fc" is not found.
  15.292 -  bool verify_chunk_in_free_list(Chunk* fc) const;
  15.293 +  bool verify_chunk_in_free_list(Chunk_t* fc) const;
  15.294  
  15.295    // Stats verification
  15.296 -  void verify_stats() const PRODUCT_RETURN;
  15.297 +//  void verify_stats() const { ShouldNotReachHere(); };
  15.298  
  15.299    // Printing support
  15.300    static void print_labels_on(outputStream* st, const char* c);
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/src/share/vm/memory/metablock.hpp	Tue Sep 18 23:35:42 2012 -0700
    16.3 @@ -0,0 +1,103 @@
    16.4 +/*
    16.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    16.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.7 + *
    16.8 + * This code is free software; you can redistribute it and/or modify it
    16.9 + * under the terms of the GNU General Public License version 2 only, as
   16.10 + * published by the Free Software Foundation.
   16.11 + *
   16.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   16.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   16.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   16.15 + * version 2 for more details (a copy is included in the LICENSE file that
   16.16 + * accompanied this code).
   16.17 + *
   16.18 + * You should have received a copy of the GNU General Public License version
   16.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   16.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   16.21 + *
   16.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   16.23 + * or visit www.oracle.com if you need additional information or have any
   16.24 + * questions.
   16.25 + *
   16.26 + */
   16.27 +#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
   16.28 +#define SHARE_VM_MEMORY_METABLOCK_HPP
   16.29 +
   16.30 +// Metablock are the unit of allocation from a Chunk.  It is initialized
   16.31 +// with the size of the requested allocation.  That size is overwritten
   16.32 +// once the allocation returns.
   16.33 +//
   16.34 +// A Metablock may be reused by its SpaceManager but are never moved between
   16.35 +// SpaceManagers.  There is no explicit link to the Metachunk
   16.36 +// from which it was allocated.  Metablock may be deallocated and
   16.37 +// put on a freelist but the space is never freed, rather
   16.38 +// the Metachunk it is a part of will be deallocated when it's
   16.39 +// associated class loader is collected.
   16.40 +
   16.41 +class Metablock VALUE_OBJ_CLASS_SPEC {
   16.42 +  friend class VMStructs;
   16.43 + private:
   16.44 +  // Used to align the allocation (see below).
   16.45 +  union block_t {
   16.46 +    void* _data[3];
   16.47 +    struct header_t {
   16.48 +      size_t _word_size;
   16.49 +      Metablock* _next;
   16.50 +      Metablock* _prev;
   16.51 +    } _header;
   16.52 +  } _block;
   16.53 +  static size_t _min_block_byte_size;
   16.54 +  static size_t _overhead;
   16.55 +
   16.56 +  typedef union block_t Block;
   16.57 +  typedef struct header_t Header;
   16.58 +  const Block* block() const { return &_block; }
   16.59 +  const Block::header_t* header() const { return &(block()->_header); }
   16.60 + public:
   16.61 +
   16.62 +  static Metablock* initialize(MetaWord* p, size_t word_size);
   16.63 +
   16.64 +  // This places the body of the block at a 2 word boundary
   16.65 +  // because every block starts on a 2 word boundary.  Work out
   16.66 +  // how to make the body on a 2 word boundary if the block
   16.67 +  // starts on a arbitrary boundary.  JJJ
   16.68 +
   16.69 +  size_t word_size() const  { return header()->_word_size; }
   16.70 +  void set_word_size(size_t v) { _block._header._word_size = v; }
   16.71 +  size_t size() const volatile { return _block._header._word_size; }
   16.72 +  void set_size(size_t v) { _block._header._word_size = v; }
   16.73 +  Metablock* next() const { return header()->_next; }
   16.74 +  void set_next(Metablock* v) { _block._header._next = v; }
   16.75 +  Metablock* prev() const { return header()->_prev; }
   16.76 +  void set_prev(Metablock* v) { _block._header._prev = v; }
   16.77 +
   16.78 +  static size_t min_block_byte_size() { return _min_block_byte_size; }
   16.79 +  static size_t overhead() { return _overhead; }
   16.80 +
   16.81 +  bool is_free()                 { return header()->_word_size != 0; }
   16.82 +  void clear_next()              { set_next(NULL); }
   16.83 +  void link_prev(Metablock* ptr) { set_prev(ptr); }
   16.84 +  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
   16.85 +  bool cantCoalesce() const     { return false; }
   16.86 +  void link_next(Metablock* ptr) { set_next(ptr); }
   16.87 +  void link_after(Metablock* ptr){
   16.88 +    link_next(ptr);
   16.89 +    if (ptr != NULL) ptr->link_prev(this);
   16.90 +  }
   16.91 +
   16.92 +  // Should not be needed in a free list of Metablocks
   16.93 +  void markNotFree()            { ShouldNotReachHere(); }
   16.94 +
   16.95 +  // Debug support
   16.96 +#ifdef ASSERT
   16.97 +  void* prev_addr() const { return (void*)&_block._header._prev; }
   16.98 +  void* next_addr() const { return (void*)&_block._header._next; }
   16.99 +  void* size_addr() const { return (void*)&_block._header._word_size; }
  16.100 +#endif
  16.101 +  bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
  16.102 +  bool verify_par_locked() { return true; }
  16.103 +
  16.104 +  void assert_is_mangled() const {/* Don't check "\*/}
  16.105 +};
  16.106 +#endif // SHARE_VM_MEMORY_METABLOCK_HPP
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/src/share/vm/memory/metachunk.hpp	Tue Sep 18 23:35:42 2012 -0700
    17.3 @@ -0,0 +1,133 @@
    17.4 +/*
    17.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    17.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    17.7 + *
    17.8 + * This code is free software; you can redistribute it and/or modify it
    17.9 + * under the terms of the GNU General Public License version 2 only, as
   17.10 + * published by the Free Software Foundation.
   17.11 + *
   17.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   17.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   17.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   17.15 + * version 2 for more details (a copy is included in the LICENSE file that
   17.16 + * accompanied this code).
   17.17 + *
   17.18 + * You should have received a copy of the GNU General Public License version
   17.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   17.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   17.21 + *
   17.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   17.23 + * or visit www.oracle.com if you need additional information or have any
   17.24 + * questions.
   17.25 + *
   17.26 + */
   17.27 +#ifndef SHARE_VM_MEMORY_METACHUNK_HPP
   17.28 +#define SHARE_VM_MEMORY_METACHUNK_HPP
   17.29 +
   17.30 +//  Metachunk - Quantum of allocation from a Virtualspace
   17.31 +//    Metachunks are reused (when freed are put on a global freelist) and
   17.32 +//    have no permanent association to a SpaceManager.
   17.33 +
   17.34 +//            +--------------+ <- end
   17.35 +//            |              |          --+       ---+
   17.36 +//            |              |            | free     |
   17.37 +//            |              |            |          |
   17.38 +//            |              |            |          | capacity
   17.39 +//            |              |            |          |
   17.40 +//            |              | <- top   --+          |
   17.41 +//            |              |           ---+        |
   17.42 +//            |              |              | used   |
   17.43 +//            |              |              |        |
   17.44 +//            |              |              |        |
   17.45 +//            +--------------+ <- bottom ---+     ---+
   17.46 +
   17.47 +class Metachunk VALUE_OBJ_CLASS_SPEC {
   17.48 +  // link to support lists of chunks
   17.49 +  Metachunk* _next;
   17.50 +  Metachunk* _prev;
   17.51 +
   17.52 +  MetaWord* _bottom;
   17.53 +  MetaWord* _end;
   17.54 +  MetaWord* _top;
   17.55 +  size_t _word_size;
   17.56 +  // Used in a guarantee() so included in the Product builds
   17.57 +  // even through it is only for debugging.
   17.58 +  bool _is_free;
   17.59 +
   17.60 +  // Metachunks are allocated out of a MetadataVirtualSpace and
   17.61 +  // and use some of its space to describe itself (plus alignment
   17.62 +  // considerations).  Metadata is allocated in the rest of the chunk.
   17.63 +  // This size is the overhead of maintaining the Metachunk within
   17.64 +  // the space.
   17.65 +  static size_t _overhead;
   17.66 +
   17.67 +  void set_bottom(MetaWord* v) { _bottom = v; }
   17.68 +  void set_end(MetaWord* v) { _end = v; }
   17.69 +  void set_top(MetaWord* v) { _top = v; }
   17.70 +  void set_word_size(size_t v) { _word_size = v; }
   17.71 + public:
   17.72 +#ifdef ASSERT
   17.73 +  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false) {}
   17.74 +#else
   17.75 +  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL) {}
   17.76 +#endif
   17.77 +
   17.78 +  // Used to add a Metachunk to a list of Metachunks
   17.79 +  void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
   17.80 +  void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
   17.81 +
   17.82 +  MetaWord* allocate(size_t word_size);
   17.83 +  static Metachunk* initialize(MetaWord* ptr, size_t word_size);
   17.84 +
   17.85 +  // Accessors
   17.86 +  Metachunk* next() const { return _next; }
   17.87 +  Metachunk* prev() const { return _prev; }
   17.88 +  MetaWord* bottom() const { return _bottom; }
   17.89 +  MetaWord* end() const { return _end; }
   17.90 +  MetaWord* top() const { return _top; }
   17.91 +  size_t word_size() const { return _word_size; }
   17.92 +  size_t size() const volatile { return _word_size; }
   17.93 +  void set_size(size_t v) { _word_size = v; }
   17.94 +  bool is_free() { return _is_free; }
   17.95 +  void set_is_free(bool v) { _is_free = v; }
   17.96 +  static size_t overhead() { return _overhead; }
   17.97 +  void clear_next()              { set_next(NULL); }
   17.98 +  void link_prev(Metachunk* ptr) { set_prev(ptr); }
   17.99 +  uintptr_t* end()              { return ((uintptr_t*) this) + size(); }
  17.100 +  bool cantCoalesce() const     { return false; }
  17.101 +  void link_next(Metachunk* ptr) { set_next(ptr); }
  17.102 +  void link_after(Metachunk* ptr){
  17.103 +    link_next(ptr);
  17.104 +    if (ptr != NULL) ptr->link_prev(this);
  17.105 +  }
  17.106 +
  17.107 +  // Reset top to bottom so chunk can be reused.
  17.108 +  void reset_empty() { _top = (_bottom + _overhead); }
  17.109 +  bool is_empty() { return _top == (_bottom + _overhead); }
  17.110 +
  17.111 +  // used (has been allocated)
  17.112 +  // free (available for future allocations)
  17.113 +  // capacity (total size of chunk)
  17.114 +  size_t used_word_size();
  17.115 +  size_t free_word_size();
  17.116 +  size_t capacity_word_size();
  17.117 +
  17.118 +  // Debug support
  17.119 +#ifdef ASSERT
  17.120 +  void* prev_addr() const { return (void*)&_prev; }
  17.121 +  void* next_addr() const { return (void*)&_next; }
  17.122 +  void* size_addr() const { return (void*)&_word_size; }
  17.123 +#endif
  17.124 +  bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
  17.125 +  bool verify_par_locked() { return true; }
  17.126 +
  17.127 +  void assert_is_mangled() const {/* Don't check "\*/}
  17.128 +
  17.129 +#ifdef ASSERT
  17.130 +  void mangle();
  17.131 +#endif // ASSERT
  17.132 +
  17.133 +  void print_on(outputStream* st) const;
  17.134 +  void verify();
  17.135 +};
  17.136 +#endif  // SHARE_VM_MEMORY_METACHUNK_HPP
    18.1 --- a/src/share/vm/memory/metaspace.cpp	Fri Oct 19 11:26:17 2012 -0700
    18.2 +++ b/src/share/vm/memory/metaspace.cpp	Tue Sep 18 23:35:42 2012 -0700
    18.3 @@ -24,9 +24,12 @@
    18.4  #include "precompiled.hpp"
    18.5  #include "gc_interface/collectedHeap.hpp"
    18.6  #include "memory/binaryTreeDictionary.hpp"
    18.7 +#include "memory/freeList.hpp"
    18.8  #include "memory/collectorPolicy.hpp"
    18.9  #include "memory/filemap.hpp"
   18.10  #include "memory/freeList.hpp"
   18.11 +#include "memory/metablock.hpp"
   18.12 +#include "memory/metachunk.hpp"
   18.13  #include "memory/metaspace.hpp"
   18.14  #include "memory/metaspaceShared.hpp"
   18.15  #include "memory/resourceArea.hpp"
   18.16 @@ -37,15 +40,8 @@
   18.17  #include "utilities/copy.hpp"
   18.18  #include "utilities/debug.hpp"
   18.19  
   18.20 -// Define this macro to deallocate Metablock.  If not defined,
   18.21 -// blocks are not yet deallocated and are only mangled.
   18.22 -#undef DEALLOCATE_BLOCKS
   18.23 -
   18.24 -// Easily recognizable patterns
   18.25 -// These patterns can be the same in 32bit or 64bit since
   18.26 -// they only have to be easily recognizable.
   18.27 -const void* metaspace_allocation_leader = (void*) 0X11111111;
   18.28 -const void* metaspace_allocation_trailer = (void*) 0X77777777;
   18.29 +typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
   18.30 +typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
   18.31  
   18.32  // Parameters for stress mode testing
   18.33  const uint metadata_deallocate_a_lot_block = 10;
   18.34 @@ -53,7 +49,6 @@
   18.35  size_t const allocation_from_dictionary_limit = 64 * K;
   18.36  const size_t metadata_chunk_initialize = 0xf7f7f7f7;
   18.37  const size_t metadata_deallocate = 0xf5f5f5f5;
   18.38 -const size_t metadata_space_manager_allocate = 0xf3f3f3f3;
   18.39  
   18.40  MetaWord* last_allocated = 0;
   18.41  
   18.42 @@ -62,11 +57,12 @@
   18.43    SmallIndex = 0,
   18.44    MediumIndex = 1,
   18.45    HumongousIndex = 2,
   18.46 -  NumberOfFreeLists = 3
   18.47 +  NumberOfFreeLists = 2,
   18.48 +  NumberOfInUseLists = 3
   18.49  };
   18.50  
   18.51  static ChunkIndex next_chunk_index(ChunkIndex i) {
   18.52 -  assert(i < NumberOfFreeLists, "Out of bound");
   18.53 +  assert(i < NumberOfInUseLists, "Out of bound");
   18.54    return (ChunkIndex) (i+1);
   18.55  }
   18.56  
   18.57 @@ -100,164 +96,13 @@
   18.58  // the Chunk after the header for the Chunk) where as Metachunks
   18.59  // point to space in a VirtualSpace.  To replace Metachunks with
   18.60  // Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
   18.61 -//
   18.62 -
   18.63 -// Metablock are the unit of allocation from a Chunk.  It contains
   18.64 -// the size of the requested allocation in a debug build.
   18.65 -// Also in a debug build it has a marker before and after the
   18.66 -// body of the block. The address of the body is the address returned
   18.67 -// by the allocation.
   18.68 -//
   18.69 -// Layout in a debug build.  In a product build only the body is present.
   18.70 -//
   18.71 -//     +-----------+-----------+------------+     +-----------+
   18.72 -//     | word size | leader    | body       | ... | trailer   |
   18.73 -//     +-----------+-----------+------------+     +-----------+
   18.74 -//
   18.75 -// A Metablock may be reused by its SpaceManager but are never moved between
   18.76 -// SpaceManagers.  There is no explicit link to the Metachunk
   18.77 -// from which it was allocated.  Metablock are not deallocated, rather
   18.78 -// the Metachunk it is a part of will be deallocated when it's
   18.79 -// associated class loader is collected.
   18.80 -//
   18.81 -// When the word size of a block is passed in to the deallocation
   18.82 -// call the word size no longer needs to be part of a Metablock.
   18.83 -
   18.84 -class Metablock {
   18.85 -  friend class VMStructs;
   18.86 - private:
   18.87 -  // Used to align the allocation (see below) and for debugging.
   18.88 +size_t Metablock::_min_block_byte_size = sizeof(Metablock);
   18.89  #ifdef ASSERT
   18.90 -  struct {
   18.91 -    size_t _word_size;
   18.92 -    void*  _leader;
   18.93 -  } _header;
   18.94 -  void* _data[1];
   18.95 +  size_t Metablock::_overhead =
   18.96 +    Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
   18.97 +#else
   18.98 +  size_t Metablock::_overhead = 0;
   18.99  #endif
  18.100 -  static size_t _overhead;
  18.101 -
  18.102 -#ifdef ASSERT
  18.103 -  void set_word_size(size_t v) { _header._word_size = v; }
  18.104 -  void* leader() { return _header._leader; }
  18.105 -  void* trailer() {
  18.106 -    jlong index = (jlong) _header._word_size - sizeof(_header)/BytesPerWord - 1;
  18.107 -    assert(index > 0, err_msg("Bad indexling of trailer %d", index));
  18.108 -    void** ptr = &_data[index];
  18.109 -    return *ptr;
  18.110 -  }
  18.111 -  void set_leader(void* v) { _header._leader = v; }
  18.112 -  void set_trailer(void* v) {
  18.113 -    void** ptr = &_data[_header._word_size - sizeof(_header)/BytesPerWord - 1];
  18.114 -    *ptr = v;
  18.115 -  }
  18.116 - public:
  18.117 -  size_t word_size() { return _header._word_size; }
  18.118 -#endif
  18.119 - public:
  18.120 -
  18.121 -  static Metablock* initialize(MetaWord* p, size_t word_size);
  18.122 -
  18.123 -  // This places the body of the block at a 2 word boundary
  18.124 -  // because every block starts on a 2 word boundary.  Work out
  18.125 -  // how to make the body on a 2 word boundary if the block
  18.126 -  // starts on a arbitrary boundary.  JJJ
  18.127 -
  18.128 -#ifdef ASSERT
  18.129 -  MetaWord* data() { return (MetaWord*) &_data[0]; }
  18.130 -#else
  18.131 -  MetaWord* data() { return (MetaWord*) this; }
  18.132 -#endif
  18.133 -  static Metablock* metablock_from_data(MetaWord* p) {
  18.134 -#ifdef ASSERT
  18.135 -    size_t word_offset = offset_of(Metablock, _data)/BytesPerWord;
  18.136 -    Metablock* result = (Metablock*) (p - word_offset);
  18.137 -    return result;
  18.138 -#else
  18.139 -    return (Metablock*) p;
  18.140 -#endif
  18.141 -  }
  18.142 -
  18.143 -  static size_t overhead() { return _overhead; }
  18.144 -  void verify();
  18.145 -};
  18.146 -
  18.147 -//  Metachunk - Quantum of allocation from a Virtualspace
  18.148 -//    Metachunks are reused (when freed are put on a global freelist) and
  18.149 -//    have no permanent association to a SpaceManager.
  18.150 -
  18.151 -//            +--------------+ <- end
  18.152 -//            |              |          --+       ---+
  18.153 -//            |              |            | free     |
  18.154 -//            |              |            |          |
  18.155 -//            |              |            |          | capacity
  18.156 -//            |              |            |          |
  18.157 -//            |              | <- top   --+          |
  18.158 -//            |              |           ---+        |
  18.159 -//            |              |              | used   |
  18.160 -//            |              |              |        |
  18.161 -//            |              |              |        |
  18.162 -//            +--------------+ <- bottom ---+     ---+
  18.163 -
  18.164 -class Metachunk VALUE_OBJ_CLASS_SPEC {
  18.165 -  // link to support lists of chunks
  18.166 -  Metachunk* _next;
  18.167 -
  18.168 -  MetaWord* _bottom;
  18.169 -  MetaWord* _end;
  18.170 -  MetaWord* _top;
  18.171 -  size_t _word_size;
  18.172 -
  18.173 -  // Metachunks are allocated out of a MetadataVirtualSpace and
  18.174 -  // and use some of its space to describe itself (plus alignment
  18.175 -  // considerations).  Metadata is allocated in the rest of the chunk.
  18.176 -  // This size is the overhead of maintaining the Metachunk within
  18.177 -  // the space.
  18.178 -  static size_t _overhead;
  18.179 -
  18.180 -  void set_bottom(MetaWord* v) { _bottom = v; }
  18.181 -  void set_end(MetaWord* v) { _end = v; }
  18.182 -  void set_top(MetaWord* v) { _top = v; }
  18.183 -  void set_word_size(size_t v) { _word_size = v; }
  18.184 - public:
  18.185 -
  18.186 -  // Used to add a Metachunk to a list of Metachunks
  18.187 -  void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
  18.188 -
  18.189 -  Metablock* allocate(size_t word_size);
  18.190 -  static Metachunk* initialize(MetaWord* ptr, size_t word_size);
  18.191 -
  18.192 -  // Accessors
  18.193 -  Metachunk* next() const { return _next; }
  18.194 -  MetaWord* bottom() const { return _bottom; }
  18.195 -  MetaWord* end() const { return _end; }
  18.196 -  MetaWord* top() const { return _top; }
  18.197 -  size_t word_size() const { return _word_size; }
  18.198 -  static size_t overhead() { return _overhead; }
  18.199 -
  18.200 -  // Reset top to bottom so chunk can be reused.
  18.201 -  void reset_empty() { _top = (_bottom + _overhead); }
  18.202 -  bool is_empty() { return _top == (_bottom + _overhead); }
  18.203 -
  18.204 -  // used (has been allocated)
  18.205 -  // free (available for future allocations)
  18.206 -  // capacity (total size of chunk)
  18.207 -  size_t used_word_size();
  18.208 -  size_t free_word_size();
  18.209 -  size_t capacity_word_size();
  18.210 -
  18.211 -#ifdef ASSERT
  18.212 -  void mangle() {
  18.213 -    // Mangle the payload of the chunk and not the links that
  18.214 -    // maintain list of chunks.
  18.215 -    HeapWord* start = (HeapWord*)(bottom() + overhead());
  18.216 -    size_t word_size = capacity_word_size() - overhead();
  18.217 -    Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
  18.218 -  }
  18.219 -#endif // ASSERT
  18.220 -
  18.221 -  void print_on(outputStream* st) const;
  18.222 -  void verify();
  18.223 -};
  18.224  
  18.225  
  18.226  // Pointer to list of Metachunks.
  18.227 @@ -292,7 +137,10 @@
  18.228    //   SmallChunk
  18.229    //   MediumChunk
  18.230    //   HumongousChunk
  18.231 -  ChunkList _free_chunks[3];
  18.232 +  ChunkList _free_chunks[NumberOfFreeLists];
  18.233 +
  18.234 +  //   HumongousChunk
  18.235 +  ChunkTreeDictionary _humongous_dictionary;
  18.236  
  18.237    // ChunkManager in all lists of this type
  18.238    size_t _free_chunks_total;
  18.239 @@ -337,7 +185,9 @@
  18.240    }
  18.241    ChunkList* free_medium_chunks() { return &_free_chunks[1]; }
  18.242    ChunkList* free_small_chunks() { return &_free_chunks[0]; }
  18.243 -  ChunkList* free_humongous_chunks() { return &_free_chunks[2]; }
  18.244 +  ChunkTreeDictionary* humongous_dictionary() {
  18.245 +    return &_humongous_dictionary;
  18.246 +  }
  18.247  
  18.248    ChunkList* free_chunks(ChunkIndex index);
  18.249  
  18.250 @@ -356,41 +206,35 @@
  18.251  
  18.252    void locked_print_free_chunks(outputStream* st);
  18.253    void locked_print_sum_free_chunks(outputStream* st);
  18.254 +
  18.255 +  void print_on(outputStream* st);
  18.256  };
  18.257  
  18.258  
  18.259  // Used to manage the free list of Metablocks (a block corresponds
  18.260  // to the allocation of a quantum of metadata).
  18.261  class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  18.262 -#ifdef DEALLOCATE_BLOCKS
  18.263 -  BinaryTreeDictionary<Metablock>* _dictionary;
  18.264 -#endif
  18.265 -  static Metablock* initialize_free_chunk(Metablock* block, size_t word_size);
  18.266 -
  18.267 -#ifdef DEALLOCATE_BLOCKS
  18.268 +  BlockTreeDictionary* _dictionary;
  18.269 +  static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
  18.270 +
  18.271    // Accessors
  18.272 -  BinaryTreeDictionary<Metablock>* dictionary() const { return _dictionary; }
  18.273 -#endif
  18.274 +  BlockTreeDictionary* dictionary() const { return _dictionary; }
  18.275  
  18.276   public:
  18.277    BlockFreelist();
  18.278    ~BlockFreelist();
  18.279  
  18.280    // Get and return a block to the free list
  18.281 -  Metablock* get_block(size_t word_size);
  18.282 -  void return_block(Metablock* block, size_t word_size);
  18.283 -
  18.284 -  size_t totalSize() {
  18.285 -#ifdef DEALLOCATE_BLOCKS
  18.286 -    if (dictionary() == NULL) {
  18.287 -      return 0;
  18.288 -    } else {
  18.289 -      return dictionary()->totalSize();
  18.290 -    }
  18.291 -#else
  18.292 +  MetaWord* get_block(size_t word_size);
  18.293 +  void return_block(MetaWord* p, size_t word_size);
  18.294 +
  18.295 +  size_t total_size() {
  18.296 +  if (dictionary() == NULL) {
  18.297      return 0;
  18.298 -#endif
  18.299 +  } else {
  18.300 +    return dictionary()->total_size();
  18.301    }
  18.302 +}
  18.303  
  18.304    void print_on(outputStream* st) const;
  18.305  };
  18.306 @@ -600,7 +444,6 @@
  18.307    };
  18.308  };
  18.309  
  18.310 -
  18.311  class Metadebug : AllStatic {
  18.312    // Debugging support for Metaspaces
  18.313    static int _deallocate_block_a_lot_count;
  18.314 @@ -655,7 +498,7 @@
  18.315    // List of chunks in use by this SpaceManager.  Allocations
  18.316    // are done from the current chunk.  The list is used for deallocating
  18.317    // chunks when the SpaceManager is freed.
  18.318 -  Metachunk* _chunks_in_use[NumberOfFreeLists];
  18.319 +  Metachunk* _chunks_in_use[NumberOfInUseLists];
  18.320    Metachunk* _current_chunk;
  18.321  
  18.322    // Virtual space where allocation comes from.
  18.323 @@ -700,24 +543,6 @@
  18.324    // Add chunk to the list of chunks in use
  18.325    void add_chunk(Metachunk* v, bool make_current);
  18.326  
  18.327 -  // Debugging support
  18.328 -  void verify_chunks_in_use_index(ChunkIndex index, Metachunk* v) {
  18.329 -    switch (index) {
  18.330 -    case 0:
  18.331 -      assert(v->word_size() == SmallChunk, "Not a SmallChunk");
  18.332 -      break;
  18.333 -    case 1:
  18.334 -      assert(v->word_size() == MediumChunk, "Not a MediumChunk");
  18.335 -      break;
  18.336 -    case 2:
  18.337 -      assert(v->word_size() > MediumChunk, "Not a HumongousChunk");
  18.338 -      break;
  18.339 -    default:
  18.340 -      assert(false, "Wrong list.");
  18.341 -    }
  18.342 -  }
  18.343 -
  18.344 - protected:
  18.345    Mutex* lock() const { return _lock; }
  18.346  
  18.347   public:
  18.348 @@ -751,10 +576,10 @@
  18.349    MetaWord* allocate(size_t word_size);
  18.350  
  18.351    // Helper for allocations
  18.352 -  Metablock* allocate_work(size_t word_size);
  18.353 +  MetaWord* allocate_work(size_t word_size);
  18.354  
  18.355    // Returns a block to the per manager freelist
  18.356 -  void deallocate(MetaWord* p);
  18.357 +  void deallocate(MetaWord* p, size_t word_size);
  18.358  
  18.359    // Based on the allocation size and a minimum chunk size,
  18.360    // returned chunk size (for expanding space for chunk allocation).
  18.361 @@ -763,7 +588,7 @@
  18.362    // Called when an allocation from the current chunk fails.
  18.363    // Gets a new chunk (may require getting a new virtual space),
  18.364    // and allocates from that chunk.
  18.365 -  Metablock* grow_and_allocate(size_t word_size);
  18.366 +  MetaWord* grow_and_allocate(size_t word_size);
  18.367  
  18.368    // debugging support.
  18.369  
  18.370 @@ -780,6 +605,8 @@
  18.371  
  18.372  uint const SpaceManager::_small_chunk_limit = 4;
  18.373  
  18.374 +
  18.375 +
  18.376  const char* SpaceManager::_expand_lock_name =
  18.377    "SpaceManager chunk allocation lock";
  18.378  const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
  18.379 @@ -788,39 +615,26 @@
  18.380              SpaceManager::_expand_lock_name,
  18.381              Mutex::_allow_vm_block_flag);
  18.382  
  18.383 -#ifdef ASSERT
  18.384 -size_t Metablock::_overhead =
  18.385 -  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
  18.386 -#else
  18.387 -size_t Metablock::_overhead = 0;
  18.388 -#endif
  18.389  size_t Metachunk::_overhead =
  18.390    Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
  18.391  
  18.392  // New blocks returned by the Metaspace are zero initialized.
  18.393  // We should fix the constructors to not assume this instead.
  18.394  Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
  18.395 +  if (p == NULL) {
  18.396 +    return NULL;
  18.397 +  }
  18.398 +
  18.399    Metablock* result = (Metablock*) p;
  18.400  
  18.401    // Clear the memory
  18.402    Copy::fill_to_aligned_words((HeapWord*)result, word_size);
  18.403  #ifdef ASSERT
  18.404    result->set_word_size(word_size);
  18.405 -  // Check after work size is set.
  18.406 -  result->set_leader((void*) metaspace_allocation_leader);
  18.407 -  result->set_trailer((void*) metaspace_allocation_trailer);
  18.408  #endif
  18.409    return result;
  18.410  }
  18.411  
  18.412 -void Metablock::verify() {
  18.413 -#ifdef ASSERT
  18.414 -  assert(leader() == metaspace_allocation_leader &&
  18.415 -         trailer() == metaspace_allocation_trailer,
  18.416 -         "block has been corrupted");
  18.417 -#endif
  18.418 -}
  18.419 -
  18.420  // Metachunk methods
  18.421  
  18.422  Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
  18.423 @@ -843,18 +657,13 @@
  18.424  }
  18.425  
  18.426  
  18.427 -Metablock* Metachunk::allocate(size_t word_size) {
  18.428 -  Metablock* result = NULL;
  18.429 +MetaWord* Metachunk::allocate(size_t word_size) {
  18.430 +  MetaWord* result = NULL;
  18.431    // If available, bump the pointer to allocate.
  18.432    if (free_word_size() >= word_size) {
  18.433 -    result = Metablock::initialize(_top, word_size);
  18.434 +    result = _top;
  18.435      _top = _top + word_size;
  18.436    }
  18.437 -#ifdef ASSERT
  18.438 -  assert(result == NULL ||
  18.439 -         result->word_size() == word_size,
  18.440 -         "Block size is not set correctly");
  18.441 -#endif
  18.442    return result;
  18.443  }
  18.444  
  18.445 @@ -878,103 +687,85 @@
  18.446                 bottom(), top(), end(), word_size());
  18.447  }
  18.448  
  18.449 +#ifdef ASSERT
  18.450 +void Metachunk::mangle() {
  18.451 +  // Mangle the payload of the chunk and not the links that
  18.452 +  // maintain the list of chunks.
  18.453 +  HeapWord* start = (HeapWord*)(bottom() + overhead());
  18.454 +  size_t word_size = capacity_word_size() - overhead();
  18.455 +  Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
  18.456 +}
  18.457 +#endif // ASSERT
  18.458  
  18.459  void Metachunk::verify() {
  18.460  #ifdef ASSERT
  18.461    // Cannot walk through the blocks unless the blocks have
  18.462    // headers with sizes.
  18.463 -  MetaWord* curr = bottom() + overhead();
  18.464 -  while (curr < top()) {
  18.465 -    Metablock* block = (Metablock*) curr;
  18.466 -    size_t word_size = block->word_size();
  18.467 -    block->verify();
  18.468 -    curr = curr + word_size;
  18.469 -  }
  18.470 +  assert(_bottom <= _top &&
  18.471 +         _top <= _end,
  18.472 +         "Chunk has been smashed");
  18.473 +  assert(SpaceManager::is_humongous(_word_size) ||
  18.474 +         _word_size == SpaceManager::MediumChunk ||
  18.475 +         _word_size == SpaceManager::SmallChunk,
  18.476 +         "Chunk size is wrong");
  18.477  #endif
  18.478    return;
  18.479  }
  18.480  
  18.481  // BlockFreelist methods
  18.482  
  18.483 -#ifdef DEALLOCATE_BLOCKS
  18.484  BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
  18.485 -#else
  18.486 -BlockFreelist::BlockFreelist() {}
  18.487 -#endif
  18.488  
  18.489  BlockFreelist::~BlockFreelist() {
  18.490 -#ifdef DEALLOCATE_BLOCKS
  18.491    if (_dictionary != NULL) {
  18.492      if (Verbose && TraceMetadataChunkAllocation) {
  18.493        _dictionary->print_free_lists(gclog_or_tty);
  18.494      }
  18.495      delete _dictionary;
  18.496    }
  18.497 -#endif
  18.498  }
  18.499  
  18.500 -Metablock* BlockFreelist::initialize_free_chunk(Metablock* block, size_t word_size) {
  18.501 -#ifdef DEALLOCATE_BLOCKS
  18.502 -#ifdef ASSERT
  18.503 -  assert(word_size = block->word_size(), "Wrong chunk size");
  18.504 -#endif
  18.505 -  Metablock* result = block;
  18.506 -  result->setSize(word_size);
  18.507 -  result->linkPrev(NULL);
  18.508 -  result->linkNext(NULL);
  18.509 -
  18.510 -  return result;
  18.511 -#else
  18.512 -  ShouldNotReachHere();
  18.513 +Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
  18.514 +  Metablock* block = (Metablock*) p;
  18.515 +  block->set_word_size(word_size);
  18.516 +  block->set_prev(NULL);
  18.517 +  block->set_next(NULL);
  18.518 +
  18.519    return block;
  18.520 -#endif
  18.521  }
  18.522  
  18.523 -void BlockFreelist::return_block(Metablock* block, size_t word_size) {
  18.524 -#ifdef ASSERT
  18.525 -  assert(word_size = block->word_size(), "Block size is wrong");;
  18.526 -#endif
  18.527 -  Metablock* free_chunk = initialize_free_chunk(block, word_size);
  18.528 -#ifdef DEALLOCATE_BLOCKS
  18.529 +void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  18.530 +  Metablock* free_chunk = initialize_free_chunk(p, word_size);
  18.531    if (dictionary() == NULL) {
  18.532 -   _dictionary = new BinaryTreeDictionary<Metablock>(false /* adaptive_freelists */);
  18.533 +   _dictionary = new BlockTreeDictionary();
  18.534    }
  18.535 -  dictionary()->returnChunk(free_chunk);
  18.536 -#endif
  18.537 +  dictionary()->return_chunk(free_chunk);
  18.538  }
  18.539  
  18.540 -Metablock* BlockFreelist::get_block(size_t word_size) {
  18.541 -#ifdef DEALLOCATE_BLOCKS
  18.542 +MetaWord* BlockFreelist::get_block(size_t word_size) {
  18.543    if (dictionary() == NULL) {
  18.544      return NULL;
  18.545    }
  18.546  
  18.547 -  Metablock* free_chunk =
  18.548 -    dictionary()->getChunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  18.549 -#else
  18.550 -  Metablock* free_chunk = NULL;
  18.551 -#endif
  18.552 -  if (free_chunk == NULL) {
  18.553 +  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
  18.554 +    // Dark matter.  Too small for dictionary.
  18.555      return NULL;
  18.556    }
  18.557 -  assert(free_chunk->word_size() == word_size, "Size of chunk is incorrect");
  18.558 -  Metablock* block = Metablock::initialize((MetaWord*) free_chunk, word_size);
  18.559 -#ifdef ASSERT
  18.560 -  assert(block->word_size() == word_size, "Block size is not set correctly");
  18.561 -#endif
  18.562 -
  18.563 -  return block;
  18.564 +
  18.565 +  Metablock* free_block =
  18.566 +    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  18.567 +  if (free_block == NULL) {
  18.568 +    return NULL;
  18.569 +  }
  18.570 +
  18.571 +  return (MetaWord*) free_block;
  18.572  }
  18.573  
  18.574  void BlockFreelist::print_on(outputStream* st) const {
  18.575 -#ifdef DEALLOCATE_BLOCKS
  18.576    if (dictionary() == NULL) {
  18.577      return;
  18.578    }
  18.579    dictionary()->print_free_lists(st);
  18.580 -#else
  18.581 -  return;
  18.582 -#endif
  18.583  }
  18.584  
  18.585  // VirtualSpaceNode methods
  18.586 @@ -1597,14 +1388,11 @@
  18.587          Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
  18.588      Metadebug::set_deallocate_block_a_lot_count(0);
  18.589      for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
  18.590 -      Metablock* dummy_block = sm->allocate_work(raw_word_size);
  18.591 +      MetaWord* dummy_block = sm->allocate_work(raw_word_size);
  18.592        if (dummy_block == 0) {
  18.593          break;
  18.594        }
  18.595 -#ifdef ASSERT
  18.596 -      assert(dummy_block->word_size() == raw_word_size, "Block size is not set correctly");
  18.597 -#endif
  18.598 -      sm->deallocate(dummy_block->data());
  18.599 +      sm->deallocate(dummy_block, raw_word_size);
  18.600      }
  18.601    } else {
  18.602      Metadebug::inc_deallocate_block_a_lot_count();
  18.603 @@ -1784,8 +1572,8 @@
  18.604  }
  18.605  
  18.606  void ChunkManager::locked_verify() {
  18.607 +  locked_verify_free_chunks_count();
  18.608    locked_verify_free_chunks_total();
  18.609 -  locked_verify_free_chunks_count();
  18.610  }
  18.611  
  18.612  void ChunkManager::locked_print_free_chunks(outputStream* st) {
  18.613 @@ -1803,7 +1591,6 @@
  18.614    return &_free_chunks[index];
  18.615  }
  18.616  
  18.617 -
  18.618  // These methods that sum the free chunk lists are used in printing
  18.619  // methods that are used in product builds.
  18.620  size_t ChunkManager::sum_free_chunks() {
  18.621 @@ -1818,6 +1605,7 @@
  18.622  
  18.623      result = result + list->sum_list_capacity();
  18.624    }
  18.625 +  result = result + humongous_dictionary()->total_size();
  18.626    return result;
  18.627  }
  18.628  
  18.629 @@ -1831,6 +1619,7 @@
  18.630      }
  18.631      count = count + list->sum_list_count();
  18.632    }
  18.633 +  count = count + humongous_dictionary()->total_free_blocks();
  18.634    return count;
  18.635  }
  18.636  
  18.637 @@ -1875,23 +1664,24 @@
  18.638    assert_lock_strong(SpaceManager::expand_lock());
  18.639  
  18.640    locked_verify();
  18.641 -  ChunkList* free_list = find_free_chunks_list(word_size);
  18.642 -  assert(free_list != NULL, "Sanity check");
  18.643 -
  18.644 -  Metachunk* chunk = free_list->head();
  18.645 -  debug_only(Metachunk* debug_head = chunk;)
  18.646 -
  18.647 -  if (chunk == NULL) {
  18.648 -    return NULL;
  18.649 -  }
  18.650 -
  18.651 -  Metachunk* prev_chunk = chunk;
  18.652 -  if (chunk->word_size() == word_size) {
  18.653 -    // Chunk is being removed from the chunks free list.
  18.654 -    dec_free_chunks_total(chunk->capacity_word_size());
  18.655 +
  18.656 +  Metachunk* chunk = NULL;
  18.657 +  if (!SpaceManager::is_humongous(word_size)) {
  18.658 +    ChunkList* free_list = find_free_chunks_list(word_size);
  18.659 +    assert(free_list != NULL, "Sanity check");
  18.660 +
  18.661 +    chunk = free_list->head();
  18.662 +    debug_only(Metachunk* debug_head = chunk;)
  18.663 +
  18.664 +    if (chunk == NULL) {
  18.665 +      return NULL;
  18.666 +    }
  18.667 +
  18.668      // Remove the chunk as the head of the list.
  18.669      free_list->set_head(chunk->next());
  18.670      chunk->set_next(NULL);
  18.671 +    // Chunk has been removed from the chunks free list.
  18.672 +    dec_free_chunks_total(chunk->capacity_word_size());
  18.673  
  18.674      if (TraceMetadataChunkAllocation && Verbose) {
  18.675        tty->print_cr("ChunkManager::free_chunks_get: free_list "
  18.676 @@ -1899,79 +1689,24 @@
  18.677                      free_list, chunk, chunk->word_size());
  18.678      }
  18.679    } else {
  18.680 -    assert(SpaceManager::is_humongous(word_size),
  18.681 -      "Should only need to check humongous");
  18.682 -    // This code to find the best fit is just for purposes of
  18.683 -    // investigating the loss due to fragmentation on a humongous
  18.684 -    // chunk.  It will be replace by a binaryTreeDictionary for
  18.685 -    // the humongous chunks.
  18.686 -    uint count = 0;
  18.687 -    Metachunk* best_fit = NULL;
  18.688 -    Metachunk* best_fit_prev = NULL;
  18.689 -    while (chunk != NULL) {
  18.690 -      count++;
  18.691 -      if (chunk->word_size() < word_size) {
  18.692 -        prev_chunk = chunk;
  18.693 -        chunk = chunk->next();
  18.694 -      } else if (chunk->word_size() == word_size) {
  18.695 -        break;
  18.696 -      } else {
  18.697 -        if (best_fit == NULL ||
  18.698 -            best_fit->word_size() > chunk->word_size()) {
  18.699 -          best_fit_prev = prev_chunk;
  18.700 -          best_fit = chunk;
  18.701 -        }
  18.702 -        prev_chunk = chunk;
  18.703 -        chunk = chunk->next();
  18.704 +    chunk = humongous_dictionary()->get_chunk(
  18.705 +      word_size,
  18.706 +      FreeBlockDictionary<Metachunk>::atLeast);
  18.707 +
  18.708 +    if (chunk != NULL) {
  18.709 +      if (TraceMetadataHumongousAllocation) {
  18.710 +        size_t waste = chunk->word_size() - word_size;
  18.711 +        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
  18.712 +                      " for requested size " SIZE_FORMAT
  18.713 +                      " waste " SIZE_FORMAT,
  18.714 +                      chunk->word_size(), word_size, waste);
  18.715        }
  18.716 +      // Chunk is being removed from the chunks free list.
  18.717 +      dec_free_chunks_total(chunk->capacity_word_size());
  18.718 +#ifdef ASSERT
  18.719 +      chunk->set_is_free(false);
  18.720 +#endif
  18.721      }
  18.722 -      if (chunk == NULL) {
  18.723 -        prev_chunk = best_fit_prev;
  18.724 -        chunk = best_fit;
  18.725 -      }
  18.726 -      if (chunk != NULL) {
  18.727 -        if (TraceMetadataHumongousAllocation) {
  18.728 -          size_t waste = chunk->word_size() - word_size;
  18.729 -          tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
  18.730 -                        " for requested size " SIZE_FORMAT
  18.731 -                        " waste " SIZE_FORMAT
  18.732 -                        " found at " SIZE_FORMAT " of " SIZE_FORMAT,
  18.733 -                        chunk->word_size(), word_size, waste,
  18.734 -                        count, free_list->sum_list_count());
  18.735 -        }
  18.736 -        // Chunk is being removed from the chunks free list.
  18.737 -        dec_free_chunks_total(chunk->capacity_word_size());
  18.738 -        // Remove the chunk if it is at the head of the list.
  18.739 -        if (chunk == free_list->head()) {
  18.740 -          free_list->set_head(chunk->next());
  18.741 -
  18.742 -          if (TraceMetadataHumongousAllocation) {
  18.743 -            tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
  18.744 -                          PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
  18.745 -                          " new head " PTR_FORMAT,
  18.746 -                          free_list, chunk, chunk->word_size(),
  18.747 -                          free_list->head());
  18.748 -          }
  18.749 -        } else {
  18.750 -          // Remove a chunk in the interior of the list
  18.751 -          prev_chunk->set_next(chunk->next());
  18.752 -
  18.753 -          if (TraceMetadataHumongousAllocation) {
  18.754 -            tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
  18.755 -                          PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
  18.756 -                          PTR_FORMAT "  prev " PTR_FORMAT " next " PTR_FORMAT,
  18.757 -                          free_list, chunk, chunk->word_size(),
  18.758 -                          prev_chunk, chunk->next());
  18.759 -          }
  18.760 -        }
  18.761 -        chunk->set_next(NULL);
  18.762 -      } else {
  18.763 -        if (TraceMetadataHumongousAllocation) {
  18.764 -          tty->print_cr("ChunkManager::free_chunks_get: New humongous chunk of size "
  18.765 -                        SIZE_FORMAT,
  18.766 -                        word_size);
  18.767 -        }
  18.768 -      }
  18.769    }
  18.770    locked_verify();
  18.771    return chunk;
  18.772 @@ -2000,12 +1735,18 @@
  18.773    return chunk;
  18.774  }
  18.775  
  18.776 +void ChunkManager::print_on(outputStream* out) {
  18.777 +  if (PrintFLSStatistics != 0) {
  18.778 +    humongous_dictionary()->report_statistics();
  18.779 +  }
  18.780 +}
  18.781 +
  18.782  // SpaceManager methods
  18.783  
  18.784  size_t SpaceManager::sum_free_in_chunks_in_use() const {
  18.785    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  18.786    size_t free = 0;
  18.787 -  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  18.788 +  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  18.789      Metachunk* chunk = chunks_in_use(i);
  18.790      while (chunk != NULL) {
  18.791        free += chunk->free_word_size();
  18.792 @@ -2018,11 +1759,12 @@
  18.793  size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  18.794    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  18.795    size_t result = 0;
  18.796 -  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  18.797 -   // Count the free space in all the chunk but not the
  18.798 -   // current chunk from which allocations are still being done.
  18.799 +  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  18.800 +
  18.801 +
  18.802     result += sum_waste_in_chunks_in_use(i);
  18.803    }
  18.804 +
  18.805    return result;
  18.806  }
  18.807  
  18.808 @@ -2033,10 +1775,10 @@
  18.809    // Count the free space in all the chunk but not the
  18.810    // current chunk from which allocations are still being done.
  18.811    if (chunk != NULL) {
  18.812 -    while (chunk != NULL) {
  18.813 -      if (chunk != current_chunk()) {
  18.814 -        result += chunk->free_word_size();
  18.815 -      }
  18.816 +    Metachunk* prev = chunk;
  18.817 +    while (chunk != NULL && chunk != current_chunk()) {
  18.818 +      result += chunk->free_word_size();
  18.819 +      prev = chunk;
  18.820        chunk = chunk->next();
  18.821        count++;
  18.822      }
  18.823 @@ -2047,7 +1789,7 @@
  18.824  size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  18.825    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  18.826    size_t sum = 0;
  18.827 -  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  18.828 +  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  18.829      Metachunk* chunk = chunks_in_use(i);
  18.830      while (chunk != NULL) {
  18.831        // Just changed this sum += chunk->capacity_word_size();
  18.832 @@ -2061,9 +1803,10 @@
  18.833  
  18.834  size_t SpaceManager::sum_count_in_chunks_in_use() {
  18.835    size_t count = 0;
  18.836 -  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  18.837 +  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  18.838      count = count + sum_count_in_chunks_in_use(i);
  18.839    }
  18.840 +
  18.841    return count;
  18.842  }
  18.843  
  18.844 @@ -2081,7 +1824,7 @@
  18.845  size_t SpaceManager::sum_used_in_chunks_in_use() const {
  18.846    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  18.847    size_t used = 0;
  18.848 -  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  18.849 +  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  18.850      Metachunk* chunk = chunks_in_use(i);
  18.851      while (chunk != NULL) {
  18.852        used += chunk->used_word_size();
  18.853 @@ -2139,15 +1882,13 @@
  18.854      gclog_or_tty->print_cr("  word_size " PTR_FORMAT, word_size);
  18.855      gclog_or_tty->print_cr("  chunk_word_size " PTR_FORMAT,
  18.856                             chunk_word_size);
  18.857 -    gclog_or_tty->print_cr("    block overhead " PTR_FORMAT
  18.858 -                           " chunk overhead " PTR_FORMAT,
  18.859 -                           Metablock::overhead(),
  18.860 +    gclog_or_tty->print_cr("    chunk overhead " PTR_FORMAT,
  18.861                             Metachunk::overhead());
  18.862    }
  18.863    return chunk_word_size;
  18.864  }
  18.865  
  18.866 -Metablock* SpaceManager::grow_and_allocate(size_t word_size) {
  18.867 +MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  18.868    assert(vs_list()->current_virtual_space() != NULL,
  18.869           "Should have been set");
  18.870    assert(current_chunk() == NULL ||
  18.871 @@ -2180,7 +1921,7 @@
  18.872  void SpaceManager::print_on(outputStream* st) const {
  18.873  
  18.874    for (ChunkIndex i = SmallIndex;
  18.875 -       i < NumberOfFreeLists ;
  18.876 +       i < NumberOfInUseLists ;
  18.877         i = next_chunk_index(i) ) {
  18.878      st->print_cr("  chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
  18.879                   chunks_in_use(i),
  18.880 @@ -2191,8 +1932,11 @@
  18.881                 sum_waste_in_chunks_in_use(SmallIndex),
  18.882                 sum_waste_in_chunks_in_use(MediumIndex),
  18.883                 sum_waste_in_chunks_in_use(HumongousIndex));
  18.884 -  // Nothing in them yet
  18.885 -  // block_freelists()->print_on(st);
  18.886 +  // block free lists
  18.887 +  if (block_freelists() != NULL) {
  18.888 +    st->print_cr("total in block free lists " SIZE_FORMAT,
  18.889 +      block_freelists()->total_size());
  18.890 +  }
  18.891  }
  18.892  
  18.893  SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) :
  18.894 @@ -2200,7 +1944,7 @@
  18.895    _allocation_total(0),
  18.896    _lock(lock) {
  18.897    Metadebug::init_allocation_fail_alot_count();
  18.898 -  for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
  18.899 +  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
  18.900      _chunks_in_use[i] = NULL;
  18.901    }
  18.902    _current_chunk = NULL;
  18.903 @@ -2262,22 +2006,24 @@
  18.904    // Humongous chunks are never the current chunk.
  18.905    Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
  18.906  
  18.907 -  if (humongous_chunks != NULL) {
  18.908 -    chunk_manager->free_humongous_chunks()->add_at_head(humongous_chunks);
  18.909 -    set_chunks_in_use(HumongousIndex, NULL);
  18.910 +  while (humongous_chunks != NULL) {
  18.911 +#ifdef ASSERT
  18.912 +    humongous_chunks->set_is_free(true);
  18.913 +#endif
  18.914 +    Metachunk* next_humongous_chunks = humongous_chunks->next();
  18.915 +    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
  18.916 +    humongous_chunks = next_humongous_chunks;
  18.917    }
  18.918 +  set_chunks_in_use(HumongousIndex, NULL);
  18.919    chunk_manager->locked_verify();
  18.920  }
  18.921  
  18.922 -void SpaceManager::deallocate(MetaWord* p) {
  18.923 +void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
  18.924    assert_lock_strong(_lock);
  18.925 -  ShouldNotReachHere();  // Where is this needed.
  18.926 -#ifdef DEALLOCATE_BLOCKS
  18.927 -  Metablock* block = Metablock::metablock_from_data(p);
  18.928 -  // This is expense but kept it until integration JJJ
  18.929 -  assert(contains((address)block), "Block does not belong to this metaspace");
  18.930 -  block_freelists()->return_block(block, word_size);
  18.931 -#endif
  18.932 +  size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
  18.933 +  assert(word_size >= min_size,
  18.934 +    err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
  18.935 +  block_freelists()->return_block(p, word_size);
  18.936  }
  18.937  
  18.938  // Adds a chunk to the list of chunks in use.
  18.939 @@ -2366,50 +2112,40 @@
  18.940  MetaWord* SpaceManager::allocate(size_t word_size) {
  18.941    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  18.942  
  18.943 -  size_t block_overhead = Metablock::overhead();
  18.944    // If only the dictionary is going to be used (i.e., no
  18.945    // indexed free list), then there is a minimum size requirement.
  18.946    // MinChunkSize is a placeholder for the real minimum size JJJ
  18.947 -  size_t byte_size_with_overhead = (word_size + block_overhead) * BytesPerWord;
  18.948 -#ifdef DEALLOCATE_BLOCKS
  18.949 -  size_t raw_bytes_size = MAX2(ARENA_ALIGN(byte_size_with_overhead),
  18.950 -                               MinChunkSize * BytesPerWord);
  18.951 -#else
  18.952 -  size_t raw_bytes_size = ARENA_ALIGN(byte_size_with_overhead);
  18.953 -#endif
  18.954 +  size_t byte_size = word_size * BytesPerWord;
  18.955 +
  18.956 +  size_t byte_size_with_overhead = byte_size + Metablock::overhead();
  18.957 +
  18.958 +  size_t raw_bytes_size = MAX2(byte_size_with_overhead,
  18.959 +                               Metablock::min_block_byte_size());
  18.960 +  raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
  18.961    size_t raw_word_size = raw_bytes_size / BytesPerWord;
  18.962    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
  18.963  
  18.964    BlockFreelist* fl =  block_freelists();
  18.965 -  Metablock* block = NULL;
  18.966 +  MetaWord* p = NULL;
  18.967    // Allocation from the dictionary is expensive in the sense that
  18.968    // the dictionary has to be searched for a size.  Don't allocate
  18.969    // from the dictionary until it starts to get fat.  Is this
  18.970    // a reasonable policy?  Maybe an skinny dictionary is fast enough
  18.971    // for allocations.  Do some profiling.  JJJ
  18.972 -  if (fl->totalSize() > allocation_from_dictionary_limit) {
  18.973 -    block = fl->get_block(raw_word_size);
  18.974 +  if (fl->total_size() > allocation_from_dictionary_limit) {
  18.975 +    p = fl->get_block(raw_word_size);
  18.976    }
  18.977 -  if (block == NULL) {
  18.978 -    block = allocate_work(raw_word_size);
  18.979 -    if (block == NULL) {
  18.980 -      return NULL;
  18.981 -    }
  18.982 +  if (p == NULL) {
  18.983 +    p = allocate_work(raw_word_size);
  18.984    }
  18.985    Metadebug::deallocate_block_a_lot(this, raw_word_size);
  18.986  
  18.987 -  // Push the allocation past the word containing the size and leader.
  18.988 -#ifdef ASSERT
  18.989 -  MetaWord* result =  block->data();
  18.990 -  return result;
  18.991 -#else
  18.992 -  return (MetaWord*) block;
  18.993 -#endif
  18.994 +  return p;
  18.995  }
  18.996  
  18.997  // Returns the address of spaced allocated for "word_size".
  18.998  // This methods does not know about blocks (Metablocks)
  18.999 -Metablock* SpaceManager::allocate_work(size_t word_size) {
 18.1000 +MetaWord* SpaceManager::allocate_work(size_t word_size) {
 18.1001    assert_lock_strong(_lock);
 18.1002  #ifdef ASSERT
 18.1003    if (Metadebug::test_metadata_failure()) {
 18.1004 @@ -2417,7 +2153,7 @@
 18.1005    }
 18.1006  #endif
 18.1007    // Is there space in the current chunk?
 18.1008 -  Metablock* result = NULL;
 18.1009 +  MetaWord* result = NULL;
 18.1010  
 18.1011    // For DumpSharedSpaces, only allocate out of the current chunk which is
 18.1012    // never null because we gave it the size we wanted.   Caller reports out
 18.1013 @@ -2436,8 +2172,8 @@
 18.1014    }
 18.1015    if (result > 0) {
 18.1016      inc_allocation_total(word_size);
 18.1017 -    assert(result != (Metablock*) chunks_in_use(MediumIndex), "Head of the list is being allocated");
 18.1018 -    assert(result->word_size() == word_size, "Size not set correctly");
 18.1019 +    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
 18.1020 +           "Head of the list is being allocated");
 18.1021    }
 18.1022  
 18.1023    return result;
 18.1024 @@ -2447,13 +2183,13 @@
 18.1025    // If there are blocks in the dictionary, then
 18.1026    // verfication of chunks does not work since
 18.1027    // being in the dictionary alters a chunk.
 18.1028 -  if (block_freelists()->totalSize() == 0) {
 18.1029 +  if (block_freelists()->total_size() == 0) {
 18.1030      // Skip the small chunks because their next link points to
 18.1031      // medium chunks.  This is because the small chunk is the
 18.1032      // current chunk (for allocations) until it is full and the
 18.1033      // the addition of the next chunk does not NULL the next
 18.1034      // like of the small chunk.
 18.1035 -    for (ChunkIndex i = MediumIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
 18.1036 +    for (ChunkIndex i = MediumIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
 18.1037        Metachunk* curr = chunks_in_use(i);
 18.1038        while (curr != NULL) {
 18.1039          curr->verify();
 18.1040 @@ -2492,7 +2228,7 @@
 18.1041  
 18.1042    // Add up statistics for all chunks in this SpaceManager.
 18.1043    for (ChunkIndex index = SmallIndex;
 18.1044 -       index < NumberOfFreeLists;
 18.1045 +       index < NumberOfInUseLists;
 18.1046         index = next_chunk_index(index)) {
 18.1047      for (Metachunk* curr = chunks_in_use(index);
 18.1048           curr != NULL;
 18.1049 @@ -2521,7 +2257,7 @@
 18.1050  #ifdef ASSERT
 18.1051  void SpaceManager::mangle_freed_chunks() {
 18.1052    for (ChunkIndex index = SmallIndex;
 18.1053 -       index < NumberOfFreeLists;
 18.1054 +       index < NumberOfInUseLists;
 18.1055         index = next_chunk_index(index)) {
 18.1056      for (Metachunk* curr = chunks_in_use(index);
 18.1057           curr != NULL;
 18.1058 @@ -2833,13 +2569,12 @@
 18.1059    }
 18.1060  }
 18.1061  
 18.1062 -
 18.1063  MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
 18.1064    // DumpSharedSpaces doesn't use class metadata area (yet)
 18.1065    if (mdtype == ClassType && !DumpSharedSpaces) {
 18.1066 -    return class_vsm()->allocate(word_size);
 18.1067 +    return  class_vsm()->allocate(word_size);
 18.1068    } else {
 18.1069 -    return vsm()->allocate(word_size);
 18.1070 +    return  vsm()->allocate(word_size);
 18.1071    }
 18.1072  }
 18.1073  
 18.1074 @@ -2853,6 +2588,7 @@
 18.1075      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
 18.1076        " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
 18.1077    }
 18.1078 +
 18.1079    result = allocate(word_size, mdtype);
 18.1080  
 18.1081    return result;
 18.1082 @@ -2889,37 +2625,39 @@
 18.1083  void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
 18.1084    if (SafepointSynchronize::is_at_safepoint()) {
 18.1085      assert(Thread::current()->is_VM_thread(), "should be the VM thread");
 18.1086 -    // Don't take lock
 18.1087 -#ifdef DEALLOCATE_BLOCKS
 18.1088 +    // Don't take Heap_lock
 18.1089 +    MutexLocker ml(vsm()->lock());
 18.1090 +    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
 18.1091 +      // Dark matter.  Too small for dictionary.
 18.1092 +#ifdef ASSERT
 18.1093 +      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
 18.1094 +#endif
 18.1095 +      return;
 18.1096 +    }
 18.1097      if (is_class) {
 18.1098 -      class_vsm()->deallocate(ptr);
 18.1099 +       class_vsm()->deallocate(ptr, word_size);
 18.1100      } else {
 18.1101 -      vsm()->deallocate(ptr);
 18.1102 +      vsm()->deallocate(ptr, word_size);
 18.1103      }
 18.1104 -#else
 18.1105 -#ifdef ASSERT
 18.1106 -    Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
 18.1107 -#endif
 18.1108 -#endif
 18.1109 -
 18.1110    } else {
 18.1111      MutexLocker ml(vsm()->lock());
 18.1112  
 18.1113 -#ifdef DEALLOCATE_BLOCKS
 18.1114 +    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
 18.1115 +      // Dark matter.  Too small for dictionary.
 18.1116 +#ifdef ASSERT
 18.1117 +      Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
 18.1118 +#endif
 18.1119 +      return;
 18.1120 +    }
 18.1121      if (is_class) {
 18.1122 -      class_vsm()->deallocate(ptr);
 18.1123 +      class_vsm()->deallocate(ptr, word_size);
 18.1124      } else {
 18.1125 -      vsm()->deallocate(ptr);
 18.1126 +      vsm()->deallocate(ptr, word_size);
 18.1127      }
 18.1128 -#else
 18.1129 -#ifdef ASSERT
 18.1130 -    Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
 18.1131 -#endif
 18.1132 -#endif
 18.1133    }
 18.1134  }
 18.1135  
 18.1136 -MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
 18.1137 +Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
 18.1138                                bool read_only, MetadataType mdtype, TRAPS) {
 18.1139    if (HAS_PENDING_EXCEPTION) {
 18.1140      assert(false, "Should not allocate with exception pending");
 18.1141 @@ -2943,7 +2681,7 @@
 18.1142      if (result == NULL) {
 18.1143        report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
 18.1144      }
 18.1145 -    return result;
 18.1146 +    return Metablock::initialize(result, word_size);
 18.1147    }
 18.1148  
 18.1149    result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 18.1150 @@ -2951,7 +2689,7 @@
 18.1151    if (result == NULL) {
 18.1152      // Try to clean out some memory and retry.
 18.1153      result =
 18.1154 -    Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
 18.1155 +      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
 18.1156          loader_data, word_size, mdtype);
 18.1157  
 18.1158      // If result is still null, we are out of memory.
 18.1159 @@ -2967,7 +2705,7 @@
 18.1160        THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
 18.1161      }
 18.1162    }
 18.1163 -  return result;
 18.1164 +  return Metablock::initialize(result, word_size);
 18.1165  }
 18.1166  
 18.1167  void Metaspace::print_on(outputStream* out) const {
    19.1 --- a/src/share/vm/memory/metaspace.hpp	Fri Oct 19 11:26:17 2012 -0700
    19.2 +++ b/src/share/vm/memory/metaspace.hpp	Tue Sep 18 23:35:42 2012 -0700
    19.3 @@ -57,12 +57,10 @@
    19.4  //
    19.5  
    19.6  class ClassLoaderData;
    19.7 +class Metablock;
    19.8  class MetaWord;
    19.9  class Mutex;
   19.10  class outputStream;
   19.11 -class FreeChunk;
   19.12 -template <class Chunk_t> class FreeList;
   19.13 -template <class Chunk_t> class BinaryTreeDictionary;
   19.14  class SpaceManager;
   19.15  
   19.16  // Metaspaces each have a  SpaceManager and allocations
   19.17 @@ -128,7 +126,7 @@
   19.18    size_t capacity_words(MetadataType mdtype) const;
   19.19    size_t waste_words(MetadataType mdtype) const;
   19.20  
   19.21 -  static MetaWord* allocate(ClassLoaderData* loader_data, size_t size,
   19.22 +  static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
   19.23                              bool read_only, MetadataType mdtype, TRAPS);
   19.24    void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
   19.25  
    20.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Oct 19 11:26:17 2012 -0700
    20.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Tue Sep 18 23:35:42 2012 -0700
    20.3 @@ -59,6 +59,7 @@
    20.4  #include "memory/generation.hpp"
    20.5  #include "memory/generationSpec.hpp"
    20.6  #include "memory/heap.hpp"
    20.7 +#include "memory/metablock.hpp"
    20.8  #include "memory/space.hpp"
    20.9  #include "memory/tenuredGeneration.hpp"
   20.10  #include "memory/universe.hpp"
   20.11 @@ -249,6 +250,7 @@
   20.12  typedef Hashtable<Klass*, mtClass>            KlassHashtable;
   20.13  typedef HashtableEntry<Klass*, mtClass>       KlassHashtableEntry;
   20.14  typedef TwoOopHashtable<Symbol*, mtClass>     SymbolTwoOopHashtable;
   20.15 +typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
   20.16  
   20.17  //--------------------------------------------------------------------------------
   20.18  // VM_STRUCTS
   20.19 @@ -1237,7 +1239,15 @@
   20.20    nonstatic_field(AccessFlags,                 _flags,                                       jint)                                   \
   20.21    nonstatic_field(elapsedTimer,                _counter,                                     jlong)                                  \
   20.22    nonstatic_field(elapsedTimer,                _active,                                      bool)                                   \
   20.23 -  nonstatic_field(InvocationCounter,           _counter,                                     unsigned int)
   20.24 +  nonstatic_field(InvocationCounter,           _counter,                                     unsigned int)                           \
   20.25 +  volatile_nonstatic_field(FreeChunk,          _size,                                        size_t)                                 \
   20.26 +  nonstatic_field(FreeChunk,                   _next,                                        FreeChunk*)                             \
   20.27 +  nonstatic_field(FreeChunk,                   _prev,                                        FreeChunk*)                             \
   20.28 +  nonstatic_field(FreeList<FreeChunk>,         _size,                                        size_t)                                 \
   20.29 +  nonstatic_field(FreeList<Metablock>,         _size,                                        size_t)                                 \
   20.30 +  nonstatic_field(FreeList<FreeChunk>,         _count,                                       ssize_t)                                \
   20.31 +  nonstatic_field(FreeList<Metablock>,         _count,                                       ssize_t)                                \
   20.32 +  nonstatic_field(MetablockTreeDictionary,     _total_size,                                  size_t)
   20.33  
   20.34    /* NOTE that we do not use the last_entry() macro here; it is used  */
   20.35    /* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must  */
   20.36 @@ -2080,7 +2090,23 @@
   20.37    declare_toplevel_type(Universe)                                         \
   20.38    declare_toplevel_type(vframeArray)                                      \
   20.39    declare_toplevel_type(vframeArrayElement)                               \
   20.40 -  declare_toplevel_type(Annotations*)
   20.41 +  declare_toplevel_type(Annotations*)                                     \
   20.42 +                                                                          \
   20.43 +  /***************/                                                       \
   20.44 +  /* Miscellaneous types */                                               \
   20.45 +  /***************/                                                       \
   20.46 +                                                                          \
   20.47 +  /* freelist */                                                          \
   20.48 +  declare_toplevel_type(FreeChunk*)                                       \
   20.49 +  declare_toplevel_type(Metablock*)                                       \
   20.50 +  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*)                  \
   20.51 +  declare_toplevel_type(FreeList<FreeChunk>*)                             \
   20.52 +  declare_toplevel_type(FreeList<FreeChunk>)                              \
   20.53 +  declare_toplevel_type(FreeBlockDictionary<Metablock>*)                  \
   20.54 +  declare_toplevel_type(FreeList<Metablock>*)                             \
   20.55 +  declare_toplevel_type(FreeList<Metablock>)                              \
   20.56 +  declare_toplevel_type(MetablockTreeDictionary*)                         \
   20.57 +  declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
   20.59  
   20.60  
   20.61    /* NOTE that we do not use the last_entry() macro here; it is used  */

mercurial