src/share/vm/memory/space.hpp

author:      goetz
date:        Thu, 22 Aug 2013 09:39:54 -0700
changeset:   6461:bdd155477289
parent:      5119:12f651e29f6b
child:       6503:a9becfeecd1b
permissions: -rw-r--r--

8023033: PPC64 (part 13): basic changes for AIX
Summary: Added AIX includes, alpha-sorted before BSD. Fixed compilation issues with xlC in shared code. Basic shared platform-dependent adaptation (vm_version etc.).
Reviewed-by: kvn, dholmes, stefank

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
 private:
  ExtendedOopClosure* _cl;
  MemRegion   _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  virtual bool do_metadata() {
    // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
    assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
    return false;
  }
  virtual void do_klass(Klass* k)                         { ShouldNotReachHere(); }
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
};
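
// Illustrative sketch (not part of the original header): wrapping a closure
// so that only fields whose addresses lie in "mr" reach the underlying
// closure.  "cl", "mr" and "sp" are assumed to come from the caller.
//
//   SpaceMemRegionOopsIterClosure filter(cl, mr);
//   sp->oop_iterate(&filter);  // cl->do_oop runs only for fields inside mr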

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides
// GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the space contains the given address as part of
  // an allocated object. For certain kinds of spaces, this might be a
  // potentially expensive operation. To prevent performance problems
  // on account of its inadvertent use in product JVMs, we restrict its
  // use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space.  Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This
  // is most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
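
// Illustrative sketch (not from this file) of the two allocation paths
// declared above; "sp" and "word_size" are assumed:
//
//   HeapWord* p = sp->allocate(word_size);      // caller holds the lock
//   HeapWord* q = sp->par_allocate(word_size);  // mutual exclusion internal
//   // Either call returns NULL when the space cannot satisfy the request,
//   // in which case the caller typically expands the space or collects.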

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again); NULL means infinity.
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
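
// Sketch of how a card-table scan might drive this closure (illustrative;
// the real driver lives in the card table / remembered set code, and
// "dirty_mr" is a hypothetical region spanning the dirty cards):
//
//   DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision, boundary);
//   dcto_cl->do_MemRegion(dirty_mr);  // applies cl to oops in the dirty region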

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};

// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top;    }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final "cp->compaction_space"
  // up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and the current compaction space
  // ("space", which must equal "this").  "compact_top" is where in "this"
  // the next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold.  (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded.  If the size allows, fill the free
  // block with an object, to prevent excessive compaction.  Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
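
// Sketch of how a mark-sweep collector drives the three phases declared
// above (illustrative only; the real driver is in the MarkSweep code):
//
//   CompactPoint cp(gen, NULL, NULL);  // gen->first_compaction_space() == sp
//   sp->prepare_for_compaction(&cp);   // phase 2: compute forwarding pointers
//   sp->adjust_pointers();             // phase 3: fix interior oops
//   sp->compact();                     // phase 4: move the objects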

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the   \
   * mark word.  Used by MarkSweep::mark_sweep_phase2().                     \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this        \
   * space, so this is a good time to initialize this:                       \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \
  if (cp->space == NULL) {                                                   \
    assert(cp->gen != NULL, "need a generation");                            \
    assert(cp->threshold == NULL, "just checking");                          \
    assert(cp->gen->first_compaction_space() == this, "just checking");      \
    cp->space = cp->gen->first_compaction_space();                           \
    compact_top = cp->space->bottom();                                       \
    cp->space->set_compaction_top(compact_top);                              \
    cp->threshold = cp->space->initialize_threshold();                       \
  } else {                                                                   \
    compact_top = cp->space->compaction_top();                               \
  }                                                                          \
                                                                             \
  /* We allow some amount of garbage towards the bottom of the space, so     \
   * we don't start compacting before there is a significant gain to be made.\
   * Occasionally, we want to ensure a full compaction, which is determined  \
   * by the MarkSweepAlwaysCompactCount parameter.                           \
   */                                                                        \
  uint invocations = MarkSweep::total_invocations();                         \
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
                                                                             \
  size_t allowed_deadspace = 0;                                              \
  if (skip_dead) {                                                           \
    const size_t ratio = allowed_dead_ratio();                               \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
  }                                                                          \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = scan_limit();                                                \
                                                                             \
  HeapWord* end_of_live = q;    /* One byte beyond the last byte of the last \
                                   live object. */                           \
  HeapWord* first_dead = end(); /* The first dead object. */                 \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
                                   first header of preceding free area. */   \
  _first_dead = first_dead;                                                  \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  while (q < t) {                                                            \
    assert(!block_is_obj(q) ||                                               \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
           oop(q)->mark()->has_bias_pattern(),                               \
           "these are the only valid states during a mark sweep");           \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
      /* prefetch beyond q */                                                \
      Prefetch::write(q, interval);                                          \
      size_t size = block_size(q);                                           \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
      q += size;                                                             \
      end_of_live = q;                                                       \
    } else {                                                                 \
      /* run over all the contiguous dead objects */                         \
      HeapWord* end = q;                                                     \
      do {                                                                   \
        /* prefetch beyond end */                                            \
        Prefetch::write(end, interval);                                      \
        end += block_size(end);                                              \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
                                                                             \
      /* see if we might want to pretend this object is alive so that        \
       * we don't have to compact quite as often.                            \
       */                                                                    \
      if (allowed_deadspace > 0 && q == compact_top) {                       \
        size_t sz = pointer_delta(end, q);                                   \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
          q = end;                                                           \
          end_of_live = end;                                                 \
          continue;                                                          \
        }                                                                    \
      }                                                                      \
                                                                             \
      /* otherwise, it really is a free region. */                           \
                                                                             \
      /* for the previous LiveRange, record the end of the live objects. */  \
      if (liveRange) {                                                       \
        liveRange->set_end(q);                                               \
      }                                                                      \
                                                                             \
      /* record the current LiveRange object.                                \
       * liveRange->start() is overlaid on the mark word.                    \
       */                                                                    \
      liveRange = (LiveRange*)q;                                             \
      liveRange->set_start(end);                                             \
      liveRange->set_end(end);                                               \
                                                                             \
      /* see if this is the first dead region. */                            \
      if (q < first_dead) {                                                  \
        first_dead = q;                                                      \
      }                                                                      \
                                                                             \
      /* move on to the next object */                                       \
      q = end;                                                               \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
  if (liveRange != NULL) {                                                   \
    liveRange->set_end(q);                                                   \
  }                                                                          \
  _end_of_live = end_of_live;                                                \
  if (end_of_live < first_dead) {                                            \
    first_dead = end_of_live;                                                \
  }                                                                          \
  _first_dead = first_dead;                                                  \
                                                                             \
  /* save the compaction_top of the compaction space. */                     \
  cp->space->set_compaction_top(compact_top);                                \
}
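
// Subclasses instantiate SCAN_AND_FORWARD with their own notions of scan
// limit, object test and object size; e.g. a contiguous space can use
// (a sketch of what the corresponding .cpp file does):
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }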

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
  /* adjust all the interior pointers to point at the new locations of objects  \
   * Used by MarkSweep::mark_sweep_phase3() */                                  \
                                                                                \
  HeapWord* q = bottom();                                                       \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
                                                                                \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
                                                                                \
  if (q < t && _first_dead > q &&                                               \
      !oop(q)->is_gc_marked()) {                                                \
    /* we have a chunk of the space which hasn't moved and we've                \
     * reinitialized the mark word during the previous pass, so we can't        \
     * use is_gc_marked for the traversal. */                                   \
    HeapWord* end = _first_dead;                                                \
                                                                                \
    while (q < end) {                                                           \
      /* I originally tried to conjoin "block_start(q) == q" to the             \
       * assertion below, but that doesn't work, because you can't              \
       * accurately traverse previous objects to get to the current one         \
       * after their pointers have been                                         \
       * updated, until the actual compaction is done.  dld, 4/00 */            \
      assert(block_is_obj(q),                                                   \
             "should be at block boundaries, and should be looking at objs");   \
                                                                                \
      /* point all the oops to the new location */                              \
      size_t size = oop(q)->adjust_pointers();                                  \
      size = adjust_obj_size(size);                                             \
                                                                                \
      q += size;                                                                \
    }                                                                           \
                                                                                \
    if (_first_dead == t) {                                                     \
      q = t;                                                                    \
    } else {                                                                    \
      /* $$$ This is funky.  Using this to read the previously written          \
       * LiveRange.  See also use below. */                                     \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
    }                                                                           \
  }                                                                             \
                                                                                \
  const intx interval = PrefetchScanIntervalInBytes;                            \
                                                                                \
  debug_only(HeapWord* prev_q = NULL);                                          \
  while (q < t) {                                                               \
    /* prefetch beyond q */                                                     \
    Prefetch::write(q, interval);                                               \
    if (oop(q)->is_gc_marked()) {                                               \
      /* q is alive */                                                          \
      /* point all the oops to the new location */                              \
      size_t size = oop(q)->adjust_pointers();                                  \
      size = adjust_obj_size(size);                                             \
      debug_only(prev_q = q);                                                   \
      q += size;                                                                \
    } else {                                                                    \
      /* q is not a live object, so its mark should point at the next           \
       * live object */                                                         \
      debug_only(prev_q = q);                                                   \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
      assert(q > prev_q, "we should be moving forward through memory");         \
    }                                                                           \
  }                                                                             \
                                                                                \
  assert(q == t, "just checking");                                              \
}
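
// SCAN_AND_ADJUST_POINTERS is instantiated analogously; a space with no
// extra size adjustment passes an identity function (sketch):
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) return;               // nothing to do
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }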

#define SCAN_AND_COMPACT(obj_size) {                                            \
  /* Copy all live objects to their new location                                \
   * Used by MarkSweep::mark_sweep_phase4() */                                  \
                                                                                \
  HeapWord*       q = bottom();                                                 \
  HeapWord* const t = _end_of_live;                                             \
  debug_only(HeapWord* prev_q = NULL);                                          \
                                                                                \
  if (q < t && _first_dead > q &&                                               \
      !oop(q)->is_gc_marked()) {                                                \
    debug_only(                                                                 \
    /* we have a chunk of the space which hasn't moved and we've reinitialized  \
     * the mark word during the previous pass, so we can't use is_gc_marked for \
     * the traversal. */                                                        \
    HeapWord* const end = _first_dead;                                          \
                                                                                \
    while (q < end) {                                                           \
      size_t size = obj_size(q);                                                \
      assert(!oop(q)->is_gc_marked(),                                           \
             "should be unmarked (special dense prefix handling)");             \
      debug_only(prev_q = q);                                                   \
      q += size;                                                                \
    }                                                                           \
    )  /* debug_only */                                                         \
                                                                                \
    if (_first_dead == t) {                                                     \
      q = t;                                                                    \
    } else {                                                                    \
      /* $$$ Funky */                                                           \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
    }                                                                           \
  }                                                                             \
                                                                                \
  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
  while (q < t) {                                                               \
    if (!oop(q)->is_gc_marked()) {                                              \
      /* mark is pointer to next marked oop */                                  \
      debug_only(prev_q = q);                                                   \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
      assert(q > prev_q, "we should be moving forward through memory");         \
    } else {                                                                    \
      /* prefetch beyond q */                                                   \
      Prefetch::read(q, scan_interval);                                         \
                                                                                \
      /* size and destination */                                                \
      size_t size = obj_size(q);                                                \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
                                                                                \
      /* prefetch beyond compaction_top */                                      \
      Prefetch::write(compaction_top, copy_interval);                           \
                                                                                \
      /* copy object and reinit its mark */                                     \
      assert(q != compaction_top, "everything in this pass should be moving");  \
      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
      oop(compaction_top)->init_mark();                                         \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
                                                                                \
      debug_only(prev_q = q);                                                   \
      q += size;                                                                \
    }                                                                           \
  }                                                                             \
                                                                                \
  /* Let's remember if we were empty before we did the compaction. */           \
  bool was_empty = used_region().is_empty();                                    \
  /* Reset space after compaction is complete */                                \
  reset_after_compaction();                                                     \
  /* We do this clear, below, since it has overloaded meanings for some */      \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
  /* compacted into will have had their offset table thresholds updated */      \
  /* continuously, but those that weren't need to have their thresholds */      \
  /* re-initialized.  Also mangles unused area for debugging.           */      \
  if (used_region().is_empty()) {                                               \
    if (!was_empty) clear(SpaceDecorator::Mangle);                              \
  } else {                                                                      \
    if (ZapUnusedHeapArea) mangle_unused_area();                                \
  }                                                                             \
}
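
// SCAN_AND_COMPACT completes the picture; a subclass typically instantiates
// it with its object-size function (sketch):
//
//   void CompactibleSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }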

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This check may be a no-op, depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // Iterates over objects up to the safe limit.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit; all objects from bottom() to the new
  // limit must be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }

#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

    ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};
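
// The lock-free par_allocate path above is conceptually a CAS loop over
// _top (a sketch of the usual implementation; assumes Atomic::cmpxchg_ptr):
//
//   do {
//     HeapWord* obj = top();
//     if (pointer_delta(end(), obj) < word_size) return NULL;  // full
//     HeapWord* new_top = obj + word_size;
//     if (Atomic::cmpxchg_ptr(new_top, top_addr(), obj) == obj) return obj;
//   } while (true);  // another thread won the race; retry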

// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringClosure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};

// Class EdenSpace describes the eden space in the new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation.  As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
   _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
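
// Allocation in EdenSpace additionally respects _soft_end: the fast path
// bounds allocation by soft_end() rather than end(), and on failure a slow
// path may raise the soft limit toward end() before retrying (sketch,
// using the allocate_impl helper declared in ContiguousSpace):
//
//   HeapWord* result = allocate_impl(word_size, soft_end());
//   if (result == NULL) { /* slow path: adjust _soft_end, maybe retry */ }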

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces).  This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};
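
// "Add offset table update" means a successful allocation also records the
// new block in _offsets, keeping block_start fast (sketch; assumes the
// BlockOffsetArrayContigSpace::alloc_block interface):
//
//   HeapWord* res = ContiguousSpace::allocate(word_size);
//   if (res != NULL) {
//     _offsets.alloc_block(res, word_size);  // note the new block boundary
//   }
//   return res;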

// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP
