src/share/vm/memory/space.hpp

author:      jmasa
date:        Tue, 30 Oct 2012 10:23:55 -0700
changeset:   4234:3fadc0e8cffe
parent:      4037:da91efe96a93
child:       4384:b735136e0d82
permissions: -rw-r--r--

8000988: VM deadlock when running btree006 on windows-i586
Reviewed-by: johnc, jcoomes, ysr

     1 /*
     2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_MEMORY_SPACE_HPP
    26 #define SHARE_VM_MEMORY_SPACE_HPP
    28 #include "memory/allocation.hpp"
    29 #include "memory/blockOffsetTable.hpp"
    30 #include "memory/cardTableModRefBS.hpp"
    31 #include "memory/iterator.hpp"
    32 #include "memory/memRegion.hpp"
    33 #include "memory/watermark.hpp"
    34 #include "oops/markOop.hpp"
    35 #include "runtime/mutexLocker.hpp"
    36 #include "runtime/prefetch.hpp"
    37 #include "utilities/workgroup.hpp"
    38 #ifdef TARGET_OS_FAMILY_linux
    39 # include "os_linux.inline.hpp"
    40 #endif
    41 #ifdef TARGET_OS_FAMILY_solaris
    42 # include "os_solaris.inline.hpp"
    43 #endif
    44 #ifdef TARGET_OS_FAMILY_windows
    45 # include "os_windows.inline.hpp"
    46 #endif
    47 #ifdef TARGET_OS_FAMILY_bsd
    48 # include "os_bsd.inline.hpp"
    49 #endif
    51 // A space is an abstraction for the "storage units" backing
    52 // up the generation abstraction. It includes specific
    53 // implementations for keeping track of free and used space,
    54 // for iterating over objects and free blocks, etc.
    56 // Here's the Space hierarchy:
    57 //
    58 // - Space               -- an abstract base class describing a heap area
    59 //   - CompactibleSpace  -- a space supporting compaction
    60 //     - CompactibleFreeListSpace -- (used for CMS generation)
    61 //     - ContiguousSpace -- a compactible space in which all free space
    62 //                          is contiguous
    63 //       - EdenSpace     -- contiguous space used as nursery
    64 //         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
    65 //       - OffsetTableContigSpace -- contiguous space with a block offset array
    66 //                          that allows "fast" block_start calls
    67 //         - TenuredSpace -- (used for TenuredGeneration)
    69 // Forward decls.
    70 class Space;
    71 class BlockOffsetArray;
    72 class BlockOffsetArrayContigSpace;
    73 class Generation;
    74 class CompactibleSpace;
    75 class BlockOffsetTable;
    76 class GenRemSet;
    77 class CardTableRS;
    78 class DirtyCardToOopClosure;
    80 // An oop closure that is circumscribed by a filtering memory region.
    81 class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
    82  private:
    83   ExtendedOopClosure* _cl;
    84   MemRegion   _mr;
    85  protected:
    86   template <class T> void do_oop_work(T* p) {
    87     if (_mr.contains(p)) {
    88       _cl->do_oop(p);
    89     }
    90   }
    91  public:
    92   SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
    93     _cl(cl), _mr(mr) {}
    94   virtual void do_oop(oop* p);
    95   virtual void do_oop(narrowOop* p);
    96   virtual bool do_metadata() {
    97     // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
    98     assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
    99     return false;
   100   }
   101   virtual void do_klass(Klass* k)                         { ShouldNotReachHere(); }
   102   virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
   103 };
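// Usage sketch (illustrative only; "my_cl" and "sp" are hypothetical names for
// an existing ExtendedOopClosure and a ContiguousSpace): the closure above
// restricts "my_cl" to the oop fields whose addresses fall inside a MemRegion.
//
//   MemRegion mr(sp->bottom(), sp->top());
//   SpaceMemRegionOopsIterClosure filter_cl(my_cl, mr);
//   sp->oop_iterate(&filter_cl);   // only fields located inside "mr" reach my_cl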
   105 // A Space describes a heap area. Class Space is an abstract
   106 // base class.
   107 //
   108 // Space supports allocation and size computation, and provides GC support.
   109 //
   110 // Invariant: bottom() and end() are on page_size boundaries and
   111 // bottom() <= top() <= end()
   112 // top() is inclusive and end() is exclusive.
   114 class Space: public CHeapObj<mtGC> {
   115   friend class VMStructs;
   116  protected:
   117   HeapWord* _bottom;
   118   HeapWord* _end;
   120   // Used in support of save_marks()
   121   HeapWord* _saved_mark_word;
   123   MemRegionClosure* _preconsumptionDirtyCardClosure;
   125   // A sequential tasks done structure. This supports
   126   // parallel GC, where we have threads dynamically
   127   // claiming sub-tasks from a larger parallel task.
   128   SequentialSubTasksDone _par_seq_tasks;
   130   Space():
   131     _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
   133  public:
   134   // Accessors
   135   HeapWord* bottom() const         { return _bottom; }
   136   HeapWord* end() const            { return _end;    }
   137   virtual void set_bottom(HeapWord* value) { _bottom = value; }
   138   virtual void set_end(HeapWord* value)    { _end = value; }
   140   virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }
   142   void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
   144   MemRegionClosure* preconsumptionDirtyCardClosure() const {
   145     return _preconsumptionDirtyCardClosure;
   146   }
   147   void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
   148     _preconsumptionDirtyCardClosure = cl;
   149   }
   151   // Returns a subregion of the space containing all the objects in
   152   // the space.
   153   virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
   155   // Returns a region that is guaranteed to contain (at least) all objects
   156   // allocated at the time of the last call to "save_marks".  If the space
   157   // initializes its DirtyCardToOopClosures specifying the "contig" option
   158   // (that is, if the space is contiguous), then this region must contain only
   159   // such objects: the MemRegion will be from the bottom of the region to the
   160   // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
   161   // the space must distinguish between objects in the region allocated before
   162   // and after the call to save marks.
   163   virtual MemRegion used_region_at_save_marks() const {
   164     return MemRegion(bottom(), saved_mark_word());
   165   }
   167   // Initialization.
   168   // "initialize" should be called once on a space, before it is used for
   169   // any purpose.  The "mr" argument gives the bounds of the space, and
   170   // the "clear_space" argument should be true unless the memory in "mr" is
   171   // known to be zeroed.
   172   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   174   // The "clear" method must be called on a region that may have
   175   // had allocation performed in it, but is now to be considered empty.
   176   virtual void clear(bool mangle_space);
   178   // For detecting GC bugs.  Should only be called at GC boundaries, since
   179   // some unused space may be used as scratch space during GC's.
   180   // Default implementation does nothing. We also call this when expanding
   181   // a space to satisfy an allocation request. See bug #4668531
   182   virtual void mangle_unused_area() {}
   183   virtual void mangle_unused_area_complete() {}
   184   virtual void mangle_region(MemRegion mr) {}
   186   // Testers
   187   bool is_empty() const              { return used() == 0; }
   188   bool not_empty() const             { return used() > 0; }
   190   // Returns true iff the space contains the
   191   // given address as part of an allocated object. For
   192   // certain kinds of spaces, this might be a potentially
   193   // expensive operation. To prevent performance problems
   194   // on account of its inadvertent use in product JVMs,
   195   // we restrict its use to assertion checks only.
   196   virtual bool is_in(const void* p) const = 0;
   198   // Returns true iff the reserved memory of the space contains the
   199   // given address.
   200   bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
   202   // Returns true iff the given block is not allocated.
   203   virtual bool is_free_block(const HeapWord* p) const = 0;
   205   // Test whether p is double-aligned
   206   static bool is_aligned(void* p) {
   207     return ((intptr_t)p & (sizeof(double)-1)) == 0;
   208   }
   210   // Size computations.  Sizes are in bytes.
   211   size_t capacity()     const { return byte_size(bottom(), end()); }
   212   virtual size_t used() const = 0;
   213   virtual size_t free() const = 0;
   215   // Iterate over all the ref-containing fields of all objects in the
   216   // space, calling "cl.do_oop" on each.  Fields in objects allocated by
   217   // applications of the closure are not included in the iteration.
   218   virtual void oop_iterate(ExtendedOopClosure* cl);
   220   // Same as above, restricted to the intersection of a memory region and
   221   // the space.  Fields in objects allocated by applications of the closure
   222   // are not included in the iteration.
   223   virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
   225   // Iterate over all objects in the space, calling "cl.do_object" on
   226   // each.  Objects allocated by applications of the closure are not
   227   // included in the iteration.
   228   virtual void object_iterate(ObjectClosure* blk) = 0;
   229   // Similar to object_iterate() except only iterates over
   230   // objects whose internal references point to objects in the space.
   231   virtual void safe_object_iterate(ObjectClosure* blk) = 0;
   233   // Iterate over all objects that intersect with mr, calling "cl->do_object"
   234   // on each.  There is an exception to this: if this closure has already
   235   // been invoked on an object, it may skip such objects in some cases.  This is
   236   // most likely to happen in an "upwards" (ascending address) iteration of
   237   // MemRegions.
   238   virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
   240   // Iterate over as many initialized objects in the space as possible,
   241   // calling "cl.do_object_careful" on each. Return NULL if all objects
   242   // in the space (at the start of the iteration) were iterated over.
   243   // Return an address indicating the extent of the iteration in the
   244   // event that the iteration had to return because of finding an
   245   // uninitialized object in the space, or if the closure "cl"
   246   // signalled early termination.
   247   virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   248   virtual HeapWord* object_iterate_careful_m(MemRegion mr,
   249                                              ObjectClosureCareful* cl);
   251   // Create and return a new dirty card to oop closure. Can be
   252   // overridden to return the appropriate type of closure
   253   // depending on the type of space in which the closure will
   254   // operate. ResourceArea allocated.
   255   virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
   256                                              CardTableModRefBS::PrecisionStyle precision,
   257                                              HeapWord* boundary = NULL);
   259   // If "p" is in the space, returns the address of the start of the
   260   // "block" that contains "p".  We say "block" instead of "object" since
   261   // some heaps may not pack objects densely; a chunk may either be an
   262   // object or a non-object.  If "p" is not in the space, return NULL.
   263   virtual HeapWord* block_start_const(const void* p) const = 0;
   265   // The non-const version may have benevolent side effects on the data
   266   // structure supporting these calls, possibly speeding up future calls.
   267   // The default implementation, however, is simply to call the const
   268   // version.
   269   inline virtual HeapWord* block_start(const void* p);
   271   // Requires "addr" to be the start of a chunk, and returns its size.
   272   // "addr + size" is required to be the start of a new chunk, or the end
   273   // of the active area of the heap.
   274   virtual size_t block_size(const HeapWord* addr) const = 0;
   276   // Requires "addr" to be the start of a block, and returns "TRUE" iff
   277   // the block is an object.
   278   virtual bool block_is_obj(const HeapWord* addr) const = 0;
   280   // Requires "addr" to be the start of a block, and returns "TRUE" iff
   281   // the block is an object and the object is alive.
   282   virtual bool obj_is_alive(const HeapWord* addr) const;
   284   // Allocation (return NULL if full).  Assumes the caller has established
   285   // mutually exclusive access to the space.
   286   virtual HeapWord* allocate(size_t word_size) = 0;
   288   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
   289   virtual HeapWord* par_allocate(size_t word_size) = 0;
   291   // Returns true if this object has been allocated since a
   292   // generation's "save_marks" call.
   293   virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
   295   // Mark-sweep-compact support: all spaces can update pointers to objects
   296   // moving as a part of compaction.
   297   virtual void adjust_pointers();
   299   // PrintHeapAtGC support
   300   virtual void print() const;
   301   virtual void print_on(outputStream* st) const;
   302   virtual void print_short() const;
   303   virtual void print_short_on(outputStream* st) const;
   306   // Accessor for parallel sequential tasks.
   307   SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
   309   // If "this" is a ContiguousSpace, return it, else return NULL.
   310   virtual ContiguousSpace* toContiguousSpace() {
   311     return NULL;
   312   }
   314   // Debugging
   315   virtual void verify() const = 0;
   316 };
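// Usage sketch (illustrative only; "sp" is a hypothetical Space* whose blocks
// are parsable): the block_*() API above supports a generic walk of a space
// without assuming it is contiguous.
//
//   HeapWord* cur   = sp->bottom();
//   HeapWord* limit = sp->used_region().end();
//   while (cur < limit) {
//     if (sp->block_is_obj(cur)) {
//       // oop(cur) starts here; e.g. apply an ObjectClosure to it
//     }
//     cur += sp->block_size(cur);    // advance to the next block
//   }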
   318 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
   319 // OopClosure to (the addresses of) all the ref-containing fields that could
   320 // be modified by virtue of the given MemRegion being dirty. (Note that
   321 // because of the imprecise nature of the write barrier, this may iterate
   322 // over oops beyond the region.)
   323 // This base type for dirty card to oop closures handles memory regions
   324 // in non-contiguous spaces with no boundaries, and should be sub-classed
   325 // to support other space types. See ContiguousDCTOC for a sub-class
   326 // that works with ContiguousSpaces.
   328 class DirtyCardToOopClosure: public MemRegionClosureRO {
   329 protected:
   330   ExtendedOopClosure* _cl;
   331   Space* _sp;
   332   CardTableModRefBS::PrecisionStyle _precision;
   333   HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
   334                                 // pointing below boundary.
   335   HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
   336                                 // a downwards traversal; this is the
   337                                 // lowest location already done (or,
   338                                 // alternatively, the lowest address that
   339                                 // shouldn't be done again.  NULL means infinity.)
   340   NOT_PRODUCT(HeapWord* _last_bottom;)
   341   NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
   343   // Get the actual top of the area on which the closure will
   344   // operate, given where the top is assumed to be (the end of the
   345   // memory region passed to do_MemRegion) and where the object
   346   // at the top is assumed to start. For example, an object may
   347   // start at the top but actually extend past the assumed top,
   348   // in which case the top becomes the end of the object.
   349   virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
   351   // Walk the given memory region from bottom to (actual) top
   352   // looking for objects and applying the oop closure (_cl) to
   353   // them. The base implementation of this treats the area as
   354   // blocks, where a block may or may not be an object. Sub-
   355   // classes should override this to provide more accurate
   356   // or possibly more efficient walking.
   357   virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
   359 public:
   360   DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
   361                         CardTableModRefBS::PrecisionStyle precision,
   362                         HeapWord* boundary) :
   363     _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
   364     _min_done(NULL) {
   365     NOT_PRODUCT(_last_bottom = NULL);
   366     NOT_PRODUCT(_last_explicit_min_done = NULL);
   367   }
   369   void do_MemRegion(MemRegion mr);
   371   void set_min_done(HeapWord* min_done) {
   372     _min_done = min_done;
   373     NOT_PRODUCT(_last_explicit_min_done = _min_done);
   374   }
   375 #ifndef PRODUCT
   376   void set_last_bottom(HeapWord* last_bottom) {
   377     _last_bottom = last_bottom;
   378   }
   379 #endif
   380 };
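// Usage sketch (illustrative only; "sp", "oop_cl" and "dirty_mr" are
// hypothetical): card scanning typically asks the space for a closure of the
// appropriate dynamic type and then hands it the dirty regions; the precision
// style is one of the CardTableModRefBS::PrecisionStyle values.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::ObjHeadPreciseArray, NULL);
//   dcto_cl->do_MemRegion(dirty_mr);   // applies oop_cl to fields on the dirty cards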
   382 // A structure to represent a point at which objects are being copied
   383 // during compaction.
   384 class CompactPoint : public StackObj {
   385 public:
   386   Generation* gen;
   387   CompactibleSpace* space;
   388   HeapWord* threshold;
   389   CompactPoint(Generation* _gen, CompactibleSpace* _space,
   390                HeapWord* _threshold) :
   391     gen(_gen), space(_space), threshold(_threshold) {}
   392 };
   395 // A space that supports compaction operations.  This is usually, but not
   396 // necessarily, a space that is normally contiguous.  But, for example, a
   397 // free-list-based space whose normal collection is a mark-sweep without
   398 // compaction could still support compaction in full GC's.
   400 class CompactibleSpace: public Space {
   401   friend class VMStructs;
   402   friend class CompactibleFreeListSpace;
   403 private:
   404   HeapWord* _compaction_top;
   405   CompactibleSpace* _next_compaction_space;
   407 public:
   408   CompactibleSpace() :
   409    _compaction_top(NULL), _next_compaction_space(NULL) {}
   411   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   412   virtual void clear(bool mangle_space);
   414   // Used temporarily during a compaction phase to hold the value
   415   // top should have when compaction is complete.
   416   HeapWord* compaction_top() const { return _compaction_top;    }
   418   void set_compaction_top(HeapWord* value) {
   419     assert(value == NULL || (value >= bottom() && value <= end()),
   420       "should point inside space");
   421     _compaction_top = value;
   422   }
   424   // Perform operations on the space needed after a compaction
   425   // has been performed.
   426   virtual void reset_after_compaction() {}
   428   // Returns the next space (in the current generation) to be compacted in
   429   // the global compaction order.  Also is used to select the next
   430   // space into which to compact.
   432   virtual CompactibleSpace* next_compaction_space() const {
   433     return _next_compaction_space;
   434   }
   436   void set_next_compaction_space(CompactibleSpace* csp) {
   437     _next_compaction_space = csp;
   438   }
   440   // MarkSweep support phase2
   442   // Start the process of compaction of the current space: compute
   443   // post-compaction addresses, and insert forwarding pointers.  The fields
   444   // "cp->gen" and "cp->compaction_space" are the generation and space into
   445   // which we are currently compacting.  This call updates "cp" as necessary,
   446   // and leaves the "compaction_top" of the final value of
   447   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
   448   // this phase as if the final copy had occurred; if so, "cp->threshold"
   449   // indicates when the next such action should be taken.
   450   virtual void prepare_for_compaction(CompactPoint* cp);
   451   // MarkSweep support phase3
   452   virtual void adjust_pointers();
   453   // MarkSweep support phase4
   454   virtual void compact();
   456   // The maximum percentage of objects that can be dead in the compacted
   457   // live part of a compacted space ("deadwood" support.)
   458   virtual size_t allowed_dead_ratio() const { return 0; };
   460   // Some contiguous spaces may maintain some data structures that should
   461   // be updated whenever an allocation crosses a boundary.  This function
   462   // returns the first such boundary.
   463   // (The default implementation returns the end of the space, so the
   464   // boundary is never crossed.)
   465   virtual HeapWord* initialize_threshold() { return end(); }
   467   // "q" is an object of the given "size" that should be forwarded;
   468   // "cp" names the generation ("gen") and the space containing "this" (which must
   469   // also equal "cp->space").  "compact_top" is where in "this" the
   470   // next object should be forwarded to.  If there is room in "this" for
   471   // the object, insert an appropriate forwarding pointer in "q".
   472   // If not, go to the next compaction space (there must
   473   // be one, since compaction must succeed -- we go to the first space of
   474   // the previous generation if necessary, updating "cp"), reset compact_top
   475   // and then forward.  In either case, returns the new value of "compact_top".
   476   // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
   477   // function of the then-current compaction space, and updates "cp->threshold"
   478   // accordingly.
   479   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
   480                     HeapWord* compact_top);
   482   // Return a size with adjustments as required of the space.
   483   virtual size_t adjust_object_size_v(size_t size) const { return size; }
   485 protected:
   486   // Used during compaction.
   487   HeapWord* _first_dead;
   488   HeapWord* _end_of_live;
   490   // Minimum size of a free block.
   491   virtual size_t minimum_free_block_size() const = 0;
   493   // This function is invoked when an allocation of an object covering
   494   // "start" to "end" crosses the threshold; returns the next
   495   // threshold.  (The default implementation simply returns the end of the space.)
   496   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
   497     return end();
   498   }
   500   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
   501   // free block of the given "word_len", and that "q", were it an object,
   502   // would not move if forwarded.  If the size allows, fill the free
   503   // block with an object, to prevent excessive compaction.  Returns "true"
   504   // iff the free region was made deadspace, and modifies
   505   // "allowed_deadspace_words" to reflect the number of available deadspace
   506   // words remaining after this operation.
   507   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
   508                         size_t word_len);
   509 };
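// Usage sketch (illustrative only; "gen" and "first" are hypothetical): a
// mark-compact collection drives the three virtuals above in phase order over
// the chain of spaces linked by next_compaction_space().
//
//   CompactPoint cp(gen, NULL, NULL);
//   for (CompactibleSpace* s = first; s != NULL; s = s->next_compaction_space()) {
//     s->prepare_for_compaction(&cp);   // phase 2: compute new addresses, install forwarding
//   }
//   for (CompactibleSpace* s = first; s != NULL; s = s->next_compaction_space()) {
//     s->adjust_pointers();             // phase 3: rewrite interior pointers
//   }
//   for (CompactibleSpace* s = first; s != NULL; s = s->next_compaction_space()) {
//     s->compact();                     // phase 4: slide the live objects
//   }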
   511 #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
   512   /* Compute the new addresses for the live objects and store them in the  \
   513    * mark word.  Used by MarkSweep::mark_sweep_phase2().                    \
   514    */                                                                        \
   515   HeapWord* compact_top; /* This is where we are currently compacting to. */ \
   516                                                                              \
   517   /* We're sure to be here before any objects are compacted into this        \
   518    * space, so this is a good time to initialize this:                       \
   519    */                                                                        \
   520   set_compaction_top(bottom());                                              \
   521                                                                              \
   522   if (cp->space == NULL) {                                                   \
   523     assert(cp->gen != NULL, "need a generation");                            \
   524     assert(cp->threshold == NULL, "just checking");                          \
   525     assert(cp->gen->first_compaction_space() == this, "just checking");      \
   526     cp->space = cp->gen->first_compaction_space();                           \
   527     compact_top = cp->space->bottom();                                       \
   528     cp->space->set_compaction_top(compact_top);                              \
   529     cp->threshold = cp->space->initialize_threshold();                       \
   530   } else {                                                                   \
   531     compact_top = cp->space->compaction_top();                               \
   532   }                                                                          \
   533                                                                              \
   534   /* We allow some amount of garbage towards the bottom of the space, so     \
   535    * we don't start compacting before there is a significant gain to be made.\
   536    * Occasionally, we want to ensure a full compaction, which is determined  \
   537    * by the MarkSweepAlwaysCompactCount parameter.                           \
   538    */                                                                        \
   539   int invocations = MarkSweep::total_invocations();                          \
   540   bool skip_dead = (MarkSweepAlwaysCompactCount < 1)                         \
   541     ||((invocations % MarkSweepAlwaysCompactCount) != 0);                    \
   542                                                                              \
   543   size_t allowed_deadspace = 0;                                              \
   544   if (skip_dead) {                                                           \
   545     const size_t ratio = allowed_dead_ratio();                               \
   546     allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
   547   }                                                                          \
   548                                                                              \
   549   HeapWord* q = bottom();                                                    \
   550   HeapWord* t = scan_limit();                                                \
   551                                                                              \
   552   HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
   553                                    live object. */                           \
   554   HeapWord*  first_dead = end();/* The first dead object. */                 \
   555   LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
   556                                    first header of preceding free area. */   \
   557   _first_dead = first_dead;                                                  \
   558                                                                              \
   559   const intx interval = PrefetchScanIntervalInBytes;                         \
   560                                                                              \
   561   while (q < t) {                                                            \
   562     assert(!block_is_obj(q) ||                                               \
   563            oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
   564            oop(q)->mark()->has_bias_pattern(),                               \
   565            "these are the only valid states during a mark sweep");           \
   566     if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
   567       /* prefetch beyond q */                                                \
   568       Prefetch::write(q, interval);                                          \
   569       size_t size = block_size(q);                                           \
   570       compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
   571       q += size;                                                             \
   572       end_of_live = q;                                                       \
   573     } else {                                                                 \
   574       /* run over all the contiguous dead objects */                         \
   575       HeapWord* end = q;                                                     \
   576       do {                                                                   \
   577         /* prefetch beyond end */                                            \
   578         Prefetch::write(end, interval);                                      \
   579         end += block_size(end);                                              \
   580       } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
   581                                                                              \
   582       /* see if we might want to pretend this object is alive so that        \
   583        * we don't have to compact quite as often.                            \
   584        */                                                                    \
   585       if (allowed_deadspace > 0 && q == compact_top) {                       \
   586         size_t sz = pointer_delta(end, q);                                   \
   587         if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
   588           compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
   589           q = end;                                                           \
   590           end_of_live = end;                                                 \
   591           continue;                                                          \
   592         }                                                                    \
   593       }                                                                      \
   594                                                                              \
   595       /* otherwise, it really is a free region. */                           \
   596                                                                              \
   597       /* for the previous LiveRange, record the end of the live objects. */  \
   598       if (liveRange) {                                                       \
   599         liveRange->set_end(q);                                               \
   600       }                                                                      \
   601                                                                              \
   602       /* record the current LiveRange object.                                \
   603        * liveRange->start() is overlaid on the mark word.                    \
   604        */                                                                    \
   605       liveRange = (LiveRange*)q;                                             \
   606       liveRange->set_start(end);                                             \
   607       liveRange->set_end(end);                                               \
   608                                                                              \
   609       /* see if this is the first dead region. */                            \
   610       if (q < first_dead) {                                                  \
   611         first_dead = q;                                                      \
   612       }                                                                      \
   613                                                                              \
   614       /* move on to the next object */                                       \
   615       q = end;                                                               \
   616     }                                                                        \
   617   }                                                                          \
   618                                                                              \
   619   assert(q == t, "just checking");                                           \
   620   if (liveRange != NULL) {                                                   \
   621     liveRange->set_end(q);                                                   \
   622   }                                                                          \
   623   _end_of_live = end_of_live;                                                \
   624   if (end_of_live < first_dead) {                                            \
   625     first_dead = end_of_live;                                                \
   626   }                                                                          \
   627   _first_dead = first_dead;                                                  \
   628                                                                              \
   629   /* save the compaction_top of the compaction space. */                     \
   630   cp->space->set_compaction_top(compact_top);                                \
   631 }
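// Usage sketch (illustrative only): a subclass expands the macro by supplying
// names for scan_limit, block_is_obj and block_size.  The helper macros below
// are assumptions, shown roughly as a ContiguousSpace (where every block below
// top() is an object) would use them.
//
//   #define obj_size(q)            oop(q)->size()
//   #define block_is_always_obj(q) true
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }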
   633 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
   634   /* adjust all the interior pointers to point at the new locations of objects  \
   635    * Used by MarkSweep::mark_sweep_phase3() */                                  \
   636                                                                                 \
   637   HeapWord* q = bottom();                                                       \
   638   HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
   639                                                                                 \
   640   assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
   641                                                                                 \
   642   if (q < t && _first_dead > q &&                                               \
   643       !oop(q)->is_gc_marked()) {                                                \
   644     /* we have a chunk of the space which hasn't moved and we've                \
   645      * reinitialized the mark word during the previous pass, so we can't        \
   646      * use is_gc_marked for the traversal. */                                   \
   647     HeapWord* end = _first_dead;                                                \
   648                                                                                 \
   649     while (q < end) {                                                           \
   650       /* I originally tried to conjoin "block_start(q) == q" to the             \
   651        * assertion below, but that doesn't work, because you can't              \
   652        * accurately traverse previous objects to get to the current one         \
   653        * after their pointers have been                                         \
   654        * updated, until the actual compaction is done.  dld, 4/00 */            \
   655       assert(block_is_obj(q),                                                   \
   656              "should be at block boundaries, and should be looking at objs");   \
   657                                                                                 \
   658       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
   659                                                                                 \
   660       /* point all the oops to the new location */                              \
   661       size_t size = oop(q)->adjust_pointers();                                  \
   662       size = adjust_obj_size(size);                                             \
   663                                                                                 \
   664       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
   665                                                                                 \
   666       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
   667                                                                                 \
   668       q += size;                                                                \
   669     }                                                                           \
   670                                                                                 \
   671     if (_first_dead == t) {                                                     \
   672       q = t;                                                                    \
   673     } else {                                                                    \
   674       /* $$$ This is funky.  Using this to read the previously written          \
   675        * LiveRange.  See also use below. */                                     \
   676       q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
   677     }                                                                           \
   678   }                                                                             \
   679                                                                                 \
   680   const intx interval = PrefetchScanIntervalInBytes;                            \
   681                                                                                 \
   682   debug_only(HeapWord* prev_q = NULL);                                          \
   683   while (q < t) {                                                               \
   684     /* prefetch beyond q */                                                     \
   685     Prefetch::write(q, interval);                                               \
   686     if (oop(q)->is_gc_marked()) {                                               \
   687       /* q is alive */                                                          \
   688       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
   689       /* point all the oops to the new location */                              \
   690       size_t size = oop(q)->adjust_pointers();                                  \
   691       size = adjust_obj_size(size);                                             \
   692       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
   693       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
   694       debug_only(prev_q = q);                                                   \
   695       q += size;                                                                \
   696     } else {                                                                    \
   697       /* q is not a live object, so its mark should point at the next           \
   698        * live object */                                                         \
   699       debug_only(prev_q = q);                                                   \
   700       q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
   701       assert(q > prev_q, "we should be moving forward through memory");         \
   702     }                                                                           \
   703   }                                                                             \
   704                                                                                 \
   705   assert(q == t, "just checking");                                              \
   706 }
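// Usage sketch (illustrative only; the identity macro is an assumption): the
// adjust-pointers phase is instantiated the same way; for spaces whose object
// sizes need no correction the parameter can simply pass the size through.
//
//   #define adjust_obj_size(s) (s)
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) return;              // nothing to do for an empty space
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }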
   708 #define SCAN_AND_COMPACT(obj_size) {                                            \
   709   /* Copy all live objects to their new location                                \
   710    * Used by MarkSweep::mark_sweep_phase4() */                                  \
   711                                                                                 \
   712   HeapWord*       q = bottom();                                                 \
   713   HeapWord* const t = _end_of_live;                                             \
   714   debug_only(HeapWord* prev_q = NULL);                                          \
   715                                                                                 \
   716   if (q < t && _first_dead > q &&                                               \
   717       !oop(q)->is_gc_marked()) {                                                \
   718     debug_only(                                                                 \
   719     /* we have a chunk of the space which hasn't moved and we've reinitialized  \
   720      * the mark word during the previous pass, so we can't use is_gc_marked for \
   721      * the traversal. */                                                        \
   722     HeapWord* const end = _first_dead;                                          \
   723                                                                                 \
   724     while (q < end) {                                                           \
   725       size_t size = obj_size(q);                                                \
   726       assert(!oop(q)->is_gc_marked(),                                           \
   727              "should be unmarked (special dense prefix handling)");             \
   728       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));       \
   729       debug_only(prev_q = q);                                                   \
   730       q += size;                                                                \
   731     }                                                                           \
   732     )  /* debug_only */                                                         \
   733                                                                                 \
   734     if (_first_dead == t) {                                                     \
   735       q = t;                                                                    \
   736     } else {                                                                    \
   737       /* $$$ Funky */                                                           \
   738       q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
   739     }                                                                           \
   740   }                                                                             \
   741                                                                                 \
   742   const intx scan_interval = PrefetchScanIntervalInBytes;                       \
   743   const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
   744   while (q < t) {                                                               \
   745     if (!oop(q)->is_gc_marked()) {                                              \
   746       /* mark is pointer to next marked oop */                                  \
   747       debug_only(prev_q = q);                                                   \
   748       q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
   749       assert(q > prev_q, "we should be moving forward through memory");         \
   750     } else {                                                                    \
   751       /* prefetch beyond q */                                                   \
   752       Prefetch::read(q, scan_interval);                                         \
   753                                                                                 \
   754       /* size and destination */                                                \
   755       size_t size = obj_size(q);                                                \
   756       HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
   757                                                                                 \
   758       /* prefetch beyond compaction_top */                                      \
   759       Prefetch::write(compaction_top, copy_interval);                           \
   760                                                                                 \
   761       /* copy object and reinit its mark */                                     \
   762       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,            \
   763                                                             compaction_top));   \
   764       assert(q != compaction_top, "everything in this pass should be moving");  \
   765       Copy::aligned_conjoint_words(q, compaction_top, size);                    \
   766       oop(compaction_top)->init_mark();                                         \
   767       assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
   768                                                                                 \
   769       debug_only(prev_q = q);                                                   \
   770       q += size;                                                                \
   771     }                                                                           \
   772   }                                                                             \
   773                                                                                 \
   774   /* Let's remember if we were empty before we did the compaction. */           \
   775   bool was_empty = used_region().is_empty();                                    \
   776   /* Reset space after compaction is complete */                                \
   777   reset_after_compaction();                                                     \
   778   /* We do this clear, below, since it has overloaded meanings for some */      \
   779   /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
   780   /* compacted into will have had their offset table thresholds updated */      \
   781   /* continuously, but those that weren't need to have their thresholds */      \
   782   /* re-initialized.  Also mangles unused area for debugging.           */      \
   783   if (used_region().is_empty()) {                                               \
   784     if (!was_empty) clear(SpaceDecorator::Mangle);                              \
   785   } else {                                                                      \
   786     if (ZapUnusedHeapArea) mangle_unused_area();                                \
   787   }                                                                             \
   788 }
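// Usage sketch (illustrative only; obj_size as assumed above): phase 4 needs
// only a way to compute the size of the object about to be copied, so a
// contiguous space can expand the macro directly.
//
//   void ContiguousSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }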
   790 class GenSpaceMangler;
   792 // A space in which the free area is contiguous.  It therefore supports
   793 // faster allocation, and compaction.
   794 class ContiguousSpace: public CompactibleSpace {
   795   friend class OneContigSpaceCardGeneration;
   796   friend class VMStructs;
   797  protected:
   798   HeapWord* _top;
   799   HeapWord* _concurrent_iteration_safe_limit;
   800   // A helper for mangling the unused area of the space in debug builds.
   801   GenSpaceMangler* _mangler;
   803   GenSpaceMangler* mangler() { return _mangler; }
   805   // Allocation helpers (return NULL if full).
   806   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
   807   inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
   809  public:
   810   ContiguousSpace();
   811   ~ContiguousSpace();
   813   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   814   virtual void clear(bool mangle_space);
   816   // Accessors
   817   HeapWord* top() const            { return _top;    }
   818   void set_top(HeapWord* value)    { _top = value; }
   820   virtual void set_saved_mark()    { _saved_mark_word = top();    }
   821   void reset_saved_mark()          { _saved_mark_word = bottom(); }
   823   WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
   824   WaterMark top_mark()        { return WaterMark(this, top()); }
   825   WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
   826   bool saved_mark_at_top() const { return saved_mark_word() == top(); }
   828   // In debug mode mangle (write it with a particular bit
   829   // pattern) the unused part of a space.
   831   // Used to save an address in a space for later use during mangling.
   832   void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
   833   // Used to save the space's current top for later use during mangling.
   834   void set_top_for_allocations() PRODUCT_RETURN;
   836   // Mangle regions in the space from the current top up to the
   837   // previously mangled part of the space.
   838   void mangle_unused_area() PRODUCT_RETURN;
   839   // Mangle [top, end)
   840   void mangle_unused_area_complete() PRODUCT_RETURN;
   841   // Mangle the given MemRegion.
   842   void mangle_region(MemRegion mr) PRODUCT_RETURN;
   844   // Do some sparse checking on the area that should have been mangled.
   845   void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
   846   // Check the complete area that should have been mangled.
   847   // This check may be a no-op depending on the macro DEBUG_MANGLING.
   848   void check_mangled_unused_area_complete() PRODUCT_RETURN;
   850   // Size computations: sizes in bytes.
   851   size_t capacity() const        { return byte_size(bottom(), end()); }
   852   size_t used() const            { return byte_size(bottom(), top()); }
   853   size_t free() const            { return byte_size(top(),    end()); }
   855   // Override from space.
   856   bool is_in(const void* p) const;
   858   virtual bool is_free_block(const HeapWord* p) const;
   860   // In a contiguous space we have a more obvious bound on what parts
   861   // contain objects.
   862   MemRegion used_region() const { return MemRegion(bottom(), top()); }
   864   MemRegion used_region_at_save_marks() const {
   865     return MemRegion(bottom(), saved_mark_word());
   866   }
   868   // Allocation (return NULL if full)
   869   virtual HeapWord* allocate(size_t word_size);
   870   virtual HeapWord* par_allocate(size_t word_size);
   872   virtual bool obj_allocated_since_save_marks(const oop obj) const {
   873     return (HeapWord*)obj >= saved_mark_word();
   874   }
   876   // Iteration
   877   void oop_iterate(ExtendedOopClosure* cl);
   878   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
   879   void object_iterate(ObjectClosure* blk);
   880   // For contiguous spaces this method will iterate safely over objects
   881   // in the space (i.e., between bottom and top) when at a safepoint.
   882   void safe_object_iterate(ObjectClosure* blk);
   883   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
   884   // Iterates over objects up to the safe limit.
   885   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   886   HeapWord* concurrent_iteration_safe_limit() {
   887     assert(_concurrent_iteration_safe_limit <= top(),
   888            "_concurrent_iteration_safe_limit update missed");
   889     return _concurrent_iteration_safe_limit;
   890   }
   891   // Changes the safe limit; all objects from bottom() to the new
   892   // limit must be properly initialized.
   893   void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
   894     assert(new_limit <= top(), "uninitialized objects in the safe range");
   895     _concurrent_iteration_safe_limit = new_limit;
   896   }
   899 #ifndef SERIALGC
   900   // In support of parallel oop_iterate.
   901   #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
   902     void par_oop_iterate(MemRegion mr, OopClosureType* blk);
   904     ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
   905   #undef ContigSpace_PAR_OOP_ITERATE_DECL
   906 #endif // SERIALGC
   908   // Compaction support
   909   virtual void reset_after_compaction() {
   910     assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
   911     set_top(compaction_top());
   912     // set new iteration safe limit
   913     set_concurrent_iteration_safe_limit(compaction_top());
   914   }
   915   virtual size_t minimum_free_block_size() const { return 0; }
   917   // Override.
   918   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
   919                                      CardTableModRefBS::PrecisionStyle precision,
   920                                      HeapWord* boundary = NULL);
   922   // Apply "blk->do_oop" to the addresses of all reference fields in objects
   923   // starting with the _saved_mark_word, which was noted during a generation's
   924   // save_marks and is required to denote the head of an object.
   925   // Fields in objects allocated by applications of the closure
   926   // *are* included in the iteration.
   927   // Updates _saved_mark_word to point to just after the last object
   928   // iterated over.
   929 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
   930   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
   932   ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
   933 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
   935   // Same as object_iterate, but starting from "mark", which is required
   936   // to denote the start of an object.  Objects allocated by
   937   // applications of the closure *are* included in the iteration.
   938   virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
   940   // Very inefficient implementation.
   941   virtual HeapWord* block_start_const(const void* p) const;
   942   size_t block_size(const HeapWord* p) const;
   943   // If a block is in the allocated area, it is an object.
   944   bool block_is_obj(const HeapWord* p) const { return p < top(); }
   946   // Addresses for inlined allocation
   947   HeapWord** top_addr() { return &_top; }
   948   HeapWord** end_addr() { return &_end; }
   950   // Overrides for more efficient compaction support.
   951   void prepare_for_compaction(CompactPoint* cp);
   953   // PrintHeapAtGC support.
   954   virtual void print_on(outputStream* st) const;
   956   // Checked dynamic downcasts.
   957   virtual ContiguousSpace* toContiguousSpace() {
   958     return this;
   959   }
   961   // Debugging
   962   virtual void verify() const;
   964   // Used to increase collection frequency.  "factor" of 0 means entire
   965   // space.
   966   void allocate_temporary_filler(int factor);
   968 };
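// Usage sketch (illustrative only; "young" and "blk" are hypothetical): the
// save-marks protocol above lets a collector visit only objects allocated
// after a checkpoint.
//
//   young->set_saved_mark();                  // checkpoint: remember the current top()
//   /* ... mutators allocate ... */
//   if (!young->saved_mark_at_top()) {
//     // objects in [saved_mark_word(), top()) were allocated since the checkpoint
//     young->object_iterate_from(young->saved_mark(), blk);
//   }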
   971 // A dirty card to oop closure that does filtering.
   972 // It knows how to filter out objects that are outside of the _boundary.
   973 class Filtering_DCTOC : public DirtyCardToOopClosure {
   974 protected:
   975   // Override.
   976   void walk_mem_region(MemRegion mr,
   977                        HeapWord* bottom, HeapWord* top);
   979   // Walk the given memory region, from bottom to top, applying
   980   // the given oop closure to (possibly) all objects found. The
   981   // given oop closure may or may not be the same as the oop
   982   // closure with which this closure was created, as it may
   983   // be a filtering closure which makes use of the _boundary.
   984   // We offer two signatures, so the FilteringClosure static type is
   985   // apparent.
   986   virtual void walk_mem_region_with_cl(MemRegion mr,
   987                                        HeapWord* bottom, HeapWord* top,
   988                                        ExtendedOopClosure* cl) = 0;
   989   virtual void walk_mem_region_with_cl(MemRegion mr,
   990                                        HeapWord* bottom, HeapWord* top,
   991                                        FilteringClosure* cl) = 0;
   993 public:
   994   Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
   995                   CardTableModRefBS::PrecisionStyle precision,
   996                   HeapWord* boundary) :
   997     DirtyCardToOopClosure(sp, cl, precision, boundary) {}
   998 };
  1000 // A dirty card to oop closure for contiguous spaces
  1001 // (ContiguousSpace and sub-classes).
  1002 // It is a Filtering_DCTOC, as defined above, and it knows:
  1003 //
  1004 // 1. That the actual top of any area in a memory region
  1005 //    contained by the space is bounded by the end of the contiguous
  1006 //    region of the space.
  1007 // 2. That the space is really made up of objects and not just
  1008 //    blocks.
  1010 class ContiguousSpaceDCTOC : public Filtering_DCTOC {
  1011 protected:
  1012   // Overrides.
  1013   HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
  1015   virtual void walk_mem_region_with_cl(MemRegion mr,
  1016                                        HeapWord* bottom, HeapWord* top,
  1017                                        ExtendedOopClosure* cl);
  1018   virtual void walk_mem_region_with_cl(MemRegion mr,
  1019                                        HeapWord* bottom, HeapWord* top,
  1020                                        FilteringClosure* cl);
  1022 public:
  1023   ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
  1024                        CardTableModRefBS::PrecisionStyle precision,
  1025                        HeapWord* boundary) :
  1026     Filtering_DCTOC(sp, cl, precision, boundary)
  1027   {}
  1028 };
  1031 // Class EdenSpace describes eden-space in new generation.
  1033 class DefNewGeneration;
  1035 class EdenSpace : public ContiguousSpace {
  1036   friend class VMStructs;
  1037  private:
  1038   DefNewGeneration* _gen;
  1040   // _soft_end is used as a soft limit on allocation.  As soft limits are
  1041   // reached, the slow-path allocation code can invoke other actions and then
  1042   // adjust _soft_end up to a new soft limit or to end().
  1043   HeapWord* _soft_end;
  1045  public:
  1046   EdenSpace(DefNewGeneration* gen) :
  1047    _gen(gen), _soft_end(NULL) {}
  1049   // Get/set just the 'soft' limit.
  1050   HeapWord* soft_end()               { return _soft_end; }
  1051   HeapWord** soft_end_addr()         { return &_soft_end; }
  1052   void set_soft_end(HeapWord* value) { _soft_end = value; }
  1054   // Override.
  1055   void clear(bool mangle_space);
  1057   // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  1058   void set_end(HeapWord* value) {
  1059     set_soft_end(value);
  1060     ContiguousSpace::set_end(value);
  1061   }
  1063   // Allocation (return NULL if full)
  1064   HeapWord* allocate(size_t word_size);
  1065   HeapWord* par_allocate(size_t word_size);
  1066 };
  1068 // Class ConcEdenSpace extends EdenSpace for the sake of safe
  1069 // allocation while soft-end is being modified concurrently
  1071 class ConcEdenSpace : public EdenSpace {
  1072  public:
  1073   ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
  1075   // Allocation (return NULL if full)
  1076   HeapWord* par_allocate(size_t word_size);
  1077 };
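// Sketch of the idea only (not the actual implementation in space.cpp): the
// lock-free allocation path bumps top() with a compare-and-swap but bounds
// each request by the current *soft* end rather than the hard end, so the
// slow path gets a chance to run policy code (and possibly raise _soft_end
// towards end()) before the space is reported full.
//
//   do {
//     HeapWord* obj = top();
//     if (pointer_delta(soft_end(), obj) >= word_size) {
//       HeapWord* new_top = obj + word_size;
//       // try to CAS top() from obj to new_top; on success return obj, else retry
//     } else {
//       return NULL;   // soft limit reached: let the caller take the slow path
//     }
//   } while (true);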
  1080 // A ContigSpace that supports an efficient "block_start" operation via
  1081 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
  1082 // other spaces.)  This is the abstract base class for old generation
  1083 // (tenured) spaces.
  1085 class OffsetTableContigSpace: public ContiguousSpace {
  1086   friend class VMStructs;
  1087  protected:
  1088   BlockOffsetArrayContigSpace _offsets;
  1089   Mutex _par_alloc_lock;
  1091  public:
  1092   // Constructor
  1093   OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
  1094                          MemRegion mr);
  1096   void set_bottom(HeapWord* value);
  1097   void set_end(HeapWord* value);
  1099   void clear(bool mangle_space);
  1101   inline HeapWord* block_start_const(const void* p) const;
  1103   // Add offset table update.
  1104   virtual inline HeapWord* allocate(size_t word_size);
  1105   inline HeapWord* par_allocate(size_t word_size);
  1107   // MarkSweep support phase3
  1108   virtual HeapWord* initialize_threshold();
  1109   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  1111   virtual void print_on(outputStream* st) const;
  1113   // Debugging
  1114   void verify() const;
  1115 };
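// Usage sketch (illustrative only; the names and constructor arguments are
// assumptions): the offset table is what makes block_start_const(p) "fast" --
// instead of scanning forward from bottom(), the space can jump (almost)
// directly to the block containing "p".
//
//   OffsetTableContigSpace* sp =
//     new OffsetTableContigSpace(shared_offset_array, committed_mr);
//   HeapWord* start = sp->block_start(interior_ptr);   // table-assisted lookup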
  1118 // Class TenuredSpace is used by TenuredGeneration
  1120 class TenuredSpace: public OffsetTableContigSpace {
  1121   friend class VMStructs;
  1122  protected:
  1123   // Mark sweep support
  1124   size_t allowed_dead_ratio() const;
  1125  public:
  1126   // Constructor
  1127   TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
  1128                MemRegion mr) :
  1129     OffsetTableContigSpace(sharedOffsetArray, mr) {}
  1130 };
  1131 #endif // SHARE_VM_MEMORY_SPACE_HPP
