/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space                  -- an abstract base class describing a heap area
//   - CompactibleSpace     -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace    -- a compactible space in which all free space
//                             is contiguous
//       - EdenSpace        -- contiguous space used as nursery
//         - ConcEdenSpace  -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                             that allows "fast" block_start calls
//         - TenuredSpace   -- (used for TenuredGeneration)
//         - ContigPermSpace -- an offset table contiguous space for perm gen

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public OopClosure {
 private:
  OopClosure* _cl;
  MemRegion   _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// A Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

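  // [Illustrative note added by the editor; not part of the original header.]
  // The invariant above can be pictured as:
  //
  //   bottom()            top()                         end()
  //      |<----- used ----->|<----------- free ----------->|
  //      |<------------------- capacity ------------------>|
  //
  // For the contiguous subclasses declared later in this file, capacity(),
  // used() and free() are exactly the byte_size() differences of those three
  // pointers; the base class leaves used() and free() pure virtual because
  // non-contiguous spaces (e.g. the free-list space used by CMS) account for
  // them differently.
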
  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product jvm's,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space.  Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;

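  // [Illustrative sketch added by the editor; not part of the original
  // header.] A minimal example of the closure pattern oop_iterate() expects:
  // a hypothetical closure that merely counts reference fields. Only the two
  // do_oop overloads are needed; the space invokes one or the other depending
  // on whether compressed oops are in use.
  //
  //   class CountOopsClosure : public OopClosure {   // hypothetical helper
  //     size_t _count;
  //    public:
  //     CountOopsClosure() : _count(0) {}
  //     virtual void do_oop(oop* p)       { _count++; }
  //     virtual void do_oop(narrowOop* p) { _count++; }
  //     size_t count() const              { return _count; }
  //   };
  //
  //   CountOopsClosure blk;
  //   sp->oop_iterate(&blk);            // or sp->oop_iterate(mr, &blk)
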
  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This
  // is most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

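  // [Illustrative sketch added by the editor; not part of the original
  // header.] The block_* protocol above supports a simple linear walk over a
  // space; hypothetical caller code (error handling omitted) might look like:
  //
  //   HeapWord* cur = sp->bottom();
  //   while (cur < sp->end()) {            // contiguous spaces would stop
  //     size_t sz = sp->block_size(cur);   // at top() instead
  //     if (sp->block_is_obj(cur)) {
  //       // oop(cur) is an object occupying sz HeapWords
  //     } else {
  //       // [cur, cur + sz) is a free (non-object) chunk
  //     }
  //     cur += sz;
  //   }
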
  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // IF "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  OopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again.  NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

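  // [Illustrative note added by the editor; not part of the original header.]
  // Conceptually, do_MemRegion(mr) below (i) clips "mr" against the used part
  // of _sp (and against _min_done / _boundary), (ii) uses get_actual_top() so
  // that an object straddling the end of the dirty region is processed in its
  // entirety, and (iii) hands the resulting interval to walk_mem_region(),
  // which applies _cl to the reference fields it finds. A rough, hypothetical
  // outline of that flow (the real implementation lives in space.cpp):
  //
  //   HeapWord* top_obj = _sp->block_start(mr.end());
  //   HeapWord* top     = get_actual_top(mr.end(), top_obj);
  //   walk_mem_region(mr, _sp->block_start(mr.start()), top);
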
  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
  friend class CompactingPermGenGen;
  friend class CMSPermGenGen;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

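  // [Illustrative sketch added by the editor; not part of the original
  // header.] The three phase hooks declared below are driven by the
  // mark-sweep collector roughly as follows (hypothetical driver code; the
  // real callers live in the generation / MarkSweep code, not in this file):
  //
  //   CompactPoint cp(the_gen, NULL, NULL);   // see CompactPoint above
  //   space->prepare_for_compaction(&cp);     // phase 2: forwarding pointers
  //   space->adjust_pointers();               // phase 3: fix interior oops
  //   space->compact();                       // phase 4: slide the objects
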
  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; };

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and the space containing "this"
  // (which must also equal "cp->space").  "compact_top" is where in "this"
  // the next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold.  (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

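  // [Illustrative note added by the editor; not part of the original header.]
  // The "deadwood" budget consumed via insert_deadspace() below is computed
  // in SCAN_AND_FORWARD (later in this file) as
  //   allowed_deadspace = (capacity() * allowed_dead_ratio() / 100) / HeapWordSize
  // For example, a 64 MB space with a (hypothetical) allowed_dead_ratio() of
  // 5 may leave up to about 3.2 MB -- 419,430 eight-byte HeapWords on an LP64
  // build -- of unmoved garbage near the bottom before objects must be slid
  // over it.
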
  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded.  If the size allows, fill the free
  // block with an object, to prevent excessive compaction.  Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
  /* Compute the new addresses for the live objects and store it in the mark \
   * Used by universe::mark_sweep_phase2() \
   */ \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
  \
  /* We're sure to be here before any objects are compacted into this \
   * space, so this is a good time to initialize this: \
   */ \
  set_compaction_top(bottom()); \
  \
  if (cp->space == NULL) { \
    assert(cp->gen != NULL, "need a generation"); \
    assert(cp->threshold == NULL, "just checking"); \
    assert(cp->gen->first_compaction_space() == this, "just checking"); \
    cp->space = cp->gen->first_compaction_space(); \
    compact_top = cp->space->bottom(); \
    cp->space->set_compaction_top(compact_top); \
    cp->threshold = cp->space->initialize_threshold(); \
  } else { \
    compact_top = cp->space->compaction_top(); \
  } \
  \
  /* We allow some amount of garbage towards the bottom of the space, so \
   * we don't start compacting before there is a significant gain to be made. \
   * Occasionally, we want to ensure a full compaction, which is determined \
   * by the MarkSweepAlwaysCompactCount parameter. \
   */ \
  int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations; \
  bool skip_dead = (MarkSweepAlwaysCompactCount < 1) \
    || ((invocations % MarkSweepAlwaysCompactCount) != 0); \
  \
  size_t allowed_deadspace = 0; \
  if (skip_dead) { \
    const size_t ratio = allowed_dead_ratio(); \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
  } \
  \
  HeapWord* q = bottom(); \
  HeapWord* t = scan_limit(); \
  \
  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
                                   live object. */ \
  HeapWord*  first_dead = end();/* The first dead object. */ \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the \
                                   first header of preceding free area. \
                                   */ \
  _first_dead = first_dead; \
  \
  const intx interval = PrefetchScanIntervalInBytes; \
  \
  while (q < t) { \
    assert(!block_is_obj(q) || \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
           oop(q)->mark()->has_bias_pattern(), \
           "these are the only valid states during a mark sweep"); \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
      /* prefetch beyond q */ \
      Prefetch::write(q, interval); \
      /* size_t size = oop(q)->size();  changing this for cms for perm gen */ \
      size_t size = block_size(q); \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
      q += size; \
      end_of_live = q; \
    } else { \
      /* run over all the contiguous dead objects */ \
      HeapWord* end = q; \
      do { \
        /* prefetch beyond end */ \
        Prefetch::write(end, interval); \
        end += block_size(end); \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked())); \
      \
      /* see if we might want to pretend this object is alive so that \
       * we don't have to compact quite as often. \
       */ \
      if (allowed_deadspace > 0 && q == compact_top) { \
        size_t sz = pointer_delta(end, q); \
        if (insert_deadspace(allowed_deadspace, q, sz)) { \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
          q = end; \
          end_of_live = end; \
          continue; \
        } \
      } \
      \
      /* otherwise, it really is a free region. */ \
      \
      /* for the previous LiveRange, record the end of the live objects. */ \
      if (liveRange) { \
        liveRange->set_end(q); \
      } \
      \
      /* record the current LiveRange object. \
       * liveRange->start() is overlaid on the mark word. \
       */ \
      liveRange = (LiveRange*)q; \
      liveRange->set_start(end); \
      liveRange->set_end(end); \
      \
      /* see if this is the first dead region. */ \
      if (q < first_dead) { \
        first_dead = q; \
      } \
      \
      /* move on to the next object */ \
      q = end; \
    } \
  } \
  \
  assert(q == t, "just checking"); \
  if (liveRange != NULL) { \
    liveRange->set_end(q); \
  } \
  _end_of_live = end_of_live; \
  if (end_of_live < first_dead) { \
    first_dead = end_of_live; \
  } \
  _first_dead = first_dead; \
  \
  /* save the compaction_top of the compaction space. */ \
  cp->space->set_compaction_top(compact_top); \
}

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
  /* adjust all the interior pointers to point at the new locations of objects \
   * Used by MarkSweep::mark_sweep_phase3() */ \
  \
  HeapWord* q = bottom(); \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
  \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
  \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    /* we have a chunk of the space which hasn't moved and we've \
     * reinitialized the mark word during the previous pass, so we can't \
     * use is_gc_marked for the traversal. \
     */ \
    HeapWord* end = _first_dead; \
    \
    while (q < end) { \
      /* I originally tried to conjoin "block_start(q) == q" to the \
       * assertion below, but that doesn't work, because you can't \
       * accurately traverse previous objects to get to the current one \
       * after their pointers (including pointers into permGen) have been \
       * updated, until the actual compaction is done.  dld, 4/00 */ \
      assert(block_is_obj(q), \
             "should be at block boundaries, and should be looking at objs"); \
      \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
      \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
      \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
      \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
      \
      q += size; \
    } \
    \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ This is funky.  Using this to read the previously written \
       * LiveRange.  See also use below. */ \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
  \
  const intx interval = PrefetchScanIntervalInBytes; \
  \
  debug_only(HeapWord* prev_q = NULL); \
  while (q < t) { \
    /* prefetch beyond q */ \
    Prefetch::write(q, interval); \
    if (oop(q)->is_gc_marked()) { \
      /* q is alive */ \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
      /* point all the oops to the new location */ \
      size_t size = oop(q)->adjust_pointers(); \
      size = adjust_obj_size(size); \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
      debug_only(prev_q = q); \
      q += size; \
    } else { \
      /* q is not a live object, so its mark should point at the next \
       * live object */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } \
  } \
  \
  assert(q == t, "just checking"); \
}

#define SCAN_AND_COMPACT(obj_size) { \
  /* Copy all live objects to their new location \
   * Used by MarkSweep::mark_sweep_phase4() */ \
  \
  HeapWord*       q = bottom(); \
  HeapWord* const t = _end_of_live; \
  debug_only(HeapWord* prev_q = NULL); \
  \
  if (q < t && _first_dead > q && \
      !oop(q)->is_gc_marked()) { \
    debug_only( \
    /* we have a chunk of the space which hasn't moved and we've reinitialized \
     * the mark word during the previous pass, so we can't use is_gc_marked for \
     * the traversal. \
     */ \
    HeapWord* const end = _first_dead; \
    \
    while (q < end) { \
      size_t size = obj_size(q); \
      assert(!oop(q)->is_gc_marked(), \
             "should be unmarked (special dense prefix handling)"); \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
      debug_only(prev_q = q); \
      q += size; \
    } \
    ) /* debug_only */ \
    \
    if (_first_dead == t) { \
      q = t; \
    } else { \
      /* $$$ Funky */ \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
    } \
  } \
  \
  const intx scan_interval = PrefetchScanIntervalInBytes; \
  const intx copy_interval = PrefetchCopyIntervalInBytes; \
  while (q < t) { \
    if (!oop(q)->is_gc_marked()) { \
      /* mark is pointer to next marked oop */ \
      debug_only(prev_q = q); \
      q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
      assert(q > prev_q, "we should be moving forward through memory"); \
    } else { \
      /* prefetch beyond q */ \
      Prefetch::read(q, scan_interval); \
      \
      /* size and destination */ \
      size_t size = obj_size(q); \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
      \
      /* prefetch beyond compaction_top */ \
      Prefetch::write(compaction_top, copy_interval); \
      \
      /* copy object and reinit its mark */ \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
                                                            compaction_top)); \
      assert(q != compaction_top, "everything in this pass should be moving"); \
      Copy::aligned_conjoint_words(q, compaction_top, size); \
      oop(compaction_top)->init_mark(); \
      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
      \
      debug_only(prev_q = q); \
      q += size; \
    } \
  } \
  \
  /* Let's remember if we were empty before we did the compaction. */ \
  bool was_empty = used_region().is_empty(); \
  /* Reset space after compaction is complete */ \
  reset_after_compaction(); \
  /* We do this clear, below, since it has overloaded meanings for some */ \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */ \
  /* compacted into will have had their offset table thresholds updated */ \
  /* continuously, but those that weren't need to have their thresholds */ \
  /* re-initialized.  Also mangles unused area for debugging.           */ \
  if (used_region().is_empty()) { \
    if (!was_empty) clear(SpaceDecorator::Mangle); \
  } else { \
    if (ZapUnusedHeapArea) mangle_unused_area(); \
  } \
}

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

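  // [Illustrative sketch added by the editor; not part of the original
  // header.] The allocate_impl()/par_allocate_impl() helpers declared above
  // are defined elsewhere; the following is only a sketch of the usual
  // bump-the-pointer scheme a contiguous space uses, to show why allocation
  // here is cheap. Serial case, in outline:
  //
  //   HeapWord* obj = top();
  //   if (pointer_delta(end_value, obj) >= word_size) {
  //     set_top(obj + word_size);        // claim [obj, obj + word_size)
  //     return obj;                      // caller initializes the object
  //   }
  //   return NULL;                       // space (or soft limit) exhausted
  //
  // The parallel variant typically replaces set_top() with an atomic
  // compare-and-exchange on _top and retries on contention, so multiple
  // threads can allocate without taking a lock.
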
  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  inline HeapWord* concurrent_iteration_safe_limit();
  // changes the safe limit, all objects from bottom() to the new
  // limit should be properly initialized
  inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);

#ifndef SERIALGC
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

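  // [Illustrative note added by the editor; not part of the original header.]
  // Typical use of the saved-mark machinery (hypothetical caller code): a
  // generation calls set_saved_mark() to remember the current top(), lets
  // allocation or promotion proceed, and then scans only what was allocated
  // since the mark:
  //
  //   cs->set_saved_mark();      // _saved_mark_word = top()
  //   ... allocate / promote objects into the space ...
  //   // now apply a closure with one of the oop_since_save_marks_iterate*
  //   // variants generated just above; only the new objects are visited.
  //
  // obj_allocated_since_save_marks(obj), defined earlier in this class, is
  // then just a pointer comparison against saved_mark_word().
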
  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

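// [Illustrative note added by the editor; not part of the original header.]
// The two walk_mem_region_with_cl() overloads above let a subclass generate
// its walking loop once per *static* closure type: the FilteringClosure
// overload allows the boundary test and do_oop calls to be resolved (and
// inlined) at compile time, while the OopClosure overload keeps the generic,
// virtually-dispatched path for every other closure. A hypothetical subclass
// might implement both by delegating to a single templated helper, e.g.
//
//   template <class ClosureType>            // hypothetical, for illustration
//   void walk_helper(MemRegion mr, HeapWord* bottom, HeapWord* top,
//                    ClosureType* cl);
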
// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringClosure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation.  As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
   _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContigSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.)  This is the abstract base class for old generation
// (tenured, perm) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

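  // [Illustrative note added by the editor; not part of the original header.]
  // The _offsets table above is what makes block_start_const() "fast" here:
  // rather than walking block by block from bottom() (the "very inefficient
  // implementation" in ContiguousSpace), the space is divided into fixed-size
  // cards and the table records, per card, how far back the block covering
  // that card begins. A lookup is then roughly (hypothetical sketch, made-up
  // names, ignoring the encoding used for very large blocks):
  //
  //   idx   = (addr - bottom()) >> log_card_size;
  //   start = card_boundary(idx) - back_offset_for(idx);
  //   while (start + block_size(start) <= addr)   // at most a few steps
  //     start += block_size(start);
  //   return start;
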
  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify(bool allow_dirty) const;

  // Shared space support
  void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};


// Class ContigPermSpace is used by CompactingPermGen

class ContigPermSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};

#endif // SHARE_VM_MEMORY_SPACE_HPP