Mon, 23 Jun 2008 16:49:37 -0700
6718086: CMS assert: _concurrent_iteration_safe_limit update missed
Summary: Initialize the field correctly in ContiguousSpace's constructor and initialize() methods, using the latter for the survivor spaces upon initial construction or a subsequent resizing of the young generation. Add some missing Space sub-class constructors.
Reviewed-by: apetrusenko
duke@435 | 1 | /* |
duke@435 | 2 | * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | // A space is an abstraction for the "storage units" backing |
duke@435 | 26 | // up the generation abstraction. It includes specific |
duke@435 | 27 | // implementations for keeping track of free and used space, |
duke@435 | 28 | // for iterating over objects and free blocks, etc. |
duke@435 | 29 | |
duke@435 | 30 | // Here's the Space hierarchy: |
duke@435 | 31 | // |
duke@435 | 32 | // - Space -- an abstract base class describing a heap area |
duke@435 | 33 | // - CompactibleSpace -- a space supporting compaction |
duke@435 | 34 | // - CompactibleFreeListSpace -- (used for CMS generation) |
duke@435 | 35 | // - ContiguousSpace -- a compactible space in which all free space |
duke@435 | 36 | // is contiguous |
duke@435 | 37 | // - EdenSpace -- contiguous space used as nursery |
duke@435 | 38 | // - ConcEdenSpace -- contiguous space with allocation safe against concurrent 'soft end' updates |
duke@435 | 39 | // - OffsetTableContigSpace -- contiguous space with a block offset array |
duke@435 | 40 | // that allows "fast" block_start calls |
duke@435 | 41 | // - TenuredSpace -- (used for TenuredGeneration) |
duke@435 | 42 | // - ContigPermSpace -- an offset table contiguous space for perm gen |
duke@435 | 43 | |
duke@435 | 44 | // Forward decls. |
duke@435 | 45 | class Space; |
duke@435 | 46 | class BlockOffsetArray; |
duke@435 | 47 | class BlockOffsetArrayContigSpace; |
duke@435 | 48 | class Generation; |
duke@435 | 49 | class CompactibleSpace; |
duke@435 | 50 | class BlockOffsetTable; |
duke@435 | 51 | class GenRemSet; |
duke@435 | 52 | class CardTableRS; |
duke@435 | 53 | class DirtyCardToOopClosure; |
duke@435 | 54 | |
duke@435 | 55 | // An oop closure that is circumscribed by a filtering memory region. |
coleenp@548 | 56 | class SpaceMemRegionOopsIterClosure: public OopClosure { |
coleenp@548 | 57 | private: |
coleenp@548 | 58 | OopClosure* _cl; |
coleenp@548 | 59 | MemRegion _mr; |
coleenp@548 | 60 | protected: |
coleenp@548 | 61 | template <class T> void do_oop_work(T* p) { |
coleenp@548 | 62 | if (_mr.contains(p)) { |
coleenp@548 | 63 | _cl->do_oop(p); |
duke@435 | 64 | } |
duke@435 | 65 | } |
coleenp@548 | 66 | public: |
coleenp@548 | 67 | SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr): |
coleenp@548 | 68 | _cl(cl), _mr(mr) {} |
coleenp@548 | 69 | virtual void do_oop(oop* p); |
coleenp@548 | 70 | virtual void do_oop(narrowOop* p); |
duke@435 | 71 | }; |
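// Usage sketch (hypothetical): this closure just interposes a
// MemRegion::contains() test in front of an arbitrary OopClosure.
// "CountOopsClosure" below is an assumed example closure, not part of
// this file:
//
//   class CountOopsClosure: public OopClosure {
//    public:
//     size_t count;
//     CountOopsClosure(): count(0) {}
//     virtual void do_oop(oop* p)       { count++; }
//     virtual void do_oop(narrowOop* p) { count++; }
//   };
//
//   // Count only the reference fields whose addresses fall inside "mr".
//   size_t count_oops_in(Space* sp, MemRegion mr) {
//     CountOopsClosure counter;
//     SpaceMemRegionOopsIterClosure filtered(&counter, mr);
//     sp->oop_iterate(&filtered);   // fields outside "mr" are skipped
//     return counter.count;
//   }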
duke@435 | 72 | |
duke@435 | 73 | // A Space describes a heap area. Class Space is an abstract |
duke@435 | 74 | // base class. |
duke@435 | 75 | // |
duke@435 | 76 | // Space supports allocation, size computation, and GC-related functionality. |
duke@435 | 77 | // |
duke@435 | 78 | // Invariant: bottom() and end() are on page_size boundaries and |
duke@435 | 79 | // bottom() <= top() <= end() |
duke@435 | 80 | // top() is inclusive and end() is exclusive. |
duke@435 | 81 | |
duke@435 | 82 | class Space: public CHeapObj { |
duke@435 | 83 | friend class VMStructs; |
duke@435 | 84 | protected: |
duke@435 | 85 | HeapWord* _bottom; |
duke@435 | 86 | HeapWord* _end; |
duke@435 | 87 | |
duke@435 | 88 | // Used in support of save_marks() |
duke@435 | 89 | HeapWord* _saved_mark_word; |
duke@435 | 90 | |
duke@435 | 91 | MemRegionClosure* _preconsumptionDirtyCardClosure; |
duke@435 | 92 | |
duke@435 | 93 | // A "SequentialSubTasksDone" structure. This supports |
duke@435 | 94 | // parallel GC, where we have threads dynamically |
duke@435 | 95 | // claiming sub-tasks from a larger parallel task. |
duke@435 | 96 | SequentialSubTasksDone _par_seq_tasks; |
duke@435 | 97 | |
duke@435 | 98 | Space(): |
duke@435 | 99 | _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { } |
duke@435 | 100 | |
duke@435 | 101 | public: |
duke@435 | 102 | // Accessors |
duke@435 | 103 | HeapWord* bottom() const { return _bottom; } |
duke@435 | 104 | HeapWord* end() const { return _end; } |
duke@435 | 105 | virtual void set_bottom(HeapWord* value) { _bottom = value; } |
duke@435 | 106 | virtual void set_end(HeapWord* value) { _end = value; } |
duke@435 | 107 | |
ysr@777 | 108 | virtual HeapWord* saved_mark_word() const { return _saved_mark_word; } |
duke@435 | 109 | void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; } |
duke@435 | 110 | |
duke@435 | 111 | MemRegionClosure* preconsumptionDirtyCardClosure() const { |
duke@435 | 112 | return _preconsumptionDirtyCardClosure; |
duke@435 | 113 | } |
duke@435 | 114 | void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) { |
duke@435 | 115 | _preconsumptionDirtyCardClosure = cl; |
duke@435 | 116 | } |
duke@435 | 117 | |
duke@435 | 118 | // Returns a subregion of the space containing all the objects in |
duke@435 | 119 | // the space. |
duke@435 | 120 | virtual MemRegion used_region() const { return MemRegion(bottom(), end()); } |
duke@435 | 121 | |
duke@435 | 122 | // Returns a region that is guaranteed to contain (at least) all objects |
duke@435 | 123 | // allocated at the time of the last call to "save_marks". If the space |
duke@435 | 124 | // initializes its DirtyCardToOopClosures specifying the "contig" option |
duke@435 | 125 | // (that is, if the space is contiguous), then this region must contain only |
duke@435 | 126 | // such objects: the memregion will be from the bottom of the region to the |
duke@435 | 127 | // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of |
duke@435 | 128 | // the space must distinguish between objects in the region allocated before |
duke@435 | 129 | // and after the call to save marks. |
duke@435 | 130 | virtual MemRegion used_region_at_save_marks() const { |
duke@435 | 131 | return MemRegion(bottom(), saved_mark_word()); |
duke@435 | 132 | } |
duke@435 | 133 | |
ysr@777 | 134 | // Initialization. |
ysr@777 | 135 | // "initialize" should be called once on a space, before it is used for |
ysr@777 | 136 | // any purpose. The "mr" arguments gives the bounds of the space, and |
ysr@777 | 137 | // the "clear_space" argument should be true unless the memory in "mr" is |
ysr@777 | 138 | // known to be zeroed. |
duke@435 | 139 | virtual void initialize(MemRegion mr, bool clear_space); |
ysr@777 | 140 | |
ysr@777 | 141 | // Sets the bounds (bottom and end) of the current space to those of "mr." |
ysr@777 | 142 | void set_bounds(MemRegion mr); |
ysr@777 | 143 | |
ysr@777 | 144 | // The "clear" method must be called on a region that may have |
ysr@777 | 145 | // had allocation performed in it, but is now to be considered empty. |
duke@435 | 146 | virtual void clear(); |
duke@435 | 147 | |
duke@435 | 148 | // For detecting GC bugs. Should only be called at GC boundaries, since |
duke@435 | 149 | // some unused space may be used as scratch space during GC's. |
duke@435 | 150 | // Default implementation does nothing. We also call this when expanding |
duke@435 | 151 | // a space to satisfy an allocation request. See bug #4668531 |
duke@435 | 152 | virtual void mangle_unused_area() {} |
duke@435 | 153 | virtual void mangle_region(MemRegion mr) {} |
duke@435 | 154 | |
duke@435 | 155 | // Testers |
duke@435 | 156 | bool is_empty() const { return used() == 0; } |
duke@435 | 157 | bool not_empty() const { return used() > 0; } |
duke@435 | 158 | |
duke@435 | 159 | // Returns true iff the space contains the |
duke@435 | 160 | // given address as part of an allocated object. For |
duke@435 | 161 | // certain kinds of spaces, this might be a potentially |
duke@435 | 162 | // expensive operation. To prevent performance problems |
duke@435 | 163 | // on account of its inadvertent use in product JVMs, |
duke@435 | 164 | // we restrict its use to assertion checks only. |
duke@435 | 165 | virtual bool is_in(const void* p) const; |
duke@435 | 166 | |
duke@435 | 167 | // Returns true iff the reserved memory of the space contains the |
duke@435 | 168 | // given address. |
duke@435 | 169 | bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; } |
duke@435 | 170 | |
duke@435 | 171 | // Returns true iff the given block is not allocated. |
duke@435 | 172 | virtual bool is_free_block(const HeapWord* p) const = 0; |
duke@435 | 173 | |
duke@435 | 174 | // Test whether p is double-aligned |
duke@435 | 175 | static bool is_aligned(void* p) { |
duke@435 | 176 | return ((intptr_t)p & (sizeof(double)-1)) == 0; |
duke@435 | 177 | } |
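// Worked example (assuming an 8-byte double): the mask keeps the low
// three address bits, so
//   is_aligned((void*)0x1000) is true  (0x1000 & 0x7 == 0x0), while
//   is_aligned((void*)0x1004) is false (0x1004 & 0x7 == 0x4).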
duke@435 | 178 | |
duke@435 | 179 | // Size computations. Sizes are in bytes. |
duke@435 | 180 | size_t capacity() const { return byte_size(bottom(), end()); } |
duke@435 | 181 | virtual size_t used() const = 0; |
duke@435 | 182 | virtual size_t free() const = 0; |
duke@435 | 183 | |
duke@435 | 184 | // Iterate over all the ref-containing fields of all objects in the |
duke@435 | 185 | // space, calling "cl.do_oop" on each. Fields in objects allocated by |
duke@435 | 186 | // applications of the closure are not included in the iteration. |
duke@435 | 187 | virtual void oop_iterate(OopClosure* cl); |
duke@435 | 188 | |
duke@435 | 189 | // Same as above, restricted to the intersection of a memory region and |
duke@435 | 190 | // the space. Fields in objects allocated by applications of the closure |
duke@435 | 191 | // are not included in the iteration. |
duke@435 | 192 | virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0; |
duke@435 | 193 | |
duke@435 | 194 | // Iterate over all objects in the space, calling "cl.do_object" on |
duke@435 | 195 | // each. Objects allocated by applications of the closure are not |
duke@435 | 196 | // included in the iteration. |
duke@435 | 197 | virtual void object_iterate(ObjectClosure* blk) = 0; |
duke@435 | 198 | |
duke@435 | 199 | // Iterate over all objects that intersect with mr, calling "cl->do_object" |
duke@435 | 200 | // on each. There is an exception to this: if this closure has already |
duke@435 | 201 | // been invoked on an object, it may skip such objects in some cases. This is |
duke@435 | 202 | // most likely to happen in an "upwards" (ascending address) iteration of |
duke@435 | 203 | // MemRegions. |
duke@435 | 204 | virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
duke@435 | 205 | |
duke@435 | 206 | // Iterate over as many initialized objects in the space as possible, |
duke@435 | 207 | // calling "cl.do_object_careful" on each. Return NULL if all objects |
duke@435 | 208 | // in the space (at the start of the iteration) were iterated over. |
duke@435 | 209 | // Return an address indicating the extent of the iteration in the |
duke@435 | 210 | // event that the iteration had to return because of finding an |
duke@435 | 211 | // uninitialized object in the space, or if the closure "cl" |
duke@435 | 212 | // signalled early termination. |
duke@435 | 213 | virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl); |
duke@435 | 214 | virtual HeapWord* object_iterate_careful_m(MemRegion mr, |
duke@435 | 215 | ObjectClosureCareful* cl); |
duke@435 | 216 | |
duke@435 | 217 | // Create and return a new dirty card to oop closure. Can be |
duke@435 | 218 | // overridden to return the appropriate type of closure |
duke@435 | 219 | // depending on the type of space in which the closure will |
duke@435 | 220 | // operate. ResourceArea allocated. |
duke@435 | 221 | virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl, |
duke@435 | 222 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 223 | HeapWord* boundary = NULL); |
duke@435 | 224 | |
duke@435 | 225 | // If "p" is in the space, returns the address of the start of the |
duke@435 | 226 | // "block" that contains "p". We say "block" instead of "object" since |
duke@435 | 227 | // some heaps may not pack objects densely; a chunk may either be an |
duke@435 | 228 | // object or a non-object. If "p" is not in the space, return NULL. |
ysr@777 | 229 | virtual HeapWord* block_start_const(const void* p) const = 0; |
ysr@777 | 230 | |
ysr@777 | 231 | // The non-const version may have benevolent side effects on the data |
ysr@777 | 232 | // structure supporting these calls, possibly speeding up future calls. |
ysr@777 | 233 | // The default implementation, however, is simply to call the const |
ysr@777 | 234 | // version. |
ysr@777 | 235 | inline virtual HeapWord* block_start(const void* p); |
duke@435 | 236 | |
duke@435 | 237 | // Requires "addr" to be the start of a chunk, and returns its size. |
duke@435 | 238 | // "addr + size" is required to be the start of a new chunk, or the end |
duke@435 | 239 | // of the active area of the heap. |
duke@435 | 240 | virtual size_t block_size(const HeapWord* addr) const = 0; |
duke@435 | 241 | |
duke@435 | 242 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
duke@435 | 243 | // the block is an object. |
duke@435 | 244 | virtual bool block_is_obj(const HeapWord* addr) const = 0; |
duke@435 | 245 | |
duke@435 | 246 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
duke@435 | 247 | // the block is an object and the object is alive. |
duke@435 | 248 | virtual bool obj_is_alive(const HeapWord* addr) const; |
duke@435 | 249 | |
duke@435 | 250 | // Allocation (return NULL if full). Assumes the caller has established |
duke@435 | 251 | // mutually exclusive access to the space. |
duke@435 | 252 | virtual HeapWord* allocate(size_t word_size) = 0; |
duke@435 | 253 | |
duke@435 | 254 | // Allocation (return NULL if full). Enforces mutual exclusion internally. |
duke@435 | 255 | virtual HeapWord* par_allocate(size_t word_size) = 0; |
duke@435 | 256 | |
duke@435 | 257 | // Returns true if this object has been allocated since a |
duke@435 | 258 | // generation's "save_marks" call. |
duke@435 | 259 | virtual bool obj_allocated_since_save_marks(const oop obj) const = 0; |
duke@435 | 260 | |
duke@435 | 261 | // Mark-sweep-compact support: all spaces can update pointers to objects |
duke@435 | 262 | // moving as a part of compaction. |
duke@435 | 263 | virtual void adjust_pointers(); |
duke@435 | 264 | |
duke@435 | 265 | // PrintHeapAtGC support |
duke@435 | 266 | virtual void print() const; |
duke@435 | 267 | virtual void print_on(outputStream* st) const; |
duke@435 | 268 | virtual void print_short() const; |
duke@435 | 269 | virtual void print_short_on(outputStream* st) const; |
duke@435 | 270 | |
duke@435 | 271 | |
duke@435 | 272 | // Accessor for parallel sequential tasks. |
duke@435 | 273 | SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; } |
duke@435 | 274 | |
duke@435 | 275 | // IF "this" is a ContiguousSpace, return it, else return NULL. |
duke@435 | 276 | virtual ContiguousSpace* toContiguousSpace() { |
duke@435 | 277 | return NULL; |
duke@435 | 278 | } |
duke@435 | 279 | |
duke@435 | 280 | // Debugging |
duke@435 | 281 | virtual void verify(bool allow_dirty) const = 0; |
duke@435 | 282 | }; |
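// Illustrative fragment (the names "sp" and "word_size" are assumed):
// the two allocation entry points differ only in their locking contract.
//
//   HeapWord* res = sp->allocate(word_size);      // caller already holds
//                                                 // the allocation lock
//   HeapWord* par = sp->par_allocate(word_size);  // callable from several
//                                                 // threads at once; the
//                                                 // space synchronizes
//                                                 // internally
//   if (par == NULL) {
//     // NULL means "full"; the caller decides whether to expand, collect,
//     // or fail the allocation.
//   }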
duke@435 | 283 | |
duke@435 | 284 | // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an |
duke@435 | 285 | // OopClosure to (the addresses of) all the ref-containing fields that could |
duke@435 | 286 | // be modified by virtue of the given MemRegion being dirty. (Note that |
duke@435 | 287 | // because of the imprecise nature of the write barrier, this may iterate |
duke@435 | 288 | // over oops beyond the region.) |
duke@435 | 289 | // This base type for dirty card to oop closures handles memory regions |
duke@435 | 290 | // in non-contiguous spaces with no boundaries, and should be sub-classed |
duke@435 | 291 | // to support other space types. See ContiguousDCTOC for a sub-class |
duke@435 | 292 | // that works with ContiguousSpaces. |
duke@435 | 293 | |
duke@435 | 294 | class DirtyCardToOopClosure: public MemRegionClosureRO { |
duke@435 | 295 | protected: |
duke@435 | 296 | OopClosure* _cl; |
duke@435 | 297 | Space* _sp; |
duke@435 | 298 | CardTableModRefBS::PrecisionStyle _precision; |
duke@435 | 299 | HeapWord* _boundary; // If non-NULL, process only non-NULL oops |
duke@435 | 300 | // pointing below boundary. |
ysr@777 | 301 | HeapWord* _min_done; // ObjHeadPreciseArray precision requires |
duke@435 | 302 | // a downwards traversal; this is the |
duke@435 | 303 | // lowest location already done (or, |
duke@435 | 304 | // alternatively, the lowest address that |
duke@435 | 305 | // shouldn't be done again). NULL means infinity. |
duke@435 | 306 | NOT_PRODUCT(HeapWord* _last_bottom;) |
ysr@777 | 307 | NOT_PRODUCT(HeapWord* _last_explicit_min_done;) |
duke@435 | 308 | |
duke@435 | 309 | // Get the actual top of the area on which the closure will |
duke@435 | 310 | // operate, given where the top is assumed to be (the end of the |
duke@435 | 311 | // memory region passed to do_MemRegion) and where the object |
duke@435 | 312 | // at the top is assumed to start. For example, an object may |
duke@435 | 313 | // start at the top but actually extend past the assumed top, |
duke@435 | 314 | // in which case the top becomes the end of the object. |
duke@435 | 315 | virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); |
duke@435 | 316 | |
duke@435 | 317 | // Walk the given memory region from bottom to (actual) top |
duke@435 | 318 | // looking for objects and applying the oop closure (_cl) to |
duke@435 | 319 | // them. The base implementation of this treats the area as |
duke@435 | 320 | // blocks, where a block may or may not be an object. Sub- |
duke@435 | 321 | // classes should override this to provide more accurate |
duke@435 | 322 | // or possibly more efficient walking. |
duke@435 | 323 | virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); |
duke@435 | 324 | |
duke@435 | 325 | public: |
duke@435 | 326 | DirtyCardToOopClosure(Space* sp, OopClosure* cl, |
duke@435 | 327 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 328 | HeapWord* boundary) : |
duke@435 | 329 | _sp(sp), _cl(cl), _precision(precision), _boundary(boundary), |
duke@435 | 330 | _min_done(NULL) { |
ysr@777 | 331 | NOT_PRODUCT(_last_bottom = NULL); |
ysr@777 | 332 | NOT_PRODUCT(_last_explicit_min_done = NULL); |
duke@435 | 333 | } |
duke@435 | 334 | |
duke@435 | 335 | void do_MemRegion(MemRegion mr); |
duke@435 | 336 | |
duke@435 | 337 | void set_min_done(HeapWord* min_done) { |
duke@435 | 338 | _min_done = min_done; |
ysr@777 | 339 | NOT_PRODUCT(_last_explicit_min_done = _min_done); |
duke@435 | 340 | } |
duke@435 | 341 | #ifndef PRODUCT |
duke@435 | 342 | void set_last_bottom(HeapWord* last_bottom) { |
duke@435 | 343 | _last_bottom = last_bottom; |
duke@435 | 344 | } |
duke@435 | 345 | #endif |
duke@435 | 346 | }; |
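// Hypothetical driver sketch: the card-table/remembered-set code (not in
// this file) finds maximal runs of dirty cards and hands each covered
// region to a closure obtained from Space::new_dcto_cl():
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::ObjHeadPreciseArray,
//                     NULL /* boundary */);
//   // for each maximal run of dirty cards, covered by MemRegion dirty_run:
//   dcto_cl->do_MemRegion(dirty_run);  // may touch oops beyond the run,
//                                      // per the imprecision noted above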
duke@435 | 347 | |
duke@435 | 348 | // A structure to represent a point at which objects are being copied |
duke@435 | 349 | // during compaction. |
duke@435 | 350 | class CompactPoint : public StackObj { |
duke@435 | 351 | public: |
duke@435 | 352 | Generation* gen; |
duke@435 | 353 | CompactibleSpace* space; |
duke@435 | 354 | HeapWord* threshold; |
duke@435 | 355 | CompactPoint(Generation* _gen, CompactibleSpace* _space, |
duke@435 | 356 | HeapWord* _threshold) : |
duke@435 | 357 | gen(_gen), space(_space), threshold(_threshold) {} |
duke@435 | 358 | }; |
duke@435 | 359 | |
duke@435 | 360 | |
duke@435 | 361 | // A space that supports compaction operations. This is usually, but not |
duke@435 | 362 | // necessarily, a space that is normally contiguous. But, for example, a |
duke@435 | 363 | // free-list-based space whose normal collection is a mark-sweep without |
duke@435 | 364 | // compaction could still support compaction in full GC's. |
duke@435 | 365 | |
duke@435 | 366 | class CompactibleSpace: public Space { |
duke@435 | 367 | friend class VMStructs; |
duke@435 | 368 | friend class CompactibleFreeListSpace; |
duke@435 | 369 | friend class CompactingPermGenGen; |
duke@435 | 370 | friend class CMSPermGenGen; |
duke@435 | 371 | private: |
duke@435 | 372 | HeapWord* _compaction_top; |
duke@435 | 373 | CompactibleSpace* _next_compaction_space; |
duke@435 | 374 | |
duke@435 | 375 | public: |
ysr@782 | 376 | CompactibleSpace() : |
ysr@782 | 377 | _compaction_top(NULL), _next_compaction_space(NULL) {} |
ysr@782 | 378 | |
duke@435 | 379 | virtual void initialize(MemRegion mr, bool clear_space); |
ysr@777 | 380 | virtual void clear(); |
duke@435 | 381 | |
duke@435 | 382 | // Used temporarily during a compaction phase to hold the value |
duke@435 | 383 | // top should have when compaction is complete. |
duke@435 | 384 | HeapWord* compaction_top() const { return _compaction_top; } |
duke@435 | 385 | |
duke@435 | 386 | void set_compaction_top(HeapWord* value) { |
duke@435 | 387 | assert(value == NULL || (value >= bottom() && value <= end()), |
duke@435 | 388 | "should point inside space"); |
duke@435 | 389 | _compaction_top = value; |
duke@435 | 390 | } |
duke@435 | 391 | |
duke@435 | 392 | // Perform operations on the space needed after a compaction |
duke@435 | 393 | // has been performed. |
duke@435 | 394 | virtual void reset_after_compaction() {} |
duke@435 | 395 | |
duke@435 | 396 | // Returns the next space (in the current generation) to be compacted in |
duke@435 | 397 | // the global compaction order. It is also used to select the next |
duke@435 | 398 | // space into which to compact. |
duke@435 | 399 | |
duke@435 | 400 | virtual CompactibleSpace* next_compaction_space() const { |
duke@435 | 401 | return _next_compaction_space; |
duke@435 | 402 | } |
duke@435 | 403 | |
duke@435 | 404 | void set_next_compaction_space(CompactibleSpace* csp) { |
duke@435 | 405 | _next_compaction_space = csp; |
duke@435 | 406 | } |
duke@435 | 407 | |
duke@435 | 408 | // MarkSweep support phase2 |
duke@435 | 409 | |
duke@435 | 410 | // Start the process of compaction of the current space: compute |
duke@435 | 411 | // post-compaction addresses, and insert forwarding pointers. The fields |
duke@435 | 412 | // "cp->gen" and "cp->compaction_space" are the generation and space into |
duke@435 | 413 | // which we are currently compacting. This call updates "cp" as necessary, |
duke@435 | 414 | // and leaves the "compaction_top" of the final value of |
duke@435 | 415 | // "cp->compaction_space" up-to-date. Offset tables may be updated in |
duke@435 | 416 | // this phase as if the final copy had occurred; if so, "cp->threshold" |
duke@435 | 417 | // indicates when the next such action should be taken. |
duke@435 | 418 | virtual void prepare_for_compaction(CompactPoint* cp); |
duke@435 | 419 | // MarkSweep support phase3 |
duke@435 | 420 | virtual void adjust_pointers(); |
duke@435 | 421 | // MarkSweep support phase4 |
duke@435 | 422 | virtual void compact(); |
duke@435 | 423 | |
duke@435 | 424 | // The maximum percentage of objects that can be dead in the compacted |
duke@435 | 425 | // live part of the space ("deadwood" support). |
duke@435 | 426 | virtual int allowed_dead_ratio() const { return 0; }; |
duke@435 | 427 | |
duke@435 | 428 | // Some contiguous spaces may maintain some data structures that should |
duke@435 | 429 | // be updated whenever an allocation crosses a boundary. This function |
duke@435 | 430 | // returns the first such boundary. |
duke@435 | 431 | // (The default implementation returns the end of the space, so the |
duke@435 | 432 | // boundary is never crossed.) |
duke@435 | 433 | virtual HeapWord* initialize_threshold() { return end(); } |
duke@435 | 434 | |
duke@435 | 435 | // "q" is an object of the given "size" that should be forwarded; |
duke@435 | 436 | // "cp" names the generation ("gen") and containing "this" (which must |
duke@435 | 437 | // also equal "cp->space"). "compact_top" is where in "this" the |
duke@435 | 438 | // next object should be forwarded to. If there is room in "this" for |
duke@435 | 439 | // the object, insert an appropriate forwarding pointer in "q". |
duke@435 | 440 | // If not, go to the next compaction space (there must |
duke@435 | 441 | // be one, since compaction must succeed -- we go to the first space of |
duke@435 | 442 | // the previous generation if necessary, updating "cp"), reset compact_top |
duke@435 | 443 | // and then forward. In either case, returns the new value of "compact_top". |
duke@435 | 444 | // If the forwarding crosses "cp->threshold", invokes the "cross_threshold" |
duke@435 | 445 | // function of the then-current compaction space, and updates |
duke@435 | 446 | // "cp->threshold" accordingly. |
duke@435 | 447 | virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, |
duke@435 | 448 | HeapWord* compact_top); |
duke@435 | 449 | |
duke@435 | 450 | // Return a size with adjustments as required of the space. |
duke@435 | 451 | virtual size_t adjust_object_size_v(size_t size) const { return size; } |
duke@435 | 452 | |
duke@435 | 453 | protected: |
duke@435 | 454 | // Used during compaction. |
duke@435 | 455 | HeapWord* _first_dead; |
duke@435 | 456 | HeapWord* _end_of_live; |
duke@435 | 457 | |
duke@435 | 458 | // Minimum size of a free block. |
duke@435 | 459 | virtual size_t minimum_free_block_size() const = 0; |
duke@435 | 460 | |
duke@435 | 461 | // This function is invoked when the allocation of an object covering |
duke@435 | 462 | // "start" to "end" crosses the threshold; returns the next |
duke@435 | 463 | // threshold. (The default implementation does nothing.) |
duke@435 | 464 | virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) { |
duke@435 | 465 | return end(); |
duke@435 | 466 | } |
duke@435 | 467 | |
duke@435 | 468 | // Requires "allowed_deadspace_words > 0", that "q" is the start of a |
duke@435 | 469 | // free block of the given "word_len", and that "q", were it an object, |
duke@435 | 470 | // would not move if forwarded. If the size allows, fill the free |
duke@435 | 471 | // block with an object, to prevent excessive compaction. Returns "true" |
duke@435 | 472 | // iff the free region was made deadspace, and modifies |
duke@435 | 473 | // "allowed_deadspace_words" to reflect the number of available deadspace |
duke@435 | 474 | // words remaining after this operation. |
duke@435 | 475 | bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q, |
duke@435 | 476 | size_t word_len); |
duke@435 | 477 | }; |
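// Orientation sketch: the three "MarkSweep support" virtuals above map
// onto the serial full-GC phases. The driver loop here is a
// simplification, not the real mark-sweep driver:
//
//   CompactPoint cp(the_gen, NULL, NULL);
//   for (CompactibleSpace* s = first; s != NULL;
//        s = s->next_compaction_space()) {
//     s->prepare_for_compaction(&cp);   // phase 2: forwarding pointers
//   }
//   // ... then, walking the spaces the same way:
//   //   s->adjust_pointers();          // phase 3: rewrite interior oops
//   //   s->compact();                  // phase 4: slide the live objects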
duke@435 | 478 | |
duke@435 | 479 | #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \ |
duke@435 | 480 | /* Compute the new addresses for the live objects and store them in the mark \
duke@435 | 481 | * Used by MarkSweep::mark_sweep_phase2() \
duke@435 | 482 | */ \ |
duke@435 | 483 | HeapWord* compact_top; /* This is where we are currently compacting to. */ \ |
duke@435 | 484 | \ |
duke@435 | 485 | /* We're sure to be here before any objects are compacted into this \ |
duke@435 | 486 | * space, so this is a good time to initialize this: \ |
duke@435 | 487 | */ \ |
duke@435 | 488 | set_compaction_top(bottom()); \ |
duke@435 | 489 | \ |
duke@435 | 490 | if (cp->space == NULL) { \ |
duke@435 | 491 | assert(cp->gen != NULL, "need a generation"); \ |
duke@435 | 492 | assert(cp->threshold == NULL, "just checking"); \ |
duke@435 | 493 | assert(cp->gen->first_compaction_space() == this, "just checking"); \ |
duke@435 | 494 | cp->space = cp->gen->first_compaction_space(); \ |
duke@435 | 495 | compact_top = cp->space->bottom(); \ |
duke@435 | 496 | cp->space->set_compaction_top(compact_top); \ |
duke@435 | 497 | cp->threshold = cp->space->initialize_threshold(); \ |
duke@435 | 498 | } else { \ |
duke@435 | 499 | compact_top = cp->space->compaction_top(); \ |
duke@435 | 500 | } \ |
duke@435 | 501 | \ |
duke@435 | 502 | /* We allow some amount of garbage towards the bottom of the space, so \ |
duke@435 | 503 | * we don't start compacting before there is a significant gain to be made.\ |
duke@435 | 504 | * Occasionally, we want to ensure a full compaction, which is determined \ |
duke@435 | 505 | * by the MarkSweepAlwaysCompactCount parameter. \ |
duke@435 | 506 | */ \ |
duke@435 | 507 | int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\ |
duke@435 | 508 | bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \ |
duke@435 | 509 | \ |
duke@435 | 510 | size_t allowed_deadspace = 0; \ |
duke@435 | 511 | if (skip_dead) { \ |
duke@435 | 512 | int ratio = allowed_dead_ratio(); \ |
duke@435 | 513 | allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \ |
duke@435 | 514 | } \ |
duke@435 | 515 | \ |
duke@435 | 516 | HeapWord* q = bottom(); \ |
duke@435 | 517 | HeapWord* t = scan_limit(); \ |
duke@435 | 518 | \ |
duke@435 | 519 | HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \ |
duke@435 | 520 | live object. */ \ |
duke@435 | 521 | HeapWord* first_dead = end();/* The first dead object. */ \ |
duke@435 | 522 | LiveRange* liveRange = NULL; /* The current live range, recorded in the \ |
duke@435 | 523 | first header of preceding free area. */ \ |
duke@435 | 524 | _first_dead = first_dead; \ |
duke@435 | 525 | \ |
duke@435 | 526 | const intx interval = PrefetchScanIntervalInBytes; \ |
duke@435 | 527 | \ |
duke@435 | 528 | while (q < t) { \ |
duke@435 | 529 | assert(!block_is_obj(q) || \ |
duke@435 | 530 | oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \ |
duke@435 | 531 | oop(q)->mark()->has_bias_pattern(), \ |
duke@435 | 532 | "these are the only valid states during a mark sweep"); \ |
duke@435 | 533 | if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ |
duke@435 | 534 | /* prefetch beyond q */ \ |
duke@435 | 535 | Prefetch::write(q, interval); \ |
duke@435 | 536 | /* size_t size = oop(q)->size(); changing this for cms for perm gen */\ |
ysr@777 | 537 | size_t size = block_size(q); \ |
duke@435 | 538 | compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ |
duke@435 | 539 | q += size; \ |
duke@435 | 540 | end_of_live = q; \ |
duke@435 | 541 | } else { \ |
duke@435 | 542 | /* run over all the contiguous dead objects */ \ |
duke@435 | 543 | HeapWord* end = q; \ |
duke@435 | 544 | do { \ |
duke@435 | 545 | /* prefetch beyond end */ \ |
duke@435 | 546 | Prefetch::write(end, interval); \ |
duke@435 | 547 | end += block_size(end); \ |
duke@435 | 548 | } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\ |
duke@435 | 549 | \ |
duke@435 | 550 | /* see if we might want to pretend this object is alive so that \ |
duke@435 | 551 | * we don't have to compact quite as often. \ |
duke@435 | 552 | */ \ |
duke@435 | 553 | if (allowed_deadspace > 0 && q == compact_top) { \ |
duke@435 | 554 | size_t sz = pointer_delta(end, q); \ |
duke@435 | 555 | if (insert_deadspace(allowed_deadspace, q, sz)) { \ |
duke@435 | 556 | compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \ |
duke@435 | 557 | q = end; \ |
duke@435 | 558 | end_of_live = end; \ |
duke@435 | 559 | continue; \ |
duke@435 | 560 | } \ |
duke@435 | 561 | } \ |
duke@435 | 562 | \ |
duke@435 | 563 | /* otherwise, it really is a free region. */ \ |
duke@435 | 564 | \ |
duke@435 | 565 | /* for the previous LiveRange, record the end of the live objects. */ \ |
duke@435 | 566 | if (liveRange) { \ |
duke@435 | 567 | liveRange->set_end(q); \ |
duke@435 | 568 | } \ |
duke@435 | 569 | \ |
duke@435 | 570 | /* record the current LiveRange object. \ |
duke@435 | 571 | * liveRange->start() is overlaid on the mark word. \ |
duke@435 | 572 | */ \ |
duke@435 | 573 | liveRange = (LiveRange*)q; \ |
duke@435 | 574 | liveRange->set_start(end); \ |
duke@435 | 575 | liveRange->set_end(end); \ |
duke@435 | 576 | \ |
duke@435 | 577 | /* see if this is the first dead region. */ \ |
duke@435 | 578 | if (q < first_dead) { \ |
duke@435 | 579 | first_dead = q; \ |
duke@435 | 580 | } \ |
duke@435 | 581 | \ |
duke@435 | 582 | /* move on to the next object */ \ |
duke@435 | 583 | q = end; \ |
duke@435 | 584 | } \ |
duke@435 | 585 | } \ |
duke@435 | 586 | \ |
duke@435 | 587 | assert(q == t, "just checking"); \ |
duke@435 | 588 | if (liveRange != NULL) { \ |
duke@435 | 589 | liveRange->set_end(q); \ |
duke@435 | 590 | } \ |
duke@435 | 591 | _end_of_live = end_of_live; \ |
duke@435 | 592 | if (end_of_live < first_dead) { \ |
duke@435 | 593 | first_dead = end_of_live; \ |
duke@435 | 594 | } \ |
duke@435 | 595 | _first_dead = first_dead; \ |
duke@435 | 596 | \ |
duke@435 | 597 | /* save the compaction_top of the compaction space. */ \ |
duke@435 | 598 | cp->space->set_compaction_top(compact_top); \ |
duke@435 | 599 | } |
duke@435 | 600 | |
ysr@777 | 601 | #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ |
ysr@777 | 602 | /* adjust all the interior pointers to point at the new locations of objects. \
ysr@777 | 603 | * Used by MarkSweep::mark_sweep_phase3() */ \ |
duke@435 | 604 | \ |
ysr@777 | 605 | HeapWord* q = bottom(); \ |
ysr@777 | 606 | HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ |
duke@435 | 607 | \ |
ysr@777 | 608 | assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ |
duke@435 | 609 | \ |
ysr@777 | 610 | if (q < t && _first_dead > q && \ |
duke@435 | 611 | !oop(q)->is_gc_marked()) { \ |
duke@435 | 612 | /* we have a chunk of the space which hasn't moved and we've \ |
duke@435 | 613 | * reinitialized the mark word during the previous pass, so we can't \ |
ysr@777 | 614 | * use is_gc_marked for the traversal. */ \ |
duke@435 | 615 | HeapWord* end = _first_dead; \ |
duke@435 | 616 | \ |
ysr@777 | 617 | while (q < end) { \ |
ysr@777 | 618 | /* I originally tried to conjoin "block_start(q) == q" to the \ |
ysr@777 | 619 | * assertion below, but that doesn't work, because you can't \ |
ysr@777 | 620 | * accurately traverse previous objects to get to the current one \ |
ysr@777 | 621 | * after their pointers (including pointers into permGen) have been \ |
ysr@777 | 622 | * updated, until the actual compaction is done. dld, 4/00 */ \ |
ysr@777 | 623 | assert(block_is_obj(q), \ |
ysr@777 | 624 | "should be at block boundaries, and should be looking at objs"); \ |
duke@435 | 625 | \ |
coleenp@548 | 626 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \ |
duke@435 | 627 | \ |
ysr@777 | 628 | /* point all the oops to the new location */ \ |
ysr@777 | 629 | size_t size = oop(q)->adjust_pointers(); \ |
ysr@777 | 630 | size = adjust_obj_size(size); \ |
duke@435 | 631 | \ |
coleenp@548 | 632 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \ |
ysr@777 | 633 | \ |
coleenp@548 | 634 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \ |
ysr@777 | 635 | \ |
coleenp@548 | 636 | q += size; \ |
ysr@777 | 637 | } \ |
duke@435 | 638 | \ |
ysr@777 | 639 | if (_first_dead == t) { \ |
ysr@777 | 640 | q = t; \ |
ysr@777 | 641 | } else { \ |
ysr@777 | 642 | /* $$$ This is funky. Using this to read the previously written \ |
ysr@777 | 643 | * LiveRange. See also use below. */ \ |
duke@435 | 644 | q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ |
ysr@777 | 645 | } \ |
ysr@777 | 646 | } \ |
duke@435 | 647 | \ |
duke@435 | 648 | const intx interval = PrefetchScanIntervalInBytes; \ |
duke@435 | 649 | \ |
ysr@777 | 650 | debug_only(HeapWord* prev_q = NULL); \ |
ysr@777 | 651 | while (q < t) { \ |
ysr@777 | 652 | /* prefetch beyond q */ \ |
duke@435 | 653 | Prefetch::write(q, interval); \ |
ysr@777 | 654 | if (oop(q)->is_gc_marked()) { \ |
ysr@777 | 655 | /* q is alive */ \ |
coleenp@548 | 656 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \ |
ysr@777 | 657 | /* point all the oops to the new location */ \ |
ysr@777 | 658 | size_t size = oop(q)->adjust_pointers(); \ |
ysr@777 | 659 | size = adjust_obj_size(size); \ |
ysr@777 | 660 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \ |
coleenp@548 | 661 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \ |
ysr@777 | 662 | debug_only(prev_q = q); \ |
duke@435 | 663 | q += size; \ |
coleenp@548 | 664 | } else { \ |
coleenp@548 | 665 | /* q is not a live object, so its mark should point at the next \ |
coleenp@548 | 666 | * live object */ \ |
coleenp@548 | 667 | debug_only(prev_q = q); \ |
coleenp@548 | 668 | q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ |
coleenp@548 | 669 | assert(q > prev_q, "we should be moving forward through memory"); \ |
coleenp@548 | 670 | } \ |
coleenp@548 | 671 | } \ |
duke@435 | 672 | \ |
coleenp@548 | 673 | assert(q == t, "just checking"); \ |
duke@435 | 674 | } |
duke@435 | 675 | |
coleenp@548 | 676 | #define SCAN_AND_COMPACT(obj_size) { \ |
duke@435 | 677 | /* Copy all live objects to their new location \ |
coleenp@548 | 678 | * Used by MarkSweep::mark_sweep_phase4() */ \ |
duke@435 | 679 | \ |
coleenp@548 | 680 | HeapWord* q = bottom(); \ |
coleenp@548 | 681 | HeapWord* const t = _end_of_live; \ |
coleenp@548 | 682 | debug_only(HeapWord* prev_q = NULL); \ |
duke@435 | 683 | \ |
coleenp@548 | 684 | if (q < t && _first_dead > q && \ |
duke@435 | 685 | !oop(q)->is_gc_marked()) { \ |
coleenp@548 | 686 | debug_only( \ |
coleenp@548 | 687 | /* we have a chunk of the space which hasn't moved and we've reinitialized \ |
coleenp@548 | 688 | * the mark word during the previous pass, so we can't use is_gc_marked for \ |
coleenp@548 | 689 | * the traversal. */ \ |
coleenp@548 | 690 | HeapWord* const end = _first_dead; \ |
coleenp@548 | 691 | \ |
coleenp@548 | 692 | while (q < end) { \ |
coleenp@548 | 693 | size_t size = obj_size(q); \ |
coleenp@548 | 694 | assert(!oop(q)->is_gc_marked(), \ |
coleenp@548 | 695 | "should be unmarked (special dense prefix handling)"); \ |
coleenp@548 | 696 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \ |
coleenp@548 | 697 | debug_only(prev_q = q); \ |
coleenp@548 | 698 | q += size; \ |
coleenp@548 | 699 | } \ |
coleenp@548 | 700 | ) /* debug_only */ \ |
coleenp@548 | 701 | \ |
coleenp@548 | 702 | if (_first_dead == t) { \ |
coleenp@548 | 703 | q = t; \ |
coleenp@548 | 704 | } else { \ |
coleenp@548 | 705 | /* $$$ Funky */ \ |
coleenp@548 | 706 | q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ |
coleenp@548 | 707 | } \ |
coleenp@548 | 708 | } \ |
duke@435 | 709 | \ |
coleenp@548 | 710 | const intx scan_interval = PrefetchScanIntervalInBytes; \ |
coleenp@548 | 711 | const intx copy_interval = PrefetchCopyIntervalInBytes; \ |
coleenp@548 | 712 | while (q < t) { \ |
coleenp@548 | 713 | if (!oop(q)->is_gc_marked()) { \ |
coleenp@548 | 714 | /* mark is pointer to next marked oop */ \ |
coleenp@548 | 715 | debug_only(prev_q = q); \ |
coleenp@548 | 716 | q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ |
coleenp@548 | 717 | assert(q > prev_q, "we should be moving forward through memory"); \ |
coleenp@548 | 718 | } else { \ |
coleenp@548 | 719 | /* prefetch beyond q */ \ |
duke@435 | 720 | Prefetch::read(q, scan_interval); \ |
duke@435 | 721 | \ |
duke@435 | 722 | /* size and destination */ \ |
duke@435 | 723 | size_t size = obj_size(q); \ |
duke@435 | 724 | HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ |
duke@435 | 725 | \ |
coleenp@548 | 726 | /* prefetch beyond compaction_top */ \ |
duke@435 | 727 | Prefetch::write(compaction_top, copy_interval); \ |
duke@435 | 728 | \ |
coleenp@548 | 729 | /* copy object and reinit its mark */ \ |
coleenp@548 | 730 | VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \ |
coleenp@548 | 731 | compaction_top)); \ |
coleenp@548 | 732 | assert(q != compaction_top, "everything in this pass should be moving"); \ |
coleenp@548 | 733 | Copy::aligned_conjoint_words(q, compaction_top, size); \ |
coleenp@548 | 734 | oop(compaction_top)->init_mark(); \ |
coleenp@548 | 735 | assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ |
duke@435 | 736 | \ |
coleenp@548 | 737 | debug_only(prev_q = q); \ |
duke@435 | 738 | q += size; \ |
coleenp@548 | 739 | } \ |
coleenp@548 | 740 | } \ |
duke@435 | 741 | \ |
ysr@777 | 742 | /* Let's remember if we were empty before we did the compaction. */ \ |
ysr@777 | 743 | bool was_empty = used_region().is_empty(); \ |
duke@435 | 744 | /* Reset space after compaction is complete */ \ |
coleenp@548 | 745 | reset_after_compaction(); \ |
duke@435 | 746 | /* We do this clear, below, since it has overloaded meanings for some */ \ |
duke@435 | 747 | /* space subtypes. For example, OffsetTableContigSpace's that were */ \ |
duke@435 | 748 | /* compacted into will have had their offset table thresholds updated */ \ |
duke@435 | 749 | /* continuously, but those that weren't need to have their thresholds */ \ |
duke@435 | 750 | /* re-initialized. Also mangles unused area for debugging. */ \ |
ysr@777 | 751 | if (used_region().is_empty()) { \ |
ysr@777 | 752 | if (!was_empty) clear(); \ |
duke@435 | 753 | } else { \ |
duke@435 | 754 | if (ZapUnusedHeapArea) mangle_unused_area(); \ |
duke@435 | 755 | } \ |
duke@435 | 756 | } |
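// The three macros above are instantiated in the .cpp files of the
// concrete space classes; schematically (the exact argument choices vary
// by space type, so treat this as a sketch rather than the real
// expansions):
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_obj, block_size);
//   }
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) return;   // nothing live, nothing to adjust
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }
//   void ContiguousSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }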
duke@435 | 757 | |
duke@435 | 758 | // A space in which the free area is contiguous. It therefore supports |
duke@435 | 759 | // faster allocation and compaction. |
duke@435 | 760 | class ContiguousSpace: public CompactibleSpace { |
duke@435 | 761 | friend class OneContigSpaceCardGeneration; |
duke@435 | 762 | friend class VMStructs; |
duke@435 | 763 | protected: |
duke@435 | 764 | HeapWord* _top; |
duke@435 | 765 | HeapWord* _concurrent_iteration_safe_limit; |
duke@435 | 766 | |
duke@435 | 767 | // Allocation helpers (return NULL if full). |
duke@435 | 768 | inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); |
duke@435 | 769 | inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); |
duke@435 | 770 | |
duke@435 | 771 | public: |
ysr@782 | 772 | ContiguousSpace() : |
ysr@782 | 773 | _top(NULL), |
ysr@782 | 774 | _concurrent_iteration_safe_limit(NULL) {} |
ysr@782 | 775 | |
duke@435 | 776 | virtual void initialize(MemRegion mr, bool clear_space); |
duke@435 | 777 | |
duke@435 | 778 | // Accessors |
duke@435 | 779 | HeapWord* top() const { return _top; } |
duke@435 | 780 | void set_top(HeapWord* value) { _top = value; } |
duke@435 | 781 | |
ysr@777 | 782 | virtual void set_saved_mark() { _saved_mark_word = top(); } |
ysr@777 | 783 | void reset_saved_mark() { _saved_mark_word = bottom(); } |
duke@435 | 784 | |
duke@435 | 785 | virtual void clear(); |
duke@435 | 786 | |
duke@435 | 787 | WaterMark bottom_mark() { return WaterMark(this, bottom()); } |
duke@435 | 788 | WaterMark top_mark() { return WaterMark(this, top()); } |
duke@435 | 789 | WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); } |
duke@435 | 790 | bool saved_mark_at_top() const { return saved_mark_word() == top(); } |
duke@435 | 791 | |
duke@435 | 792 | void mangle_unused_area(); |
duke@435 | 793 | void mangle_region(MemRegion mr); |
duke@435 | 794 | |
duke@435 | 795 | // Size computations: sizes in bytes. |
duke@435 | 796 | size_t capacity() const { return byte_size(bottom(), end()); } |
duke@435 | 797 | size_t used() const { return byte_size(bottom(), top()); } |
duke@435 | 798 | size_t free() const { return byte_size(top(), end()); } |
duke@435 | 799 | |
duke@435 | 800 | // Override from space. |
duke@435 | 801 | bool is_in(const void* p) const; |
duke@435 | 802 | |
duke@435 | 803 | virtual bool is_free_block(const HeapWord* p) const; |
duke@435 | 804 | |
duke@435 | 805 | // In a contiguous space we have a more obvious bound on what parts |
duke@435 | 806 | // contain objects. |
duke@435 | 807 | MemRegion used_region() const { return MemRegion(bottom(), top()); } |
duke@435 | 808 | |
duke@435 | 809 | MemRegion used_region_at_save_marks() const { |
duke@435 | 810 | return MemRegion(bottom(), saved_mark_word()); |
duke@435 | 811 | } |
duke@435 | 812 | |
duke@435 | 813 | // Allocation (return NULL if full) |
duke@435 | 814 | virtual HeapWord* allocate(size_t word_size); |
duke@435 | 815 | virtual HeapWord* par_allocate(size_t word_size); |
duke@435 | 816 | |
duke@435 | 817 | virtual bool obj_allocated_since_save_marks(const oop obj) const { |
duke@435 | 818 | return (HeapWord*)obj >= saved_mark_word(); |
duke@435 | 819 | } |
duke@435 | 820 | |
duke@435 | 821 | // Iteration |
duke@435 | 822 | void oop_iterate(OopClosure* cl); |
duke@435 | 823 | void oop_iterate(MemRegion mr, OopClosure* cl); |
duke@435 | 824 | void object_iterate(ObjectClosure* blk); |
duke@435 | 825 | void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
duke@435 | 826 | // Iterates over objects up to the safe limit. |
duke@435 | 827 | HeapWord* object_iterate_careful(ObjectClosureCareful* cl); |
duke@435 | 828 | inline HeapWord* concurrent_iteration_safe_limit(); |
duke@435 | 829 | // Changes the safe limit; all objects from bottom() to the new |
duke@435 | 830 | // limit must be properly initialized. |
duke@435 | 831 | inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit); |
duke@435 | 832 | |
duke@435 | 833 | #ifndef SERIALGC |
duke@435 | 834 | // In support of parallel oop_iterate. |
duke@435 | 835 | #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ |
duke@435 | 836 | void par_oop_iterate(MemRegion mr, OopClosureType* blk); |
duke@435 | 837 | |
duke@435 | 838 | ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL) |
duke@435 | 839 | #undef ContigSpace_PAR_OOP_ITERATE_DECL |
duke@435 | 840 | #endif // SERIALGC |
duke@435 | 841 | |
duke@435 | 842 | // Compaction support |
duke@435 | 843 | virtual void reset_after_compaction() { |
duke@435 | 844 | assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space"); |
duke@435 | 845 | set_top(compaction_top()); |
duke@435 | 846 | // set new iteration safe limit |
duke@435 | 847 | set_concurrent_iteration_safe_limit(compaction_top()); |
duke@435 | 848 | } |
duke@435 | 849 | virtual size_t minimum_free_block_size() const { return 0; } |
duke@435 | 850 | |
duke@435 | 851 | // Override. |
duke@435 | 852 | DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl, |
duke@435 | 853 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 854 | HeapWord* boundary = NULL); |
duke@435 | 855 | |
duke@435 | 856 | // Apply "blk->do_oop" to the addresses of all reference fields in objects |
duke@435 | 857 | // starting with the _saved_mark_word, which was noted during a generation's |
duke@435 | 858 | // save_marks and is required to denote the head of an object. |
duke@435 | 859 | // Fields in objects allocated by applications of the closure |
duke@435 | 860 | // *are* included in the iteration. |
duke@435 | 861 | // Updates _saved_mark_word to point to just after the last object |
duke@435 | 862 | // iterated over. |
duke@435 | 863 | #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ |
duke@435 | 864 | void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); |
duke@435 | 865 | |
duke@435 | 866 | ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL) |
duke@435 | 867 | #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL |
duke@435 | 868 | |
duke@435 | 869 | // Same as object_iterate, but starting from "mark", which is required |
duke@435 | 870 | // to denote the start of an object. Objects allocated by |
duke@435 | 871 | // applications of the closure *are* included in the iteration. |
duke@435 | 872 | virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk); |
duke@435 | 873 | |
duke@435 | 874 | // Very inefficient implementation. |
ysr@777 | 875 | virtual HeapWord* block_start_const(const void* p) const; |
duke@435 | 876 | size_t block_size(const HeapWord* p) const; |
duke@435 | 877 | // If a block is in the allocated area, it is an object. |
duke@435 | 878 | bool block_is_obj(const HeapWord* p) const { return p < top(); } |
duke@435 | 879 | |
duke@435 | 880 | // Addresses for inlined allocation |
duke@435 | 881 | HeapWord** top_addr() { return &_top; } |
duke@435 | 882 | HeapWord** end_addr() { return &_end; } |
duke@435 | 883 | |
duke@435 | 884 | // Overrides for more efficient compaction support. |
duke@435 | 885 | void prepare_for_compaction(CompactPoint* cp); |
duke@435 | 886 | |
duke@435 | 887 | // PrintHeapAtGC support. |
duke@435 | 888 | virtual void print_on(outputStream* st) const; |
duke@435 | 889 | |
duke@435 | 890 | // Checked dynamic downcasts. |
duke@435 | 891 | virtual ContiguousSpace* toContiguousSpace() { |
duke@435 | 892 | return this; |
duke@435 | 893 | } |
duke@435 | 894 | |
duke@435 | 895 | // Debugging |
duke@435 | 896 | virtual void verify(bool allow_dirty) const; |
duke@435 | 897 | |
duke@435 | 898 | // Used to increase collection frequency. A "factor" of 0 means the entire |
duke@435 | 899 | // space. |
duke@435 | 900 | void allocate_temporary_filler(int factor); |
duke@435 | 901 | |
duke@435 | 902 | }; |
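// Note: the changeset description at the top of this file concerns
// _concurrent_iteration_safe_limit. Every path that constructs, resizes,
// or reinitializes the space must keep the limit no greater than top();
// otherwise a concurrent iterator could parse uninitialized memory. A
// hedged sketch of what a careful iteration relies on:
//
//   HeapWord* limit = cs->concurrent_iteration_safe_limit();
//   assert(limit <= cs->top(),
//          "_concurrent_iteration_safe_limit update missed");
//   cs->object_iterate_careful(cl);  // only walks up to the safe limit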
duke@435 | 903 | |
duke@435 | 904 | |
duke@435 | 905 | // A dirty card to oop closure that does filtering. |
duke@435 | 906 | // It knows how to filter out objects that are outside of the _boundary. |
duke@435 | 907 | class Filtering_DCTOC : public DirtyCardToOopClosure { |
duke@435 | 908 | protected: |
duke@435 | 909 | // Override. |
duke@435 | 910 | void walk_mem_region(MemRegion mr, |
duke@435 | 911 | HeapWord* bottom, HeapWord* top); |
duke@435 | 912 | |
duke@435 | 913 | // Walk the given memory region, from bottom to top, applying |
duke@435 | 914 | // the given oop closure to (possibly) all objects found. The |
duke@435 | 915 | // given oop closure may or may not be the same as the oop |
duke@435 | 916 | // closure with which this closure was created, as it may |
duke@435 | 917 | // be a filtering closure which makes use of the _boundary. |
duke@435 | 918 | // We offer two signatures, so the FilteringClosure static type is |
duke@435 | 919 | // apparent. |
duke@435 | 920 | virtual void walk_mem_region_with_cl(MemRegion mr, |
duke@435 | 921 | HeapWord* bottom, HeapWord* top, |
duke@435 | 922 | OopClosure* cl) = 0; |
duke@435 | 923 | virtual void walk_mem_region_with_cl(MemRegion mr, |
duke@435 | 924 | HeapWord* bottom, HeapWord* top, |
duke@435 | 925 | FilteringClosure* cl) = 0; |
duke@435 | 926 | |
duke@435 | 927 | public: |
duke@435 | 928 | Filtering_DCTOC(Space* sp, OopClosure* cl, |
duke@435 | 929 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 930 | HeapWord* boundary) : |
duke@435 | 931 | DirtyCardToOopClosure(sp, cl, precision, boundary) {} |
duke@435 | 932 | }; |
duke@435 | 933 | |
duke@435 | 934 | // A dirty card to oop closure for contiguous spaces |
duke@435 | 935 | // (ContiguousSpace and sub-classes). |
duke@435 | 936 | // It is a FilteringClosure, as defined above, and it knows: |
duke@435 | 937 | // |
duke@435 | 938 | // 1. That the actual top of any area in a memory region |
duke@435 | 939 | // contained by the space is bounded by the end of the contiguous |
duke@435 | 940 | // region of the space. |
duke@435 | 941 | // 2. That the space is really made up of objects and not just |
duke@435 | 942 | // blocks. |
duke@435 | 943 | |
duke@435 | 944 | class ContiguousSpaceDCTOC : public Filtering_DCTOC { |
duke@435 | 945 | protected: |
duke@435 | 946 | // Overrides. |
duke@435 | 947 | HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj); |
duke@435 | 948 | |
duke@435 | 949 | virtual void walk_mem_region_with_cl(MemRegion mr, |
duke@435 | 950 | HeapWord* bottom, HeapWord* top, |
duke@435 | 951 | OopClosure* cl); |
duke@435 | 952 | virtual void walk_mem_region_with_cl(MemRegion mr, |
duke@435 | 953 | HeapWord* bottom, HeapWord* top, |
duke@435 | 954 | FilteringClosure* cl); |
duke@435 | 955 | |
duke@435 | 956 | public: |
duke@435 | 957 | ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl, |
duke@435 | 958 | CardTableModRefBS::PrecisionStyle precision, |
duke@435 | 959 | HeapWord* boundary) : |
duke@435 | 960 | Filtering_DCTOC(sp, cl, precision, boundary) |
duke@435 | 961 | {} |
duke@435 | 962 | }; |
duke@435 | 963 | |
duke@435 | 964 | |
duke@435 | 965 | // Class EdenSpace describes the eden space in the new generation. |
duke@435 | 966 | |
duke@435 | 967 | class DefNewGeneration; |
duke@435 | 968 | |
duke@435 | 969 | class EdenSpace : public ContiguousSpace { |
duke@435 | 970 | friend class VMStructs; |
duke@435 | 971 | private: |
duke@435 | 972 | DefNewGeneration* _gen; |
duke@435 | 973 | |
duke@435 | 974 | // _soft_end is used as a soft limit on allocation. As soft limits are |
duke@435 | 975 | // reached, the slow-path allocation code can invoke other actions and then |
duke@435 | 976 | // adjust _soft_end up to a new soft limit or to end(). |
duke@435 | 977 | HeapWord* _soft_end; |
duke@435 | 978 | |
duke@435 | 979 | public: |
ysr@782 | 980 | EdenSpace(DefNewGeneration* gen) : |
ysr@782 | 981 | _gen(gen), _soft_end(NULL) {} |
duke@435 | 982 | |
duke@435 | 983 | // Get/set just the 'soft' limit. |
duke@435 | 984 | HeapWord* soft_end() { return _soft_end; } |
duke@435 | 985 | HeapWord** soft_end_addr() { return &_soft_end; } |
duke@435 | 986 | void set_soft_end(HeapWord* value) { _soft_end = value; } |
duke@435 | 987 | |
duke@435 | 988 | // Override. |
duke@435 | 989 | void clear(); |
duke@435 | 990 | |
duke@435 | 991 | // Set both the 'hard' and 'soft' limits (_end and _soft_end). |
duke@435 | 992 | void set_end(HeapWord* value) { |
duke@435 | 993 | set_soft_end(value); |
duke@435 | 994 | ContiguousSpace::set_end(value); |
duke@435 | 995 | } |
duke@435 | 996 | |
duke@435 | 997 | // Allocation (return NULL if full) |
duke@435 | 998 | HeapWord* allocate(size_t word_size); |
duke@435 | 999 | HeapWord* par_allocate(size_t word_size); |
duke@435 | 1000 | }; |
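// Hedged sketch of the soft-limit protocol (the retry policy shown is an
// illustration, not this class's actual slow path):
//
//   HeapWord* res = eden->par_allocate(word_size);  // fails at _soft_end
//   if (res == NULL && eden->soft_end() != eden->end()) {
//     eden->set_soft_end(eden->end());      // retire the soft limit ...
//     res = eden->par_allocate(word_size);  // ... and retry to the hard end
//   }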
duke@435 | 1001 | |
duke@435 | 1002 | // Class ConcEdenSpace extends EdenSpace to allow safe |
duke@435 | 1003 | // allocation while the soft end is being modified concurrently. |
duke@435 | 1004 | |
duke@435 | 1005 | class ConcEdenSpace : public EdenSpace { |
duke@435 | 1006 | public: |
duke@435 | 1007 | ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { } |
duke@435 | 1008 | |
duke@435 | 1009 | // Allocation (return NULL if full) |
duke@435 | 1010 | HeapWord* par_allocate(size_t word_size); |
duke@435 | 1011 | }; |
duke@435 | 1012 | |
duke@435 | 1013 | |
duke@435 | 1014 | // A ContiguousSpace that supports an efficient "block_start" operation via |
duke@435 | 1015 | // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with |
duke@435 | 1016 | // other spaces). This is the abstract base class for old generation |
duke@435 | 1017 | // (tenured, perm) spaces. |
duke@435 | 1018 | |
duke@435 | 1019 | class OffsetTableContigSpace: public ContiguousSpace { |
duke@435 | 1020 | friend class VMStructs; |
duke@435 | 1021 | protected: |
duke@435 | 1022 | BlockOffsetArrayContigSpace _offsets; |
duke@435 | 1023 | Mutex _par_alloc_lock; |
duke@435 | 1024 | |
duke@435 | 1025 | public: |
duke@435 | 1026 | // Constructor |
duke@435 | 1027 | OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, |
duke@435 | 1028 | MemRegion mr); |
duke@435 | 1029 | |
duke@435 | 1030 | void set_bottom(HeapWord* value); |
duke@435 | 1031 | void set_end(HeapWord* value); |
duke@435 | 1032 | |
ysr@777 | 1033 | virtual void initialize(MemRegion mr, bool clear_space); |
duke@435 | 1034 | void clear(); |
duke@435 | 1035 | |
ysr@777 | 1036 | inline HeapWord* block_start_const(const void* p) const; |
duke@435 | 1037 | |
duke@435 | 1038 | // Add offset table update. |
duke@435 | 1039 | virtual inline HeapWord* allocate(size_t word_size); |
duke@435 | 1040 | inline HeapWord* par_allocate(size_t word_size); |
duke@435 | 1041 | |
duke@435 | 1042 | // MarkSweep support phase3 |
duke@435 | 1043 | virtual HeapWord* initialize_threshold(); |
duke@435 | 1044 | virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); |
duke@435 | 1045 | |
duke@435 | 1046 | virtual void print_on(outputStream* st) const; |
duke@435 | 1047 | |
duke@435 | 1048 | // Debugging |
duke@435 | 1049 | void verify(bool allow_dirty) const; |
duke@435 | 1050 | |
duke@435 | 1051 | // Shared space support |
duke@435 | 1052 | void serialize_block_offset_array_offsets(SerializeOopClosure* soc); |
duke@435 | 1053 | }; |
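// Sketch of the payoff (variable names assumed): allocation keeps
// _offsets in sync via cross_threshold(), so block_start_const() becomes
// a table lookup plus a short walk rather than a scan from bottom():
//
//   HeapWord* obj = ots->allocate(word_size);  // may update _offsets
//   HeapWord* start = ots->block_start_const(obj + word_size - 1);
//   assert(start == obj, "an interior query finds the object's start");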
duke@435 | 1054 | |
duke@435 | 1055 | |
duke@435 | 1056 | // Class TenuredSpace is used by TenuredGeneration |
duke@435 | 1057 | |
duke@435 | 1058 | class TenuredSpace: public OffsetTableContigSpace { |
duke@435 | 1059 | friend class VMStructs; |
duke@435 | 1060 | protected: |
duke@435 | 1061 | // Mark sweep support |
duke@435 | 1062 | int allowed_dead_ratio() const; |
duke@435 | 1063 | public: |
duke@435 | 1064 | // Constructor |
duke@435 | 1065 | TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray, |
duke@435 | 1066 | MemRegion mr) : |
duke@435 | 1067 | OffsetTableContigSpace(sharedOffsetArray, mr) {} |
duke@435 | 1068 | }; |
duke@435 | 1069 | |
duke@435 | 1070 | |
duke@435 | 1071 | // Class ContigPermSpace is used by CompactingPermGen |
duke@435 | 1072 | |
duke@435 | 1073 | class ContigPermSpace: public OffsetTableContigSpace { |
duke@435 | 1074 | friend class VMStructs; |
duke@435 | 1075 | protected: |
duke@435 | 1076 | // Mark sweep support |
duke@435 | 1077 | int allowed_dead_ratio() const; |
duke@435 | 1078 | public: |
duke@435 | 1079 | // Constructor |
duke@435 | 1080 | ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) : |
duke@435 | 1081 | OffsetTableContigSpace(sharedOffsetArray, mr) {} |
duke@435 | 1082 | }; |