Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
 private:
  ExtendedOopClosure* _cl;
  MemRegion           _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  virtual bool do_metadata() {
    // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
    assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
    return false;
  }
  virtual void do_klass(Klass* k)                         { ShouldNotReachHere(); }
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
};
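
// Editorial sketch (not part of the original header): typical use of the
// filtering closure above. 'cl' is any ExtendedOopClosure and 'mr' bounds
// the addresses that may be visited; both names are placeholders. The
// wrapped closure can then be handed to any of the oop_iterate entry
// points declared below.
//
//   SpaceMemRegionOopsIterClosure filtered(cl, mr);
//   sp->oop_iterate(&filtered);   // 'cl' sees only oop slots inside 'mr'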

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// A Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product jvm's,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations. Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space. Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // IF "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
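
// Editorial sketch (not part of the original header): Space::is_aligned
// above relies on sizeof(double) being a power of two, so the mask
// sizeof(double)-1 isolates exactly the low-order address bits that must be
// zero for a double-aligned pointer. A standalone rendition of the test:
inline bool is_double_aligned_sketch(void* p) {
  // With 8-byte doubles: 0x1000 & 0x7 == 0 (aligned), 0x1004 & 0x7 == 4 (not).
  return ((intptr_t)p & (sizeof(double) - 1)) == 0;
}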

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
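
// Editorial sketch (not part of the original header): how a DCTOC is
// typically driven. The card-scanning loop lives in the card table code
// (CardTableModRefBS/CardTableRS); 'dirty' below stands in for a MemRegion
// covering one or more dirty cards and is a hypothetical name.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(cl, CardTableModRefBS::ObjHeadPreciseArray, boundary);
//   dcto_cl->do_MemRegion(dirty);  // applies 'cl' to refs on the dirty cards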

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top;    }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; };

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
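
// Editorial sketch (not part of the original header) of the forward()
// contract documented above, heavily simplified; the real implementation
// lives in space.cpp and carries additional asserts:
//
//   while (size > pointer_delta(cp->space->end(), compact_top)) {
//     // Object will not fit: record the final compaction_top of this
//     // space and advance to the next compaction space.
//     cp->space->set_compaction_top(compact_top);
//     cp->space = cp->space->next_compaction_space();
//     compact_top = cp->space->bottom();
//     cp->space->set_compaction_top(compact_top);
//     cp->threshold = cp->space->initialize_threshold();
//   }
//   if ((HeapWord*)q != compact_top) {
//     q->forward_to(oop(compact_top));  // install forwarding pointer
//   } else {
//     q->init_mark();                   // not moving: handled specially later
//   }
//   compact_top += size;
//   if (compact_top > cp->threshold) {
//     cp->threshold =
//       cp->space->cross_threshold(compact_top - size, compact_top);
//   }
//   return compact_top;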

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the    \
   * mark word (used by universe::mark_sweep_phase2()).                      \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this        \
   * space, so this is a good time to initialize this:                       \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \
  if (cp->space == NULL) {                                                   \
    assert(cp->gen != NULL, "need a generation");                            \
    assert(cp->threshold == NULL, "just checking");                          \
    assert(cp->gen->first_compaction_space() == this, "just checking");      \
    cp->space = cp->gen->first_compaction_space();                           \
    compact_top = cp->space->bottom();                                       \
    cp->space->set_compaction_top(compact_top);                              \
    cp->threshold = cp->space->initialize_threshold();                       \
  } else {                                                                   \
    compact_top = cp->space->compaction_top();                               \
  }                                                                          \
                                                                             \
  /* We allow some amount of garbage towards the bottom of the space, so     \
   * we don't start compacting before there is a significant gain to be made.\
   * Occasionally, we want to ensure a full compaction, which is determined  \
   * by the MarkSweepAlwaysCompactCount parameter.                           \
   */                                                                        \
  uint invocations = MarkSweep::total_invocations();                         \
  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
                                                                             \
  size_t allowed_deadspace = 0;                                              \
  if (skip_dead) {                                                           \
    const size_t ratio = allowed_dead_ratio();                               \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
  }                                                                          \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = scan_limit();                                                \
                                                                             \
  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
                                   live object. */                           \
  HeapWord*  first_dead = end();/* The first dead object. */                 \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
                                   first header of preceding free area. */   \
  _first_dead = first_dead;                                                  \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  while (q < t) {                                                            \
    assert(!block_is_obj(q) ||                                               \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
           oop(q)->mark()->has_bias_pattern(),                               \
           "these are the only valid states during a mark sweep");           \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
      /* prefetch beyond q */                                                \
      Prefetch::write(q, interval);                                          \
      size_t size = block_size(q);                                           \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
      q += size;                                                             \
      end_of_live = q;                                                       \
    } else {                                                                 \
      /* run over all the contiguous dead objects */                         \
      HeapWord* end = q;                                                     \
      do {                                                                   \
        /* prefetch beyond end */                                            \
        Prefetch::write(end, interval);                                      \
        end += block_size(end);                                              \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
                                                                             \
      /* see if we might want to pretend this object is alive so that        \
       * we don't have to compact quite as often.                            \
       */                                                                    \
      if (allowed_deadspace > 0 && q == compact_top) {                       \
        size_t sz = pointer_delta(end, q);                                   \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
          q = end;                                                           \
          end_of_live = end;                                                 \
          continue;                                                          \
        }                                                                    \
      }                                                                      \
                                                                             \
      /* otherwise, it really is a free region. */                           \
                                                                             \
      /* for the previous LiveRange, record the end of the live objects. */  \
      if (liveRange) {                                                       \
        liveRange->set_end(q);                                               \
      }                                                                      \
                                                                             \
      /* record the current LiveRange object.                                \
       * liveRange->start() is overlaid on the mark word.                    \
       */                                                                    \
      liveRange = (LiveRange*)q;                                             \
      liveRange->set_start(end);                                             \
      liveRange->set_end(end);                                               \
                                                                             \
      /* see if this is the first dead region. */                            \
      if (q < first_dead) {                                                  \
        first_dead = q;                                                      \
      }                                                                      \
                                                                             \
      /* move on to the next object */                                       \
      q = end;                                                               \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
  if (liveRange != NULL) {                                                   \
    liveRange->set_end(q);                                                   \
  }                                                                          \
  _end_of_live = end_of_live;                                                \
  if (end_of_live < first_dead) {                                            \
    first_dead = end_of_live;                                                \
  }                                                                          \
  _first_dead = first_dead;                                                  \
                                                                             \
  /* save the compaction_top of the compaction space. */                     \
  cp->space->set_compaction_top(compact_top);                                \
}
duke@435 | 632 | |
ysr@777 | 633 | #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ |
ysr@777 | 634 | /* adjust all the interior pointers to point at the new locations of objects \ |
ysr@777 | 635 | * Used by MarkSweep::mark_sweep_phase3() */ \ |
duke@435 | 636 | \ |
ysr@777 | 637 | HeapWord* q = bottom(); \ |
ysr@777 | 638 | HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ |
duke@435 | 639 | \ |
ysr@777 | 640 | assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ |
duke@435 | 641 | \ |
ysr@777 | 642 | if (q < t && _first_dead > q && \ |
duke@435 | 643 | !oop(q)->is_gc_marked()) { \ |
duke@435 | 644 | /* we have a chunk of the space which hasn't moved and we've \ |
duke@435 | 645 | * reinitialized the mark word during the previous pass, so we can't \ |
ysr@777 | 646 | * use is_gc_marked for the traversal. */ \ |
duke@435 | 647 | HeapWord* end = _first_dead; \ |
duke@435 | 648 | \ |
ysr@777 | 649 | while (q < end) { \ |
ysr@777 | 650 | /* I originally tried to conjoin "block_start(q) == q" to the \ |
ysr@777 | 651 | * assertion below, but that doesn't work, because you can't \ |
ysr@777 | 652 | * accurately traverse previous objects to get to the current one \ |
coleenp@4037 | 653 | * after their pointers have been \ |
ysr@777 | 654 | * updated, until the actual compaction is done. dld, 4/00 */ \ |
ysr@777 | 655 | assert(block_is_obj(q), \ |
ysr@777 | 656 | "should be at block boundaries, and should be looking at objs"); \ |
duke@435 | 657 | \ |
ysr@777 | 658 | /* point all the oops to the new location */ \ |
ysr@777 | 659 | size_t size = oop(q)->adjust_pointers(); \ |
ysr@777 | 660 | size = adjust_obj_size(size); \ |
duke@435 | 661 | \ |
coleenp@548 | 662 | q += size; \ |
ysr@777 | 663 | } \ |
duke@435 | 664 | \ |
ysr@777 | 665 | if (_first_dead == t) { \ |
ysr@777 | 666 | q = t; \ |
ysr@777 | 667 | } else { \ |
ysr@777 | 668 | /* $$$ This is funky. Using this to read the previously written \ |
ysr@777 | 669 | * LiveRange. See also use below. */ \ |
duke@435 | 670 | q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ |
ysr@777 | 671 | } \ |
ysr@777 | 672 | } \ |
duke@435 | 673 | \ |
duke@435 | 674 | const intx interval = PrefetchScanIntervalInBytes; \ |
duke@435 | 675 | \ |
ysr@777 | 676 | debug_only(HeapWord* prev_q = NULL); \ |
ysr@777 | 677 | while (q < t) { \ |
ysr@777 | 678 | /* prefetch beyond q */ \ |
duke@435 | 679 | Prefetch::write(q, interval); \ |
ysr@777 | 680 | if (oop(q)->is_gc_marked()) { \ |
ysr@777 | 681 | /* q is alive */ \ |
ysr@777 | 682 | /* point all the oops to the new location */ \ |
ysr@777 | 683 | size_t size = oop(q)->adjust_pointers(); \ |
ysr@777 | 684 | size = adjust_obj_size(size); \ |
ysr@777 | 685 | debug_only(prev_q = q); \ |
duke@435 | 686 | q += size; \ |
tonyp@791 | 687 | } else { \ |
tonyp@791 | 688 | /* q is not a live object, so its mark should point at the next \ |
tonyp@791 | 689 | * live object */ \ |
tonyp@791 | 690 | debug_only(prev_q = q); \ |
tonyp@791 | 691 | q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ |
tonyp@791 | 692 | assert(q > prev_q, "we should be moving forward through memory"); \ |
tonyp@791 | 693 | } \ |
tonyp@791 | 694 | } \ |
duke@435 | 695 | \ |
tonyp@791 | 696 | assert(q == t, "just checking"); \ |
duke@435 | 697 | } |

#define SCAN_AND_COMPACT(obj_size) {                                         \
  /* Copy all live objects to their new location                             \
   * Used by MarkSweep::mark_sweep_phase4() */                               \
                                                                             \
  HeapWord*       q = bottom();                                              \
  HeapWord* const t = _end_of_live;                                          \
  debug_only(HeapWord* prev_q = NULL);                                       \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    debug_only(                                                              \
    /* we have a chunk of the space which hasn't moved and we've reinitialized \
     * the mark word during the previous pass, so we can't use is_gc_marked for \
     * the traversal. */                                                     \
    HeapWord* const end = _first_dead;                                       \
                                                                             \
    while (q < end) {                                                        \
      size_t size = obj_size(q);                                             \
      assert(!oop(q)->is_gc_marked(),                                        \
             "should be unmarked (special dense prefix handling)");          \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
    ) /* debug_only */                                                       \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ Funky */                                                        \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();            \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx scan_interval = PrefetchScanIntervalInBytes;                    \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                    \
  while (q < t) {                                                            \
    if (!oop(q)->is_gc_marked()) {                                           \
      /* mark is pointer to next marked oop */                               \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    } else {                                                                 \
      /* prefetch beyond q */                                                \
      Prefetch::read(q, scan_interval);                                      \
                                                                             \
      /* size and destination */                                             \
      size_t size = obj_size(q);                                             \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();             \
                                                                             \
      /* prefetch beyond compaction_top */                                   \
      Prefetch::write(compaction_top, copy_interval);                        \
                                                                             \
      /* copy object and reinit its mark */                                  \
      assert(q != compaction_top, "everything in this pass should be moving");\
      Copy::aligned_conjoint_words(q, compaction_top, size);                 \
      oop(compaction_top)->init_mark();                                      \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");   \
                                                                             \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
  }                                                                          \
                                                                             \
  /* Let's remember if we were empty before we did the compaction. */        \
  bool was_empty = used_region().is_empty();                                 \
  /* Reset space after compaction is complete */                             \
  reset_after_compaction();                                                  \
  /* We do this clear, below, since it has overloaded meanings for some */   \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */   \
  /* compacted into will have had their offset table thresholds updated */   \
  /* continuously, but those that weren't need to have their thresholds */   \
  /* re-initialized.  Also mangles unused area for debugging.           */   \
  if (used_region().is_empty()) {                                            \
    if (!was_empty) clear(SpaceDecorator::Mangle);                           \
  } else {                                                                   \
    if (ZapUnusedHeapArea) mangle_unused_area();                             \
  }                                                                          \
}
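
// Editorial note (not part of the original header): the three macros above
// are instantiated per space type in the .cpp files. For ContiguousSpace the
// instantiations in space.cpp look roughly like this (sketch, not verbatim):
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }
//   void ContiguousSpace::adjust_pointers() {
//     if (used() == 0) return;   // nothing to adjust in an empty space
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }
//   void ContiguousSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }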

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // changes the safe limit, all objects from bottom() to the new
  // limit should be properly initialized
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }


#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};
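
// Editorial sketch (not part of the original header): the lock-free
// bump-pointer loop behind par_allocate_impl, assuming the era's
// Atomic::cmpxchg_ptr primitive (sketch, not the verbatim implementation):
//
//   do {
//     HeapWord* obj = top();
//     if (pointer_delta(end_value, obj) >= word_size) {
//       HeapWord* new_top = obj + word_size;
//       HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
//       // result == obj means our CAS installed new_top and we own [obj, new_top)
//       if (result == obj) return obj;
//       // otherwise another thread raced us; retry with the fresh top()
//     } else {
//       return NULL;   // not enough room left
//     }
//   } while (true);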

// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringClosure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};

// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
   _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};
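
// Editorial sketch (not part of the original header): ConcEdenSpace's
// par_allocate differs from the plain CAS loop used elsewhere in that it
// must re-read the soft limit on every retry, because another thread may
// move _soft_end concurrently (assumed shape, not verbatim):
//
//   do {
//     // re-read both top() and *soft_end_addr() each iteration
//     HeapWord* obj = top();
//     if (pointer_delta(*soft_end_addr(), obj) >= word_size) {
//       HeapWord* new_top = obj + word_size;
//       HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
//       if (result == obj) return obj;
//     } else {
//       return NULL;   // soft limit hit; caller falls back to the slow path
//     }
//   } while (true);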

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};
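
// Editorial sketch (not part of the original header): the point of the block
// offset table. ContiguousSpace::block_start_const must scan forward from
// bottom(), while OffsetTableContigSpace can consult _offsets for a
// near-constant-time lookup, roughly (sketch, not verbatim):
//
//   inline HeapWord*
//   OffsetTableContigSpace::block_start_const(const void* p) const {
//     return _offsets.block_start(p);   // table lookup instead of a scan
//   }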

// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP