src/share/vm/memory/space.hpp

changeset:   4234:3fadc0e8cffe
author:      jmasa
date:        Tue, 30 Oct 2012 10:23:55 -0700
parent:      4037:da91efe96a93
child:       4384:b735136e0d82
permissions: -rw-r--r--

8000988: VM deadlock when running btree006 on windows-i586
Reviewed-by: johnc, jcoomes, ysr

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)

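// For orientation, a caller-side sketch (illustrative only, not part of
// this header): code holding a generic Space* uses the checked downcast
// below before relying on contiguous-space operations.
//
//   void example(Space* s) {                     // hypothetical helper
//     ContiguousSpace* cs = s->toContiguousSpace();
//     if (cs != NULL) {
//       HeapWord* t = cs->top();                 // safe: space is contiguous
//     }
//   }
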
// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
 private:
  ExtendedOopClosure* _cl;
  MemRegion _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  virtual bool do_metadata() {
    // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
    assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
    return false;
  }
  virtual void do_klass(Klass* k)                         { ShouldNotReachHere(); }
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
};
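
// Usage sketch (illustrative only): wrap an existing closure so that only
// fields whose addresses fall within "mr" reach it; "cl", "mr" and "sp"
// here are hypothetical.
//
//   SpaceMemRegionOopsIterClosure filtered(cl, mr);
//   sp->oop_iterate(&filtered);   // cl->do_oop runs only for fields in mr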

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product JVMs,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }
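
  // Worked example (illustrative): with sizeof(double) == 8 the mask is
  // 0x7, so 0x1000 and 0x1008 are double-aligned while 0x1004 is not
  // (0x1004 & 0x7 == 0x4).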

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space. Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
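
  // Caller-side sketch (illustrative only): "allocate" assumes the caller
  // already holds the necessary lock, while "par_allocate" may be called
  // concurrently; both return NULL when the space is full.
  //
  //   HeapWord* mem = space->par_allocate(word_size);
  //   if (mem == NULL) {
  //     // full: caller must expand the space, collect, or fail
  //   }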

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // IF "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC below for a
// sub-class that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;     // If non-NULL, process only non-NULL oops
                           // pointing below boundary.
  HeapWord* _min_done;     // ObjHeadPreciseArray precision requires
                           // a downwards traversal; this is the
                           // lowest location already done (or,
                           // alternatively, the lowest address that
                           // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};
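
// Illustrative sketch (assumptions, not code from this file): a generation
// typically seeds a CompactPoint with itself and no space or threshold yet,
// then passes it through each space's prepare_for_compaction in compaction
// order; SCAN_AND_FORWARD below fills in the space/threshold fields.
//
//   CompactPoint cp(gen, NULL, NULL);            // hypothetical caller
//   gen->first_compaction_space()->prepare_for_compaction(&cp);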


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; };

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
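
// Deadwood arithmetic (worked example, grounded in SCAN_AND_FORWARD below):
// with allowed_dead_ratio() == 5 and a 100 MB space, up to
// (100 MB * 5 / 100) / HeapWordSize words of dead objects may be left in
// place as "deadspace" rather than compacted away, trading footprint for
// cheaper full GCs.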

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the    \
   * mark word. Used by MarkSweep::mark_sweep_phase2()                       \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this        \
   * space, so this is a good time to initialize this:                       \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \
  if (cp->space == NULL) {                                                   \
    assert(cp->gen != NULL, "need a generation");                            \
    assert(cp->threshold == NULL, "just checking");                          \
    assert(cp->gen->first_compaction_space() == this, "just checking");      \
    cp->space = cp->gen->first_compaction_space();                           \
    compact_top = cp->space->bottom();                                       \
    cp->space->set_compaction_top(compact_top);                              \
    cp->threshold = cp->space->initialize_threshold();                       \
  } else {                                                                   \
    compact_top = cp->space->compaction_top();                               \
  }                                                                          \
                                                                             \
  /* We allow some amount of garbage towards the bottom of the space, so     \
   * we don't start compacting before there is a significant gain to be made.\
   * Occasionally, we want to ensure a full compaction, which is determined  \
   * by the MarkSweepAlwaysCompactCount parameter.                           \
   */                                                                        \
  int invocations = MarkSweep::total_invocations();                          \
  bool skip_dead = (MarkSweepAlwaysCompactCount < 1)                         \
    ||((invocations % MarkSweepAlwaysCompactCount) != 0);                    \
                                                                             \
  size_t allowed_deadspace = 0;                                              \
  if (skip_dead) {                                                           \
    const size_t ratio = allowed_dead_ratio();                               \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
  }                                                                          \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = scan_limit();                                                \
                                                                             \
  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
                                   live object. */                           \
  HeapWord*  first_dead = end();/* The first dead object. */                 \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
                                   first header of preceding free area. */   \
  _first_dead = first_dead;                                                  \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  while (q < t) {                                                            \
    assert(!block_is_obj(q) ||                                               \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
           oop(q)->mark()->has_bias_pattern(),                               \
           "these are the only valid states during a mark sweep");           \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
      /* prefetch beyond q */                                                \
      Prefetch::write(q, interval);                                          \
      size_t size = block_size(q);                                           \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
      q += size;                                                             \
      end_of_live = q;                                                       \
    } else {                                                                 \
      /* run over all the contiguous dead objects */                         \
      HeapWord* end = q;                                                     \
      do {                                                                   \
        /* prefetch beyond end */                                            \
        Prefetch::write(end, interval);                                      \
        end += block_size(end);                                              \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
                                                                             \
      /* see if we might want to pretend this object is alive so that        \
       * we don't have to compact quite as often.                            \
       */                                                                    \
      if (allowed_deadspace > 0 && q == compact_top) {                       \
        size_t sz = pointer_delta(end, q);                                   \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
          q = end;                                                           \
          end_of_live = end;                                                 \
          continue;                                                          \
        }                                                                    \
      }                                                                      \
                                                                             \
      /* otherwise, it really is a free region. */                           \
                                                                             \
      /* for the previous LiveRange, record the end of the live objects. */  \
      if (liveRange) {                                                       \
        liveRange->set_end(q);                                               \
      }                                                                      \
                                                                             \
      /* record the current LiveRange object.                                \
       * liveRange->start() is overlaid on the mark word.                    \
       */                                                                    \
      liveRange = (LiveRange*)q;                                             \
      liveRange->set_start(end);                                             \
      liveRange->set_end(end);                                               \
                                                                             \
      /* see if this is the first dead region. */                            \
      if (q < first_dead) {                                                  \
        first_dead = q;                                                      \
      }                                                                      \
                                                                             \
      /* move on to the next object */                                       \
      q = end;                                                               \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
  if (liveRange != NULL) {                                                   \
    liveRange->set_end(q);                                                   \
  }                                                                          \
  _end_of_live = end_of_live;                                                \
  if (end_of_live < first_dead) {                                            \
    first_dead = end_of_live;                                                \
  }                                                                          \
  _first_dead = first_dead;                                                  \
                                                                             \
  /* save the compaction_top of the compaction space. */                     \
  cp->space->set_compaction_top(compact_top);                                \
}
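
// How a subclass typically instantiates the macro (a sketch modeled on the
// corresponding .cpp code; the argument names here are assumptions):
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }
//
// "top" is the scan limit; block_is_always_obj/obj_size suit a space in
// which every block is an object.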

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                          \
  /* adjust all the interior pointers to point at the new locations of objects \
   * Used by MarkSweep::mark_sweep_phase3() */                               \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
                                                                             \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");              \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    /* we have a chunk of the space which hasn't moved and we've             \
     * reinitialized the mark word during the previous pass, so we can't     \
     * use is_gc_marked for the traversal. */                                \
    HeapWord* end = _first_dead;                                             \
                                                                             \
    while (q < end) {                                                        \
      /* I originally tried to conjoin "block_start(q) == q" to the          \
       * assertion below, but that doesn't work, because you can't           \
       * accurately traverse previous objects to get to the current one      \
       * after their pointers have been                                      \
       * updated, until the actual compaction is done. dld, 4/00 */          \
      assert(block_is_obj(q),                                                \
             "should be at block boundaries, and should be looking at objs"); \
                                                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));  \
                                                                             \
      /* point all the oops to the new location */                           \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
                                                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());        \
                                                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));  \
                                                                             \
      q += size;                                                             \
    }                                                                        \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ This is funky. Using this to read the previously written        \
       * LiveRange. See also use below. */                                   \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();             \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  debug_only(HeapWord* prev_q = NULL);                                       \
  while (q < t) {                                                            \
    /* prefetch beyond q */                                                  \
    Prefetch::write(q, interval);                                            \
    if (oop(q)->is_gc_marked()) {                                            \
      /* q is alive */                                                       \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));  \
      /* point all the oops to the new location */                           \
      size_t size = oop(q)->adjust_pointers();                               \
      size = adjust_obj_size(size);                                          \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());        \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));  \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    } else {                                                                 \
      /* q is not a live object, so its mark should point at the next        \
       * live object */                                                      \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
}

#define SCAN_AND_COMPACT(obj_size) {                                         \
  /* Copy all live objects to their new location                             \
   * Used by MarkSweep::mark_sweep_phase4() */                               \
                                                                             \
  HeapWord*       q = bottom();                                              \
  HeapWord* const t = _end_of_live;                                          \
  debug_only(HeapWord* prev_q = NULL);                                       \
                                                                             \
  if (q < t && _first_dead > q &&                                            \
      !oop(q)->is_gc_marked()) {                                             \
    debug_only(                                                              \
    /* we have a chunk of the space which hasn't moved and we've reinitialized \
     * the mark word during the previous pass, so we can't use is_gc_marked for \
     * the traversal. */                                                     \
    HeapWord* const end = _first_dead;                                       \
                                                                             \
    while (q < end) {                                                        \
      size_t size = obj_size(q);                                             \
      assert(!oop(q)->is_gc_marked(),                                        \
             "should be unmarked (special dense prefix handling)");          \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));    \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
    )  /* debug_only */                                                      \
                                                                             \
    if (_first_dead == t) {                                                  \
      q = t;                                                                 \
    } else {                                                                 \
      /* $$$ Funky */                                                        \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();            \
    }                                                                        \
  }                                                                          \
                                                                             \
  const intx scan_interval = PrefetchScanIntervalInBytes;                    \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                    \
  while (q < t) {                                                            \
    if (!oop(q)->is_gc_marked()) {                                           \
      /* mark is pointer to next marked oop */                               \
      debug_only(prev_q = q);                                                \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                      \
      assert(q > prev_q, "we should be moving forward through memory");      \
    } else {                                                                 \
      /* prefetch beyond q */                                                \
      Prefetch::read(q, scan_interval);                                      \
                                                                             \
      /* size and destination */                                             \
      size_t size = obj_size(q);                                             \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();             \
                                                                             \
      /* prefetch beyond compaction_top */                                   \
      Prefetch::write(compaction_top, copy_interval);                        \
                                                                             \
      /* copy object and reinit its mark */                                  \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,         \
                                                            compaction_top)); \
      assert(q != compaction_top, "everything in this pass should be moving"); \
      Copy::aligned_conjoint_words(q, compaction_top, size);                 \
      oop(compaction_top)->init_mark();                                      \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");   \
                                                                             \
      debug_only(prev_q = q);                                                \
      q += size;                                                             \
    }                                                                        \
  }                                                                          \
                                                                             \
  /* Let's remember if we were empty before we did the compaction. */        \
  bool was_empty = used_region().is_empty();                                 \
  /* Reset space after compaction is complete */                             \
  reset_after_compaction();                                                  \
  /* We do this clear, below, since it has overloaded meanings for some */   \
  /* space subtypes. For example, OffsetTableContigSpace's that were    */   \
  /* compacted into will have had their offset table thresholds updated */   \
  /* continuously, but those that weren't need to have their thresholds */   \
  /* re-initialized. Also mangles unused area for debugging.            */   \
  if (used_region().is_empty()) {                                            \
    if (!was_empty) clear(SpaceDecorator::Mangle);                           \
  } else {                                                                   \
    if (ZapUnusedHeapArea) mangle_unused_area();                             \
  }                                                                          \
}
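
// Taken together (sketch): mark-sweep phase 2 uses SCAN_AND_FORWARD to
// compute and record forwarding addresses, phase 3 uses
// SCAN_AND_ADJUST_POINTERS to rewrite interior oops, and phase 4 uses
// SCAN_AND_COMPACT to slide the objects. A plausible wiring (names are
// assumptions based on how the macros are parameterized):
//
//   void CompactibleSpace::adjust_pointers() {
//     if (is_empty()) return;          // nothing to adjust
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }
//   void CompactibleSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }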

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const         { return _top;    }
  void set_top(HeapWord* value) { _top = value; }

  virtual void set_saved_mark() { _saved_mark_word = top();    }
  void reset_saved_mark()       { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()  { return WaterMark(this, bottom()); }
  WaterMark top_mark()     { return WaterMark(this, top()); }
  WaterMark saved_mark()   { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // changes the safe limit, all objects from bottom() to the new
  // limit should be properly initialized
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }
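
  // Protocol sketch (illustrative): a concurrent collector publishes newly
  // initialized objects by raising the safe limit only after initialization,
  // so concurrent iterators never walk uninitialized words:
  //
  //   sp->set_concurrent_iteration_safe_limit(sp->top());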


#ifndef SERIALGC
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringClosure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
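
// Soft-limit sketch (illustrative only, not this file's implementation):
// the fast path allocates against soft_end() rather than end(); on failure
// the slow path may take an action and then raise _soft_end toward end()
// before retrying.
//
//   HeapWord* obj = top();
//   if (pointer_delta(soft_end(), obj) >= word_size) {
//     // CAS top from obj to obj + word_size; retry on contention
//   } else {
//     // soft limit reached: act, then possibly set_soft_end(end()) and retry
//   }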

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP
