src/share/vm/memory/space.hpp

author      mgerdin
date        Thu, 06 Mar 2014 09:08:18 +0100
changeset   6978:30c99d8e0f02
parent      6912:c49dcaf78a65
child       6979:5255b195f828
permissions -rw-r--r--

8038399: Remove dead oop_iterate MemRegion variants from SharedHeap, Generation and Space classes
Reviewed-by: tschatzl, stefank

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation, size computation, and GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

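// Illustrative addition (not in the original header): for a contiguous
// space these pointers carve the region up as sketched below, with
// used() == byte_size(bottom(), top()) and free() == byte_size(top(), end()):
//
//   bottom()          top()                    end()
//     |                 |                        |
//     v                 v                        v
//     +-----------------+------------------------+
//     |    allocated    |          free          |
//     +-----------------+------------------------+
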
class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product jvm's,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }
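
  // For instance (an illustrative addition, not in the original header):
  // with an 8-byte double, is_aligned() holds for addresses 0x1000 and
  // 0x1008 but not for 0x1004, since 0x1004 & 0x7 == 4.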

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
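
// A minimal usage sketch (an illustrative addition, not part of the original
// header): applying a closure to every reference field in a space via
// oop_iterate(). CountOopClosure is a hypothetical closure type; the
// ExtendedOopClosure base class is declared in memory/iterator.hpp.
//
//   class CountOopClosure : public ExtendedOopClosure {
//     size_t _count;
//    public:
//     CountOopClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     size_t count() const { return _count; }
//   };
//
//   CountOopClosure cl;
//   space->oop_iterate(&cl);  // visits each ref-containing field once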

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
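
// A hedged usage sketch (illustrative addition): card-scanning code would
// typically obtain a closure of the right dynamic type from
// Space::new_dcto_cl() and hand it the dirty regions. "oop_cl" and
// "dirty_mr" are hypothetical names for an ExtendedOopClosure* and a
// MemRegion covering a run of dirty cards.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::Precise, NULL /* boundary */);
//   dcto_cl->do_MemRegion(dirty_mr);  // apply oop_cl to the fields the
//                                     // dirty cards may have recorded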

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
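
// An illustrative sketch (simplified; the real scan-and-forward logic lives
// in space.cpp and its inline helpers) of the phase-2 loop that
// prepare_for_compaction() implements in terms of forward(): walk the space
// and give every live object its post-compaction address.
//
//   HeapWord* compact_top = cp->space->compaction_top();
//   HeapWord* q = bottom();
//   while (q < _end_of_live) {
//     if (block_is_obj(q) && oop(q)->is_gc_marked()) {
//       size_t sz = oop(q)->size();
//       compact_top = forward(oop(q), sz, cp, compact_top);  // may advance
//       q += sz;                                             // cp->space
//     } else {
//       q += block_size(q);  // dead or free block: skip it (possibly after
//     }                      // turning it into deadspace, see insert_deadspace)
//   }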

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  virtual void set_saved_mark() { _saved_mark_word = top(); }
  void reset_saved_mark()       { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()        { return WaterMark(this, bottom()); }
  WaterMark top_mark()           { return WaterMark(this, top()); }
  WaterMark saved_mark()         { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(), end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // changes the safe limit, all objects from bottom() to the new
  // limit should be properly initialized
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }


#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
  #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
  #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};
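
// A minimal sketch (an assumption about shape, not the verbatim code, which
// lives in space.cpp) of the lock-free bump-the-pointer loop that
// par_allocate_impl() follows; Atomic::cmpxchg_ptr comes from
// runtime/atomic.hpp:
//
//   do {
//     HeapWord* obj = top();
//     if (pointer_delta(end_value, obj) < word_size) return NULL;  // full
//     HeapWord* new_top = obj + word_size;
//     HeapWord* result =
//       (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
//     if (result == obj) return obj;  // CAS won: [obj, new_top) is ours
//   } while (true);                   // lost the race; reload top() and retry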


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
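
// An illustrative sketch (simplified, and hypothetical in its policy
// details) of how the soft limit is meant to participate in allocation:
// the fast path allocates only up to soft_end(), and a NULL result while
// soft_end() < end() lets the slow path take its pending action before
// widening the limit and retrying.
//
//   HeapWord* result = allocate_impl(word_size, soft_end());
//   if (result == NULL && soft_end() < end()) {
//     // slow path: e.g. let the owning generation sample allocation,
//     // then move _soft_end closer to the hard limit and retry
//     set_soft_end(MIN2(soft_end() + delta, end()));  // "delta" is policy-
//     result = allocate_impl(word_size, soft_end());  // chosen, hypothetical
//   }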

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContigSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};
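
// A minimal sketch (the actual inline definitions are expected to live in
// space.inline.hpp) of what "add offset table update" means for allocate():
// a successful bump allocation also records the new block so that
// block_start_const() can answer quickly.
//
//   HeapWord* res = ContiguousSpace::allocate(word_size);
//   if (res != NULL) {
//     _offsets.alloc_block(res, word_size);  // keep the offset array in sync
//   }
//   return res;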


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP
