src/share/vm/memory/space.hpp

author       tschatzl
date         Mon, 21 Jul 2014 10:00:31 +0200
changeset    7009:3f2894c5052e
parent       6981:ff1e37e7eb83
child        7031:ee019285a52c
permissions  -rw-r--r--

8048112: G1 Full GC needs to support the case when the very first region is not available
Summary: Refactor preparation for compaction during Full GC so that it lazily initializes the first compaction point. This also avoids problems later when the first region may not be committed. Also reviewed by K. Barrett.
Reviewed-by: brutisso

duke@435 1 /*
mikael@6198 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_MEMORY_SPACE_HPP
stefank@2314 26 #define SHARE_VM_MEMORY_SPACE_HPP
stefank@2314 27
stefank@2314 28 #include "memory/allocation.hpp"
stefank@2314 29 #include "memory/blockOffsetTable.hpp"
stefank@2314 30 #include "memory/cardTableModRefBS.hpp"
stefank@2314 31 #include "memory/iterator.hpp"
stefank@2314 32 #include "memory/memRegion.hpp"
stefank@2314 33 #include "memory/watermark.hpp"
stefank@2314 34 #include "oops/markOop.hpp"
stefank@2314 35 #include "runtime/mutexLocker.hpp"
jprovino@4542 36 #include "utilities/macros.hpp"
stefank@2314 37 #include "utilities/workgroup.hpp"
stefank@2314 38
duke@435 39 // A space is an abstraction for the "storage units" backing
duke@435 40 // up the generation abstraction. It includes specific
duke@435 41 // implementations for keeping track of free and used space,
duke@435 42 // for iterating over objects and free blocks, etc.
duke@435 43
duke@435 44 // Here's the Space hierarchy:
duke@435 45 //
duke@435 46 // - Space -- an abstract base class describing a heap area
duke@435 47 // - CompactibleSpace -- a space supporting compaction
duke@435 48 // - CompactibleFreeListSpace -- (used for CMS generation)
duke@435 49 // - ContiguousSpace -- a compactible space in which all free space
duke@435 50 // is contiguous
duke@435 51 // - EdenSpace -- contiguous space used as nursery
duke@435 52 // - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
duke@435 53 // - OffsetTableContigSpace -- contiguous space with a block offset array
duke@435 54 // that allows "fast" block_start calls
duke@435 55 // - TenuredSpace -- (used for TenuredGeneration)
duke@435 56
duke@435 57 // Forward decls.
duke@435 58 class Space;
duke@435 59 class BlockOffsetArray;
duke@435 60 class BlockOffsetArrayContigSpace;
duke@435 61 class Generation;
duke@435 62 class CompactibleSpace;
duke@435 63 class BlockOffsetTable;
duke@435 64 class GenRemSet;
duke@435 65 class CardTableRS;
duke@435 66 class DirtyCardToOopClosure;
duke@435 67
duke@435 68 // A Space describes a heap area. Class Space is an abstract
duke@435 69 // base class.
duke@435 70 //
duke@435 71 // Space supports allocation and size computation, and provides GC support.
duke@435 72 //
duke@435 73 // Invariant: bottom() and end() are on page_size boundaries and
duke@435 74 // bottom() <= top() <= end()
duke@435 75 // top() is inclusive and end() is exclusive.
duke@435 76
zgu@3900 77 class Space: public CHeapObj<mtGC> {
duke@435 78 friend class VMStructs;
duke@435 79 protected:
duke@435 80 HeapWord* _bottom;
duke@435 81 HeapWord* _end;
duke@435 82
duke@435 83 // Used in support of save_marks()
duke@435 84 HeapWord* _saved_mark_word;
duke@435 85
duke@435 86 MemRegionClosure* _preconsumptionDirtyCardClosure;
duke@435 87
duke@435 88 // A sequential tasks done structure. This supports
duke@435 89 // parallel GC, where we have threads dynamically
duke@435 90 // claiming sub-tasks from a larger parallel task.
duke@435 91 SequentialSubTasksDone _par_seq_tasks;
duke@435 92
duke@435 93 Space():
duke@435 94 _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
duke@435 95
duke@435 96 public:
duke@435 97 // Accessors
duke@435 98 HeapWord* bottom() const { return _bottom; }
duke@435 99 HeapWord* end() const { return _end; }
duke@435 100 virtual void set_bottom(HeapWord* value) { _bottom = value; }
duke@435 101 virtual void set_end(HeapWord* value) { _end = value; }
duke@435 102
ysr@777 103 virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }
ysr@1280 104
duke@435 105 void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
duke@435 106
mgerdin@6981 107 // Returns true if the given object has been allocated since a
mgerdin@6981 108 // generation's "save_marks" call.
mgerdin@6981 109 virtual bool obj_allocated_since_save_marks(const oop obj) const {
mgerdin@6981 110 return (HeapWord*)obj >= saved_mark_word();
mgerdin@6981 111 }
mgerdin@6981 112
duke@435 113 MemRegionClosure* preconsumptionDirtyCardClosure() const {
duke@435 114 return _preconsumptionDirtyCardClosure;
duke@435 115 }
duke@435 116 void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
duke@435 117 _preconsumptionDirtyCardClosure = cl;
duke@435 118 }
duke@435 119
mgerdin@6981 120 // Returns a subregion of the space containing only the allocated objects in
duke@435 121 // the space.
mgerdin@6981 122 virtual MemRegion used_region() const = 0;
duke@435 123
duke@435 124 // Returns a region that is guaranteed to contain (at least) all objects
duke@435 125 // allocated at the time of the last call to "save_marks". If the space
duke@435 126 // initializes its DirtyCardToOopClosure's specifying the "contig" option
duke@435 127 // (that is, if the space is contiguous), then this region must contain only
duke@435 128 // such objects: the memregion will be from the bottom of the region to the
duke@435 129 // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
duke@435 130 // the space must distinguish between objects in the region allocated before
duke@435 131 // and after the call to save marks.
mgerdin@6981 132 MemRegion used_region_at_save_marks() const {
duke@435 133 return MemRegion(bottom(), saved_mark_word());
duke@435 134 }
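
// Illustrative sketch (not part of this interface): after a generation's
// save_marks() call, the objects allocated since then can be visited by
// combining the accessors above. For a ContiguousSpace the hypothetical
// caller loop below walks exactly [saved_mark_word(), top()):
//
//   HeapWord* cur = sp->saved_mark_word();
//   while (cur < sp->top()) {
//     oop obj = oop(cur);
//     assert(sp->obj_allocated_since_save_marks(obj), "allocated after save_marks");
//     // ... process obj ...
//     cur += obj->size();
//   }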
duke@435 135
ysr@777 136 // Initialization.
ysr@777 137 // "initialize" should be called once on a space, before it is used for
ysr@777 138 // any purpose. The "mr" argument gives the bounds of the space, and
ysr@777 139 // the "clear_space" argument should be true unless the memory in "mr" is
ysr@777 140 // known to be zeroed.
jmasa@698 141 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
ysr@777 142
ysr@777 143 // The "clear" method must be called on a region that may have
ysr@777 144 // had allocation performed in it, but is now to be considered empty.
jmasa@698 145 virtual void clear(bool mangle_space);
duke@435 146
duke@435 147 // For detecting GC bugs. Should only be called at GC boundaries, since
duke@435 148 // some unused space may be used as scratch space during GC's.
duke@435 149 // Default implementation does nothing. We also call this when expanding
duke@435 150 // a space to satisfy an allocation request. See bug #4668531
duke@435 151 virtual void mangle_unused_area() {}
jmasa@698 152 virtual void mangle_unused_area_complete() {}
duke@435 153 virtual void mangle_region(MemRegion mr) {}
duke@435 154
duke@435 155 // Testers
duke@435 156 bool is_empty() const { return used() == 0; }
duke@435 157 bool not_empty() const { return used() > 0; }
duke@435 158
duke@435 159 // Returns true iff the space contains the
duke@435 160 // given address as part of an allocated object. For
duke@435 161 // certain kinds of spaces, this might be a potentially
duke@435 162 // expensive operation. To prevent performance problems
duke@435 163 // on account of its inadvertent use in product JVMs,
duke@435 164 // we restrict its use to assertion checks only.
mgerdin@6981 165 bool is_in(const void* p) const {
mgerdin@6981 166 return used_region().contains(p);
mgerdin@6981 167 }
duke@435 168
duke@435 169 // Returns true iff the reserved memory of the space contains the
duke@435 170 // given address.
duke@435 171 bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
duke@435 172
duke@435 173 // Returns true iff the given block is not allocated.
duke@435 174 virtual bool is_free_block(const HeapWord* p) const = 0;
duke@435 175
duke@435 176 // Test whether p is double-aligned
duke@435 177 static bool is_aligned(void* p) {
duke@435 178 return ((intptr_t)p & (sizeof(double)-1)) == 0;
duke@435 179 }
duke@435 180
duke@435 181 // Size computations. Sizes are in bytes.
duke@435 182 size_t capacity() const { return byte_size(bottom(), end()); }
duke@435 183 virtual size_t used() const = 0;
duke@435 184 virtual size_t free() const = 0;
duke@435 185
duke@435 186 // Iterate over all the ref-containing fields of all objects in the
duke@435 187 // space, calling "cl.do_oop" on each. Fields in objects allocated by
duke@435 188 // applications of the closure are not included in the iteration.
coleenp@4037 189 virtual void oop_iterate(ExtendedOopClosure* cl);
duke@435 190
duke@435 191 // Iterate over all objects in the space, calling "cl.do_object" on
duke@435 192 // each. Objects allocated by applications of the closure are not
duke@435 193 // included in the iteration.
duke@435 194 virtual void object_iterate(ObjectClosure* blk) = 0;
jmasa@952 195 // Similar to object_iterate() except only iterates over
jmasa@952 196 // objects whose internal references point to objects in the space.
jmasa@952 197 virtual void safe_object_iterate(ObjectClosure* blk) = 0;
duke@435 198
duke@435 199 // Create and return a new dirty card to oop closure. Can be
duke@435 200 // overridden to return the appropriate type of closure
duke@435 201 // depending on the type of space in which the closure will
duke@435 202 // operate. ResourceArea allocated.
coleenp@4037 203 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
duke@435 204 CardTableModRefBS::PrecisionStyle precision,
duke@435 205 HeapWord* boundary = NULL);
duke@435 206
duke@435 207 // If "p" is in the space, returns the address of the start of the
duke@435 208 // "block" that contains "p". We say "block" instead of "object" since
duke@435 209 // some heaps may not pack objects densely; a chunk may either be an
duke@435 210 // object or a non-object. If "p" is not in the space, return NULL.
ysr@777 211 virtual HeapWord* block_start_const(const void* p) const = 0;
ysr@777 212
ysr@777 213 // The non-const version may have benevolent side effects on the data
ysr@777 214 // structure supporting these calls, possibly speeding up future calls.
ysr@777 215 // The default implementation, however, is simply to call the const
ysr@777 216 // version.
ysr@777 217 inline virtual HeapWord* block_start(const void* p);
duke@435 218
duke@435 219 // Requires "addr" to be the start of a chunk, and returns its size.
duke@435 220 // "addr + size" is required to be the start of a new chunk, or the end
duke@435 221 // of the active area of the heap.
duke@435 222 virtual size_t block_size(const HeapWord* addr) const = 0;
duke@435 223
duke@435 224 // Requires "addr" to be the start of a block, and returns "TRUE" iff
duke@435 225 // the block is an object.
duke@435 226 virtual bool block_is_obj(const HeapWord* addr) const = 0;
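
// Illustrative sketch (not part of this interface): block_size() and
// block_is_obj() let a caller walk every block in the used part of a
// space, whether or not a given block holds an object. The walker below
// is hypothetical:
//
//   HeapWord* cur   = sp->bottom();
//   HeapWord* limit = sp->used_region().end();
//   while (cur < limit) {
//     size_t sz = sp->block_size(cur);
//     if (sp->block_is_obj(cur)) {
//       // "cur" is the start of an object occupying sz words
//     }
//     cur += sz;
//   }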
duke@435 227
duke@435 228 // Requires "addr" to be the start of a block, and returns "TRUE" iff
duke@435 229 // the block is an object and the object is alive.
duke@435 230 virtual bool obj_is_alive(const HeapWord* addr) const;
duke@435 231
duke@435 232 // Allocation (return NULL if full). Assumes the caller has established
duke@435 233 // mutually exclusive access to the space.
duke@435 234 virtual HeapWord* allocate(size_t word_size) = 0;
duke@435 235
duke@435 236 // Allocation (return NULL if full). Enforces mutual exclusion internally.
duke@435 237 virtual HeapWord* par_allocate(size_t word_size) = 0;
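
// Illustrative sketch (not part of this interface): both entry points
// return NULL on failure, so callers need a slow path. The fallback
// described below is hypothetical:
//
//   HeapWord* mem = sp->par_allocate(word_size);
//   if (mem == NULL) {
//     // space is full (or cannot hold word_size words); expand the space
//     // or trigger a collection, then retry
//   }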
duke@435 238
duke@435 239 // Mark-sweep-compact support: all spaces can update pointers to objects
duke@435 240 // moving as a part of compaction.
duke@435 241 virtual void adjust_pointers();
duke@435 242
duke@435 243 // PrintHeapAtGC support
duke@435 244 virtual void print() const;
duke@435 245 virtual void print_on(outputStream* st) const;
duke@435 246 virtual void print_short() const;
duke@435 247 virtual void print_short_on(outputStream* st) const;
duke@435 248
duke@435 249
duke@435 250 // Accessor for parallel sequential tasks.
duke@435 251 SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
duke@435 252
duke@435 253 // If "this" is a ContiguousSpace, return it, else return NULL.
duke@435 254 virtual ContiguousSpace* toContiguousSpace() {
duke@435 255 return NULL;
duke@435 256 }
duke@435 257
duke@435 258 // Debugging
brutisso@3711 259 virtual void verify() const = 0;
duke@435 260 };
duke@435 261
duke@435 262 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
duke@435 263 // OopClosure to (the addresses of) all the ref-containing fields that could
duke@435 264 // be modified by virtue of the given MemRegion being dirty. (Note that
duke@435 265 // because of the imprecise nature of the write barrier, this may iterate
duke@435 266 // over oops beyond the region.)
duke@435 267 // This base type for dirty card to oop closures handles memory regions
duke@435 268 // in non-contiguous spaces with no boundaries, and should be sub-classed
duke@435 269 // to support other space types. See ContiguousDCTOC for a sub-class
duke@435 270 // that works with ContiguousSpaces.
duke@435 271
duke@435 272 class DirtyCardToOopClosure: public MemRegionClosureRO {
duke@435 273 protected:
coleenp@4037 274 ExtendedOopClosure* _cl;
duke@435 275 Space* _sp;
duke@435 276 CardTableModRefBS::PrecisionStyle _precision;
duke@435 277 HeapWord* _boundary; // If non-NULL, process only non-NULL oops
duke@435 278 // pointing below boundary.
ysr@777 279 HeapWord* _min_done; // ObjHeadPreciseArray precision requires
duke@435 280 // a downwards traversal; this is the
duke@435 281 // lowest location already done (or,
duke@435 282 // alternatively, the lowest address that
duke@435 283 // shouldn't be done again. NULL means infinity.)
duke@435 284 NOT_PRODUCT(HeapWord* _last_bottom;)
ysr@777 285 NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
duke@435 286
duke@435 287 // Get the actual top of the area on which the closure will
duke@435 288 // operate, given where the top is assumed to be (the end of the
duke@435 289 // memory region passed to do_MemRegion) and where the object
duke@435 290 // at the top is assumed to start. For example, an object may
duke@435 291 // start at the top but actually extend past the assumed top,
duke@435 292 // in which case the top becomes the end of the object.
duke@435 293 virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
duke@435 294
duke@435 295 // Walk the given memory region from bottom to (actual) top
duke@435 296 // looking for objects and applying the oop closure (_cl) to
duke@435 297 // them. The base implementation of this treats the area as
duke@435 298 // blocks, where a block may or may not be an object. Sub-
duke@435 299 // classes should override this to provide more accurate
duke@435 300 // or possibly more efficient walking.
duke@435 301 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
duke@435 302
duke@435 303 public:
coleenp@4037 304 DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
duke@435 305 CardTableModRefBS::PrecisionStyle precision,
duke@435 306 HeapWord* boundary) :
duke@435 307 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
duke@435 308 _min_done(NULL) {
ysr@777 309 NOT_PRODUCT(_last_bottom = NULL);
ysr@777 310 NOT_PRODUCT(_last_explicit_min_done = NULL);
duke@435 311 }
duke@435 312
duke@435 313 void do_MemRegion(MemRegion mr);
duke@435 314
duke@435 315 void set_min_done(HeapWord* min_done) {
duke@435 316 _min_done = min_done;
ysr@777 317 NOT_PRODUCT(_last_explicit_min_done = _min_done);
duke@435 318 }
duke@435 319 #ifndef PRODUCT
duke@435 320 void set_last_bottom(HeapWord* last_bottom) {
duke@435 321 _last_bottom = last_bottom;
duke@435 322 }
duke@435 323 #endif
duke@435 324 };
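
// Illustrative sketch (not part of this header): a card-table scan usually
// asks the space for the appropriate closure via Space::new_dcto_cl() and
// then feeds it one MemRegion per run of dirty cards. The driver loop is
// hypothetical; the precision value is the one named in the comments above:
//
//   ExtendedOopClosure* oop_cl = ...;  // provided by the collector
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::ObjHeadPreciseArray,
//                     NULL /* boundary */);
//   for (each MemRegion mr covering a run of dirty cards) {  // pseudocode
//     dcto_cl->do_MemRegion(mr);
//   }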
duke@435 325
duke@435 326 // A structure to represent a point at which objects are being copied
duke@435 327 // during compaction.
duke@435 328 class CompactPoint : public StackObj {
duke@435 329 public:
duke@435 330 Generation* gen;
duke@435 331 CompactibleSpace* space;
duke@435 332 HeapWord* threshold;
tschatzl@7009 333
tschatzl@7009 334 CompactPoint(Generation* _gen) :
tschatzl@7009 335 gen(_gen), space(NULL), threshold(0) {}
duke@435 336 };
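
// Illustrative sketch (not part of this header): during full-GC phase 2 a
// generation drives compaction with a single CompactPoint. Its "space" and
// "threshold" fields start out unset and are initialized lazily by the
// first prepare_for_compaction() call (see 8048112 above). Hypothetical
// driver loop:
//
//   CompactPoint cp(the_gen);
//   for (CompactibleSpace* s = first_space; s != NULL;
//        s = s->next_compaction_space()) {
//     s->prepare_for_compaction(&cp);
//   }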
duke@435 337
duke@435 338
duke@435 339 // A space that supports compaction operations. This is usually, but not
duke@435 340 // necessarily, a space that is normally contiguous. But, for example, a
duke@435 341 // free-list-based space whose normal collection is a mark-sweep without
duke@435 342 // compaction could still support compaction in full GC's.
duke@435 343
duke@435 344 class CompactibleSpace: public Space {
duke@435 345 friend class VMStructs;
duke@435 346 friend class CompactibleFreeListSpace;
duke@435 347 private:
duke@435 348 HeapWord* _compaction_top;
duke@435 349 CompactibleSpace* _next_compaction_space;
duke@435 350
duke@435 351 public:
ysr@782 352 CompactibleSpace() :
ysr@782 353 _compaction_top(NULL), _next_compaction_space(NULL) {}
ysr@782 354
jmasa@698 355 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
tonyp@791 356 virtual void clear(bool mangle_space);
duke@435 357
duke@435 358 // Used temporarily during a compaction phase to hold the value
duke@435 359 // top should have when compaction is complete.
duke@435 360 HeapWord* compaction_top() const { return _compaction_top; }
duke@435 361
duke@435 362 void set_compaction_top(HeapWord* value) {
duke@435 363 assert(value == NULL || (value >= bottom() && value <= end()),
duke@435 364 "should point inside space");
duke@435 365 _compaction_top = value;
duke@435 366 }
duke@435 367
duke@435 368 // Perform operations on the space needed after a compaction
duke@435 369 // has been performed.
mgerdin@6981 370 virtual void reset_after_compaction() = 0;
duke@435 371
duke@435 372 // Returns the next space (in the current generation) to be compacted in
duke@435 373 // the global compaction order. Also is used to select the next
duke@435 374 // space into which to compact.
duke@435 375
duke@435 376 virtual CompactibleSpace* next_compaction_space() const {
duke@435 377 return _next_compaction_space;
duke@435 378 }
duke@435 379
duke@435 380 void set_next_compaction_space(CompactibleSpace* csp) {
duke@435 381 _next_compaction_space = csp;
duke@435 382 }
duke@435 383
duke@435 384 // MarkSweep support phase2
duke@435 385
duke@435 386 // Start the process of compaction of the current space: compute
duke@435 387 // post-compaction addresses, and insert forwarding pointers. The fields
duke@435 388 // "cp->gen" and "cp->compaction_space" are the generation and space into
duke@435 389 // which we are currently compacting. This call updates "cp" as necessary,
duke@435 390 // and leaves the "compaction_top" of the final value of
duke@435 391 // "cp->compaction_space" up-to-date. Offset tables may be updated in
duke@435 392 // this phase as if the final copy had occurred; if so, "cp->threshold"
duke@435 393 // indicates when the next such action should be taken.
duke@435 394 virtual void prepare_for_compaction(CompactPoint* cp);
duke@435 395 // MarkSweep support phase3
duke@435 396 virtual void adjust_pointers();
duke@435 397 // MarkSweep support phase4
duke@435 398 virtual void compact();
duke@435 399
duke@435 400 // The maximum percentage of objects that can be dead in the compacted
duke@435 401 // live part of a compacted space ("deadwood" support.)
jcoomes@873 402 virtual size_t allowed_dead_ratio() const { return 0; };
duke@435 403
duke@435 404 // Some contiguous spaces may maintain some data structures that should
duke@435 405 // be updated whenever an allocation crosses a boundary. This function
duke@435 406 // returns the first such boundary.
duke@435 407 // (The default implementation returns the end of the space, so the
duke@435 408 // boundary is never crossed.)
duke@435 409 virtual HeapWord* initialize_threshold() { return end(); }
duke@435 410
duke@435 411 // "q" is an object of the given "size" that should be forwarded;
duke@435 412 // "cp" names the generation ("gen") and containing "this" (which must
duke@435 413 // also equal "cp->space"). "compact_top" is where in "this" the
duke@435 414 // next object should be forwarded to. If there is room in "this" for
duke@435 415 // the object, insert an appropriate forwarding pointer in "q".
duke@435 416 // If not, go to the next compaction space (there must
duke@435 417 // be one, since compaction must succeed -- we go to the first space of
duke@435 418 // the previous generation if necessary, updating "cp"), reset compact_top
duke@435 419 // and then forward. In either case, returns the new value of "compact_top".
duke@435 420 // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
duke@435 421 // function of the then-current compaction space, and updates "cp->threshold"
duke@435 422 // accordingly.
duke@435 423 virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
duke@435 424 HeapWord* compact_top);
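
// Illustrative sketch (not part of this interface): a typical
// prepare_for_compaction() implementation walks the live objects of this
// space and calls forward() on each, letting it advance compact_top and
// switch compaction spaces when the current one fills up. Hypothetical
// loop:
//
//   HeapWord* compact_top = cp->space->compaction_top();
//   for (each live object q of size sz in this space) {  // pseudocode
//     compact_top = forward(oop(q), sz, cp, compact_top);
//   }
//   cp->space->set_compaction_top(compact_top);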
duke@435 425
duke@435 426 // Return a size with adjustments as required of the space.
duke@435 427 virtual size_t adjust_object_size_v(size_t size) const { return size; }
duke@435 428
duke@435 429 protected:
duke@435 430 // Used during compaction.
duke@435 431 HeapWord* _first_dead;
duke@435 432 HeapWord* _end_of_live;
duke@435 433
duke@435 434 // Minimum size of a free block.
mgerdin@6981 435 virtual size_t minimum_free_block_size() const { return 0; }
duke@435 436
duke@435 437 // This function is invoked when an allocation of an object covering
duke@435 438 // "start" to "end" crosses the threshold; returns the next
duke@435 439 // threshold. (The default implementation does nothing.)
duke@435 440 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@435 441 return end();
duke@435 442 }
duke@435 443
duke@435 444 // Requires "allowed_deadspace_words > 0", that "q" is the start of a
duke@435 445 // free block of the given "word_len", and that "q", were it an object,
duke@435 446 // would not move if forwarded. If the size allows, fill the free
duke@435 447 // block with an object, to prevent excessive compaction. Returns "true"
duke@435 448 // iff the free region was made deadspace, and modifies
duke@435 449 // "allowed_deadspace_words" to reflect the number of available deadspace
duke@435 450 // words remaining after this operation.
duke@435 451 bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
duke@435 452 size_t word_len);
duke@435 453 };
duke@435 454
jmasa@698 455 class GenSpaceMangler;
jmasa@698 456
duke@435 457 // A space in which the free area is contiguous. It therefore supports
duke@435 458 // faster allocation, and compaction.
duke@435 459 class ContiguousSpace: public CompactibleSpace {
duke@435 460 friend class OneContigSpaceCardGeneration;
duke@435 461 friend class VMStructs;
duke@435 462 protected:
duke@435 463 HeapWord* _top;
duke@435 464 HeapWord* _concurrent_iteration_safe_limit;
jmasa@698 465 // A helper for mangling the unused area of the space in debug builds.
jmasa@698 466 GenSpaceMangler* _mangler;
jmasa@698 467
jmasa@698 468 GenSpaceMangler* mangler() { return _mangler; }
duke@435 469
duke@435 470 // Allocation helpers (return NULL if full).
duke@435 471 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
duke@435 472 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
duke@435 473
duke@435 474 public:
jmasa@698 475 ContiguousSpace();
jmasa@698 476 ~ContiguousSpace();
jmasa@698 477
jmasa@698 478 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
tonyp@791 479 virtual void clear(bool mangle_space);
duke@435 480
duke@435 481 // Accessors
duke@435 482 HeapWord* top() const { return _top; }
duke@435 483 void set_top(HeapWord* value) { _top = value; }
duke@435 484
mgerdin@6981 485 void set_saved_mark() { _saved_mark_word = top(); }
ysr@777 486 void reset_saved_mark() { _saved_mark_word = bottom(); }
duke@435 487
duke@435 488 WaterMark bottom_mark() { return WaterMark(this, bottom()); }
duke@435 489 WaterMark top_mark() { return WaterMark(this, top()); }
duke@435 490 WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
duke@435 491 bool saved_mark_at_top() const { return saved_mark_word() == top(); }
duke@435 492
jmasa@698 493 // In debug mode, mangle (write a particular bit
jmasa@698 494 // pattern into) the unused part of a space.
jmasa@698 495
jmasa@698 496 // Used to save an address in a space for later use during mangling.
jmasa@698 497 void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
jmasa@698 498 // Used to save the space's current top for later use during mangling.
jmasa@698 499 void set_top_for_allocations() PRODUCT_RETURN;
jmasa@698 500
jmasa@698 501 // Mangle regions in the space from the current top up to the
jmasa@698 502 // previously mangled part of the space.
jmasa@698 503 void mangle_unused_area() PRODUCT_RETURN;
jmasa@698 504 // Mangle [top, end)
jmasa@698 505 void mangle_unused_area_complete() PRODUCT_RETURN;
jmasa@698 506 // Mangle the given MemRegion.
jmasa@698 507 void mangle_region(MemRegion mr) PRODUCT_RETURN;
jmasa@698 508
jmasa@698 509 // Do some sparse checking on the area that should have been mangled.
jmasa@698 510 void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
jmasa@698 511 // Check the complete area that should have been mangled.
jmasa@698 512 // This code may be NULL depending on the macro DEBUG_MANGLING.
jmasa@698 513 void check_mangled_unused_area_complete() PRODUCT_RETURN;
duke@435 514
duke@435 515 // Size computations: sizes in bytes.
duke@435 516 size_t capacity() const { return byte_size(bottom(), end()); }
duke@435 517 size_t used() const { return byte_size(bottom(), top()); }
duke@435 518 size_t free() const { return byte_size(top(), end()); }
duke@435 519
duke@435 520 virtual bool is_free_block(const HeapWord* p) const;
duke@435 521
duke@435 522 // In a contiguous space we have a more obvious bound on what parts
duke@435 523 // contain objects.
duke@435 524 MemRegion used_region() const { return MemRegion(bottom(), top()); }
duke@435 525
duke@435 526 // Allocation (return NULL if full)
duke@435 527 virtual HeapWord* allocate(size_t word_size);
duke@435 528 virtual HeapWord* par_allocate(size_t word_size);
duke@435 529
duke@435 530 // Iteration
coleenp@4037 531 void oop_iterate(ExtendedOopClosure* cl);
duke@435 532 void object_iterate(ObjectClosure* blk);
jmasa@952 533 // For contiguous spaces this method will iterate safely over objects
jmasa@952 534 // in the space (i.e., between bottom and top) when at a safepoint.
jmasa@952 535 void safe_object_iterate(ObjectClosure* blk);
mgerdin@6980 536
mgerdin@6980 537 // Iterate over as many initialized objects in the space as possible,
mgerdin@6980 538 // calling "cl.do_object_careful" on each. Return NULL if all objects
mgerdin@6980 539 // in the space (at the start of the iteration) were iterated over.
mgerdin@6980 540 // Return an address indicating the extent of the iteration in the
mgerdin@6980 541 // event that the iteration had to return because of finding an
mgerdin@6980 542 // uninitialized object in the space, or if the closure "cl"
mgerdin@6980 543 // signaled early termination.
duke@435 544 HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
stefank@3751 545 HeapWord* concurrent_iteration_safe_limit() {
stefank@3751 546 assert(_concurrent_iteration_safe_limit <= top(),
stefank@3751 547 "_concurrent_iteration_safe_limit update missed");
stefank@3751 548 return _concurrent_iteration_safe_limit;
stefank@3751 549 }
duke@435 550 // Changes the safe limit; all objects from bottom() to the new
duke@435 551 // limit should be properly initialized.
stefank@3751 552 void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
stefank@3751 553 assert(new_limit <= top(), "uninitialized objects in the safe range");
stefank@3751 554 _concurrent_iteration_safe_limit = new_limit;
stefank@3751 555 }
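
// Illustrative sketch (not part of this interface): the safe limit is only
// raised after the memory below the new value is fully initialized, so a
// concurrent iterator never sees a partially constructed object. A
// hypothetical allocation path might publish like this:
//
//   HeapWord* obj = sp->allocate(word_size);
//   if (obj != NULL) {
//     // ... initialize header and fields of the new object ...
//     sp->set_concurrent_iteration_safe_limit(sp->top());
//   }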
duke@435 556
coleenp@4037 557
jprovino@4542 558 #if INCLUDE_ALL_GCS
duke@435 559 // In support of parallel oop_iterate.
duke@435 560 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
duke@435 561 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
duke@435 562
duke@435 563 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
duke@435 564 #undef ContigSpace_PAR_OOP_ITERATE_DECL
jprovino@4542 565 #endif // INCLUDE_ALL_GCS
duke@435 566
duke@435 567 // Compaction support
duke@435 568 virtual void reset_after_compaction() {
duke@435 569 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
duke@435 570 set_top(compaction_top());
duke@435 571 // set new iteration safe limit
duke@435 572 set_concurrent_iteration_safe_limit(compaction_top());
duke@435 573 }
duke@435 574
duke@435 575 // Override.
coleenp@4037 576 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
duke@435 577 CardTableModRefBS::PrecisionStyle precision,
duke@435 578 HeapWord* boundary = NULL);
duke@435 579
duke@435 580 // Apply "blk->do_oop" to the addresses of all reference fields in objects
duke@435 581 // starting with the _saved_mark_word, which was noted during a generation's
duke@435 582 // save_marks and is required to denote the head of an object.
duke@435 583 // Fields in objects allocated by applications of the closure
duke@435 584 // *are* included in the iteration.
duke@435 585 // Updates _saved_mark_word to point to just after the last object
duke@435 586 // iterated over.
duke@435 587 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
duke@435 588 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
duke@435 589
duke@435 590 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
duke@435 591 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
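
// Illustrative sketch (not part of this interface): each application of the
// macro above declares one specialized iterator. For a closure type
// "ScanClosure" with nv_suffix "_nv" (names used by the shared closure
// macros; shown here only as an example) the declaration expands to:
//
//   void oop_since_save_marks_iterate_nv(ScanClosure* blk);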
duke@435 592
duke@435 593 // Same as object_iterate, but starting from "mark", which is required
duke@435 594 // to denote the start of an object. Objects allocated by
duke@435 595 // applications of the closure *are* included in the iteration.
duke@435 596 virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
duke@435 597
duke@435 598 // Very inefficient implementation.
ysr@777 599 virtual HeapWord* block_start_const(const void* p) const;
duke@435 600 size_t block_size(const HeapWord* p) const;
duke@435 601 // If a block is in the allocated area, it is an object.
duke@435 602 bool block_is_obj(const HeapWord* p) const { return p < top(); }
duke@435 603
duke@435 604 // Addresses for inlined allocation
duke@435 605 HeapWord** top_addr() { return &_top; }
duke@435 606 HeapWord** end_addr() { return &_end; }
duke@435 607
duke@435 608 // Overrides for more efficient compaction support.
duke@435 609 void prepare_for_compaction(CompactPoint* cp);
duke@435 610
duke@435 611 // PrintHeapAtGC support.
duke@435 612 virtual void print_on(outputStream* st) const;
duke@435 613
duke@435 614 // Checked dynamic downcasts.
duke@435 615 virtual ContiguousSpace* toContiguousSpace() {
duke@435 616 return this;
duke@435 617 }
duke@435 618
duke@435 619 // Debugging
brutisso@3711 620 virtual void verify() const;
duke@435 621
duke@435 622 // Used to increase collection frequency. "factor" of 0 means entire
duke@435 623 // space.
duke@435 624 void allocate_temporary_filler(int factor);
duke@435 625
duke@435 626 };
duke@435 627
duke@435 628
duke@435 629 // A dirty card to oop closure that does filtering.
duke@435 630 // It knows how to filter out objects that are outside of the _boundary.
duke@435 631 class Filtering_DCTOC : public DirtyCardToOopClosure {
duke@435 632 protected:
duke@435 633 // Override.
duke@435 634 void walk_mem_region(MemRegion mr,
duke@435 635 HeapWord* bottom, HeapWord* top);
duke@435 636
duke@435 637 // Walk the given memory region, from bottom to top, applying
duke@435 638 // the given oop closure to (possibly) all objects found. The
duke@435 639 // given oop closure may or may not be the same as the oop
duke@435 640 // closure with which this closure was created, as it may
duke@435 641 // be a filtering closure which makes use of the _boundary.
duke@435 642 // We offer two signatures, so the FilteringClosure static type is
duke@435 643 // apparent.
duke@435 644 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@435 645 HeapWord* bottom, HeapWord* top,
coleenp@4037 646 ExtendedOopClosure* cl) = 0;
duke@435 647 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@435 648 HeapWord* bottom, HeapWord* top,
duke@435 649 FilteringClosure* cl) = 0;
duke@435 650
duke@435 651 public:
coleenp@4037 652 Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
duke@435 653 CardTableModRefBS::PrecisionStyle precision,
duke@435 654 HeapWord* boundary) :
duke@435 655 DirtyCardToOopClosure(sp, cl, precision, boundary) {}
duke@435 656 };
duke@435 657
duke@435 658 // A dirty card to oop closure for contiguous spaces
duke@435 659 // (ContiguousSpace and sub-classes).
duke@435 660 // It is a Filtering_DCTOC, as defined above, and it knows:
duke@435 661 //
duke@435 662 // 1. That the actual top of any area in a memory region
duke@435 663 // contained by the space is bounded by the end of the contiguous
duke@435 664 // region of the space.
duke@435 665 // 2. That the space is really made up of objects and not just
duke@435 666 // blocks.
duke@435 667
duke@435 668 class ContiguousSpaceDCTOC : public Filtering_DCTOC {
duke@435 669 protected:
duke@435 670 // Overrides.
duke@435 671 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
duke@435 672
duke@435 673 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@435 674 HeapWord* bottom, HeapWord* top,
coleenp@4037 675 ExtendedOopClosure* cl);
duke@435 676 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@435 677 HeapWord* bottom, HeapWord* top,
duke@435 678 FilteringClosure* cl);
duke@435 679
duke@435 680 public:
coleenp@4037 681 ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
duke@435 682 CardTableModRefBS::PrecisionStyle precision,
duke@435 683 HeapWord* boundary) :
duke@435 684 Filtering_DCTOC(sp, cl, precision, boundary)
duke@435 685 {}
duke@435 686 };
duke@435 687
duke@435 688
duke@435 689 // Class EdenSpace describes the eden space in the new generation.
duke@435 690
duke@435 691 class DefNewGeneration;
duke@435 692
duke@435 693 class EdenSpace : public ContiguousSpace {
duke@435 694 friend class VMStructs;
duke@435 695 private:
duke@435 696 DefNewGeneration* _gen;
duke@435 697
duke@435 698 // _soft_end is used as a soft limit on allocation. As soft limits are
duke@435 699 // reached, the slow-path allocation code can invoke other actions and then
duke@435 700 // adjust _soft_end up to a new soft limit or to end().
duke@435 701 HeapWord* _soft_end;
duke@435 702
duke@435 703 public:
ysr@782 704 EdenSpace(DefNewGeneration* gen) :
ysr@782 705 _gen(gen), _soft_end(NULL) {}
duke@435 706
duke@435 707 // Get/set just the 'soft' limit.
duke@435 708 HeapWord* soft_end() { return _soft_end; }
duke@435 709 HeapWord** soft_end_addr() { return &_soft_end; }
duke@435 710 void set_soft_end(HeapWord* value) { _soft_end = value; }
duke@435 711
duke@435 712 // Override.
jmasa@698 713 void clear(bool mangle_space);
duke@435 714
duke@435 715 // Set both the 'hard' and 'soft' limits (_end and _soft_end).
duke@435 716 void set_end(HeapWord* value) {
duke@435 717 set_soft_end(value);
duke@435 718 ContiguousSpace::set_end(value);
duke@435 719 }
duke@435 720
duke@435 721 // Allocation (return NULL if full)
duke@435 722 HeapWord* allocate(size_t word_size);
duke@435 723 HeapWord* par_allocate(size_t word_size);
duke@435 724 };
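
// Illustrative sketch (not part of this header): EdenSpace allocation bumps
// top() only up to soft_end(); when that fails, a slow path can do the work
// the soft limit was meant to trigger and then widen the limit, up to the
// hard end(). The retry logic below is hypothetical:
//
//   HeapWord* mem = eden->allocate(word_size);
//   if (mem == NULL && eden->soft_end() < eden->end()) {
//     // ... perform the periodic action associated with the soft limit ...
//     eden->set_soft_end(eden->end());   // or some new intermediate limit
//     mem = eden->allocate(word_size);   // retry against the new limit
//   }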
duke@435 725
duke@435 726 // Class ConcEdenSpace extends EdenSpace for the sake of safe
duke@435 727 // allocation while soft-end is being modified concurrently.
duke@435 728
duke@435 729 class ConcEdenSpace : public EdenSpace {
duke@435 730 public:
duke@435 731 ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
duke@435 732
duke@435 733 // Allocation (return NULL if full)
duke@435 734 HeapWord* par_allocate(size_t word_size);
duke@435 735 };
duke@435 736
duke@435 737
duke@435 738 // A ContigSpace that supports an efficient "block_start" operation via
duke@435 739 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
duke@435 740 // other spaces). This is the abstract base class for old generation
coleenp@4037 741 // (tenured) spaces.
duke@435 742
duke@435 743 class OffsetTableContigSpace: public ContiguousSpace {
duke@435 744 friend class VMStructs;
duke@435 745 protected:
duke@435 746 BlockOffsetArrayContigSpace _offsets;
duke@435 747 Mutex _par_alloc_lock;
duke@435 748
duke@435 749 public:
duke@435 750 // Constructor
duke@435 751 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
duke@435 752 MemRegion mr);
duke@435 753
duke@435 754 void set_bottom(HeapWord* value);
duke@435 755 void set_end(HeapWord* value);
duke@435 756
jmasa@698 757 void clear(bool mangle_space);
duke@435 758
ysr@777 759 inline HeapWord* block_start_const(const void* p) const;
duke@435 760
duke@435 761 // Add offset table update.
duke@435 762 virtual inline HeapWord* allocate(size_t word_size);
duke@435 763 inline HeapWord* par_allocate(size_t word_size);
duke@435 764
duke@435 765 // MarkSweep support phase3
duke@435 766 virtual HeapWord* initialize_threshold();
duke@435 767 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
duke@435 768
duke@435 769 virtual void print_on(outputStream* st) const;
duke@435 770
duke@435 771 // Debugging
brutisso@3711 772 void verify() const;
duke@435 773 };
duke@435 774
duke@435 775
duke@435 776 // Class TenuredSpace is used by TenuredGeneration
duke@435 777
duke@435 778 class TenuredSpace: public OffsetTableContigSpace {
duke@435 779 friend class VMStructs;
duke@435 780 protected:
duke@435 781 // Mark sweep support
jcoomes@873 782 size_t allowed_dead_ratio() const;
duke@435 783 public:
duke@435 784 // Constructor
duke@435 785 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
duke@435 786 MemRegion mr) :
duke@435 787 OffsetTableContigSpace(sharedOffsetArray, mr) {}
duke@435 788 };
stefank@2314 789 #endif // SHARE_VM_MEMORY_SPACE_HPP
