src/share/vm/memory/space.hpp

author:      aoqi
date:        Thu, 12 Oct 2017 21:27:07 +0800
changeset:   7535:7ae4e26cb1e0
parent:      7131:d35872270666
parent:      6876:710a3c8b516e
permissions: -rw-r--r--

merge
aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #ifndef SHARE_VM_MEMORY_SPACE_HPP
aoqi@0 26 #define SHARE_VM_MEMORY_SPACE_HPP
aoqi@0 27
aoqi@0 28 #include "memory/allocation.hpp"
aoqi@0 29 #include "memory/blockOffsetTable.hpp"
aoqi@0 30 #include "memory/cardTableModRefBS.hpp"
aoqi@0 31 #include "memory/iterator.hpp"
aoqi@0 32 #include "memory/memRegion.hpp"
aoqi@0 33 #include "memory/watermark.hpp"
aoqi@0 34 #include "oops/markOop.hpp"
aoqi@0 35 #include "runtime/mutexLocker.hpp"
aoqi@0 36 #include "utilities/macros.hpp"
aoqi@0 37 #include "utilities/workgroup.hpp"
aoqi@0 38
aoqi@0 39 // A space is an abstraction for the "storage units" backing
aoqi@0 40 // up the generation abstraction. It includes specific
aoqi@0 41 // implementations for keeping track of free and used space,
aoqi@0 42 // for iterating over objects and free blocks, etc.
aoqi@0 43
aoqi@0 44 // Here's the Space hierarchy:
aoqi@0 45 //
aoqi@0 46 // - Space -- an abstract base class describing a heap area
aoqi@0 47 // - CompactibleSpace -- a space supporting compaction
aoqi@0 48 // - CompactibleFreeListSpace -- (used for CMS generation)
aoqi@0 49 // - ContiguousSpace -- a compactible space in which all free space
aoqi@0 50 // is contiguous
aoqi@0 51 // - EdenSpace -- contiguous space used as nursery
aoqi@0 52 // - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
aoqi@0 53 // - OffsetTableContigSpace -- contiguous space with a block offset array
aoqi@0 54 // that allows "fast" block_start calls
aoqi@0 55 // - TenuredSpace -- (used for TenuredGeneration)
aoqi@0 56
aoqi@0 57 // Forward decls.
aoqi@0 58 class Space;
aoqi@0 59 class BlockOffsetArray;
aoqi@0 60 class BlockOffsetArrayContigSpace;
aoqi@0 61 class Generation;
aoqi@0 62 class CompactibleSpace;
aoqi@0 63 class BlockOffsetTable;
aoqi@0 64 class GenRemSet;
aoqi@0 65 class CardTableRS;
aoqi@0 66 class DirtyCardToOopClosure;
aoqi@0 67
aoqi@0 68 // A Space describes a heap area. Class Space is an abstract
aoqi@0 69 // base class.
aoqi@0 70 //
aoqi@0 71 // Space supports allocation and size computation; GC support is also provided.
aoqi@0 72 //
aoqi@0 73 // Invariant: bottom() and end() are on page_size boundaries and
aoqi@0 74 // bottom() <= top() <= end()
aoqi@0 75 // top() is inclusive and end() is exclusive.
aoqi@0 76
aoqi@0 77 class Space: public CHeapObj<mtGC> {
aoqi@0 78 friend class VMStructs;
aoqi@0 79 protected:
aoqi@0 80 HeapWord* _bottom;
aoqi@0 81 HeapWord* _end;
aoqi@0 82
aoqi@0 83 // Used in support of save_marks()
aoqi@0 84 HeapWord* _saved_mark_word;
aoqi@0 85
aoqi@0 86 MemRegionClosure* _preconsumptionDirtyCardClosure;
aoqi@0 87
aoqi@0 88 // A sequential tasks done structure. This supports
aoqi@0 89 // parallel GC, where we have threads dynamically
aoqi@0 90 // claiming sub-tasks from a larger parallel task.
aoqi@0 91 SequentialSubTasksDone _par_seq_tasks;
aoqi@0 92
aoqi@0 93 Space():
aoqi@0 94 _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
aoqi@0 95
aoqi@0 96 public:
aoqi@0 97 // Accessors
aoqi@0 98 HeapWord* bottom() const { return _bottom; }
aoqi@0 99 HeapWord* end() const { return _end; }
aoqi@0 100 virtual void set_bottom(HeapWord* value) { _bottom = value; }
aoqi@0 101 virtual void set_end(HeapWord* value) { _end = value; }
aoqi@0 102
aoqi@0 103 virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }
aoqi@0 104
aoqi@0 105 void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
aoqi@0 106
mgerdin@6981 107 // Returns true if this object has been allocated since a
mgerdin@6981 108 // generation's "save_marks" call.
mgerdin@6981 109 virtual bool obj_allocated_since_save_marks(const oop obj) const {
mgerdin@6981 110 return (HeapWord*)obj >= saved_mark_word();
mgerdin@6981 111 }
mgerdin@6981 112
aoqi@0 113 MemRegionClosure* preconsumptionDirtyCardClosure() const {
aoqi@0 114 return _preconsumptionDirtyCardClosure;
aoqi@0 115 }
aoqi@0 116 void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
aoqi@0 117 _preconsumptionDirtyCardClosure = cl;
aoqi@0 118 }
aoqi@0 119
mgerdin@6981 120 // Returns a subregion of the space containing only the allocated objects in
aoqi@0 121 // the space.
mgerdin@6981 122 virtual MemRegion used_region() const = 0;
aoqi@0 123
aoqi@0 124 // Returns a region that is guaranteed to contain (at least) all objects
aoqi@0 125 // allocated at the time of the last call to "save_marks". If the space
aoqi@0 126 // initializes its DirtyCardToOopClosure's specifying the "contig" option
aoqi@0 127 // (that is, if the space is contiguous), then this region must contain only
aoqi@0 128 // such objects: the memregion will be from the bottom of the region to the
aoqi@0 129 // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
aoqi@0 130 // the space must distinguish between objects in the region allocated before
aoqi@0 131 // and after the call to save marks.
mgerdin@6981 132 MemRegion used_region_at_save_marks() const {
aoqi@0 133 return MemRegion(bottom(), saved_mark_word());
aoqi@0 134 }
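// Illustrative sketch (assumes a contiguous space, a safepoint, and that
// "sp" and "new_obj" are a Space* and an object known to start inside it):
// how the two save-marks queries above relate.
//
//   MemRegion before = sp->used_region_at_save_marks();   // [bottom, saved mark)
//   assert(sp->obj_allocated_since_save_marks(new_obj) ==
//          !before.contains((HeapWord*)new_obj), "consistent save-marks views");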
aoqi@0 135
aoqi@0 136 // Initialization.
aoqi@0 137 // "initialize" should be called once on a space, before it is used for
aoqi@0 138 // any purpose. The "mr" argument gives the bounds of the space, and
aoqi@0 139 // the "clear_space" argument should be true unless the memory in "mr" is
aoqi@0 140 // known to be zeroed.
aoqi@0 141 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
aoqi@0 142
aoqi@0 143 // The "clear" method must be called on a region that may have
aoqi@0 144 // had allocation performed in it, but is now to be considered empty.
aoqi@0 145 virtual void clear(bool mangle_space);
aoqi@0 146
aoqi@0 147 // For detecting GC bugs. Should only be called at GC boundaries, since
aoqi@0 148 // some unused space may be used as scratch space during GC's.
aoqi@0 149 // Default implementation does nothing. We also call this when expanding
aoqi@0 150 // a space to satisfy an allocation request. See bug #4668531
aoqi@0 151 virtual void mangle_unused_area() {}
aoqi@0 152 virtual void mangle_unused_area_complete() {}
aoqi@0 153 virtual void mangle_region(MemRegion mr) {}
aoqi@0 154
aoqi@0 155 // Testers
aoqi@0 156 bool is_empty() const { return used() == 0; }
aoqi@0 157 bool not_empty() const { return used() > 0; }
aoqi@0 158
aoqi@0 159 // Returns true iff the given space contains the
aoqi@0 160 // given address as part of an allocated object. For
aoqi@0 161 // certain kinds of spaces, this might be a potentially
aoqi@0 162 // expensive operation. To prevent performance problems
aoqi@0 163 // on account of its inadvertent use in product jvm's,
aoqi@0 164 // we restrict its use to assertion checks only.
mgerdin@6981 165 bool is_in(const void* p) const {
mgerdin@6981 166 return used_region().contains(p);
mgerdin@6981 167 }
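// Illustrative use (debug-only, as advised above); "sp" and "addr" are
// assumed to be a Space* and an interior address supplied by the caller:
//
//   assert(sp->is_in(addr), "addr should be inside an allocated part of the space");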
aoqi@0 168
aoqi@0 169 // Returns true iff the given reserved memory of the space contains the
aoqi@0 170 // given address.
aoqi@0 171 bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
aoqi@0 172
aoqi@0 173 // Returns true iff the given block is not allocated.
aoqi@0 174 virtual bool is_free_block(const HeapWord* p) const = 0;
aoqi@0 175
aoqi@0 176 // Test whether p is double-aligned
aoqi@0 177 static bool is_aligned(void* p) {
aoqi@0 178 return ((intptr_t)p & (sizeof(double)-1)) == 0;
aoqi@0 179 }
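// Illustrative note (assuming sizeof(double) == 8, as on the platforms this
// code targets): the mask test above accepts addresses whose low three bits
// are zero, e.g. 0x1000 or 0x1008, and rejects 0x1004.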
aoqi@0 180
aoqi@0 181 // Size computations. Sizes are in bytes.
aoqi@0 182 size_t capacity() const { return byte_size(bottom(), end()); }
aoqi@0 183 virtual size_t used() const = 0;
aoqi@0 184 virtual size_t free() const = 0;
aoqi@0 185
aoqi@0 186 // Iterate over all the ref-containing fields of all objects in the
aoqi@0 187 // space, calling "cl.do_oop" on each. Fields in objects allocated by
aoqi@0 188 // applications of the closure are not included in the iteration.
aoqi@0 189 virtual void oop_iterate(ExtendedOopClosure* cl);
aoqi@0 190
aoqi@0 191 // Iterate over all objects in the space, calling "cl.do_object" on
aoqi@0 192 // each. Objects allocated by applications of the closure are not
aoqi@0 193 // included in the iteration.
aoqi@0 194 virtual void object_iterate(ObjectClosure* blk) = 0;
aoqi@0 195 // Similar to object_iterate() except only iterates over
aoqi@0 196 // objects whose internal references point to objects in the space.
aoqi@0 197 virtual void safe_object_iterate(ObjectClosure* blk) = 0;
aoqi@0 198
aoqi@0 199 // Create and return a new dirty card to oop closure. Can be
aoqi@0 200 // overridden to return the appropriate type of closure
aoqi@0 201 // depending on the type of space in which the closure will
aoqi@0 202 // operate. ResourceArea allocated.
aoqi@0 203 virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
aoqi@0 204 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 205 HeapWord* boundary = NULL);
aoqi@0 206
aoqi@0 207 // If "p" is in the space, returns the address of the start of the
aoqi@0 208 // "block" that contains "p". We say "block" instead of "object" since
aoqi@0 209 // some heaps may not pack objects densely; a chunk may either be an
aoqi@0 210 // object or a non-object. If "p" is not in the space, return NULL.
aoqi@0 211 virtual HeapWord* block_start_const(const void* p) const = 0;
aoqi@0 212
aoqi@0 213 // The non-const version may have benevolent side effects on the data
aoqi@0 214 // structure supporting these calls, possibly speeding up future calls.
aoqi@0 215 // The default implementation, however, is simply to call the const
aoqi@0 216 // version.
aoqi@0 217 inline virtual HeapWord* block_start(const void* p);
aoqi@0 218
aoqi@0 219 // Requires "addr" to be the start of a chunk, and returns its size.
aoqi@0 220 // "addr + size" is required to be the start of a new chunk, or the end
aoqi@0 221 // of the active area of the heap.
aoqi@0 222 virtual size_t block_size(const HeapWord* addr) const = 0;
aoqi@0 223
aoqi@0 224 // Requires "addr" to be the start of a block, and returns "TRUE" iff
aoqi@0 225 // the block is an object.
aoqi@0 226 virtual bool block_is_obj(const HeapWord* addr) const = 0;
aoqi@0 227
aoqi@0 228 // Requires "addr" to be the start of a block, and returns "TRUE" iff
aoqi@0 229 // the block is an object and the object is alive.
aoqi@0 230 virtual bool obj_is_alive(const HeapWord* addr) const;
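// Illustrative sketch (hypothetical walker, assumes a safepoint and a Space*
// "sp"): the block queries above are intended to be used together to scan a
// space whose blocks may or may not be live objects.
//
//   HeapWord* cur   = sp->bottom();
//   HeapWord* limit = sp->used_region().end();
//   while (cur < limit) {
//     if (sp->block_is_obj(cur) && sp->obj_is_alive(cur)) {
//       // oop(cur) is a live object; visit it here.
//     }
//     cur += sp->block_size(cur);   // advance to the next block
//   }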
aoqi@0 231
aoqi@0 232 // Allocation (return NULL if full). Assumes the caller has established
aoqi@0 233 // mutually exclusive access to the space.
aoqi@0 234 virtual HeapWord* allocate(size_t word_size) = 0;
aoqi@0 235
aoqi@0 236 // Allocation (return NULL if full). Enforces mutual exclusion internally.
aoqi@0 237 virtual HeapWord* par_allocate(size_t word_size) = 0;
aoqi@0 238
aoqi@0 239 // Mark-sweep-compact support: all spaces can update pointers to objects
aoqi@0 240 // moving as a part of compaction.
aoqi@0 241 virtual void adjust_pointers();
aoqi@0 242
aoqi@0 243 // PrintHeapAtGC support
aoqi@0 244 virtual void print() const;
aoqi@0 245 virtual void print_on(outputStream* st) const;
aoqi@0 246 virtual void print_short() const;
aoqi@0 247 virtual void print_short_on(outputStream* st) const;
aoqi@0 248
aoqi@0 249
aoqi@0 250 // Accessor for parallel sequential tasks.
aoqi@0 251 SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
aoqi@0 252
aoqi@0 253 // If "this" is a ContiguousSpace, return it; else return NULL.
aoqi@0 254 virtual ContiguousSpace* toContiguousSpace() {
aoqi@0 255 return NULL;
aoqi@0 256 }
aoqi@0 257
aoqi@0 258 // Debugging
aoqi@0 259 virtual void verify() const = 0;
aoqi@0 260 };
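// Illustrative sketch: the checked downcast declared above avoids relying on
// C++ RTTI when only a generic Space* ("sp", an assumption) is at hand.
//
//   ContiguousSpace* cs = sp->toContiguousSpace();
//   if (cs != NULL) {
//     // ContiguousSpace-only accessors such as cs->top() are safe here.
//   }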
aoqi@0 261
aoqi@0 262 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
aoqi@0 263 // OopClosure to (the addresses of) all the ref-containing fields that could
aoqi@0 264 // be modified by virtue of the given MemRegion being dirty. (Note that
aoqi@0 265 // because of the imprecise nature of the write barrier, this may iterate
aoqi@0 266 // over oops beyond the region.)
aoqi@0 267 // This base type for dirty card to oop closures handles memory regions
aoqi@0 268 // in non-contiguous spaces with no boundaries, and should be sub-classed
aoqi@0 269 // to support other space types. See ContiguousDCTOC for a sub-class
aoqi@0 270 // that works with ContiguousSpaces.
aoqi@0 271
aoqi@0 272 class DirtyCardToOopClosure: public MemRegionClosureRO {
aoqi@0 273 protected:
aoqi@0 274 ExtendedOopClosure* _cl;
aoqi@0 275 Space* _sp;
aoqi@0 276 CardTableModRefBS::PrecisionStyle _precision;
aoqi@0 277 HeapWord* _boundary; // If non-NULL, process only non-NULL oops
aoqi@0 278 // pointing below boundary.
aoqi@0 279 HeapWord* _min_done; // ObjHeadPreciseArray precision requires
aoqi@0 280 // a downwards traversal; this is the
aoqi@0 281 // lowest location already done (or,
aoqi@0 282 // alternatively, the lowest address that
aoqi@0 283 // shouldn't be done again. NULL means infinity.)
aoqi@0 284 NOT_PRODUCT(HeapWord* _last_bottom;)
aoqi@0 285 NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
aoqi@0 286
aoqi@0 287 // Get the actual top of the area on which the closure will
aoqi@0 288 // operate, given where the top is assumed to be (the end of the
aoqi@0 289 // memory region passed to do_MemRegion) and where the object
aoqi@0 290 // at the top is assumed to start. For example, an object may
aoqi@0 291 // start at the top but actually extend past the assumed top,
aoqi@0 292 // in which case the top becomes the end of the object.
aoqi@0 293 virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
aoqi@0 294
aoqi@0 295 // Walk the given memory region from bottom to (actual) top
aoqi@0 296 // looking for objects and applying the oop closure (_cl) to
aoqi@0 297 // them. The base implementation of this treats the area as
aoqi@0 298 // blocks, where a block may or may not be an object. Sub-
aoqi@0 299 // classes should override this to provide more accurate
aoqi@0 300 // or possibly more efficient walking.
aoqi@0 301 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
aoqi@0 302
aoqi@0 303 public:
aoqi@0 304 DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
aoqi@0 305 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 306 HeapWord* boundary) :
aoqi@0 307 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
aoqi@0 308 _min_done(NULL) {
aoqi@0 309 NOT_PRODUCT(_last_bottom = NULL);
aoqi@0 310 NOT_PRODUCT(_last_explicit_min_done = NULL);
aoqi@0 311 }
aoqi@0 312
aoqi@0 313 void do_MemRegion(MemRegion mr);
aoqi@0 314
aoqi@0 315 void set_min_done(HeapWord* min_done) {
aoqi@0 316 _min_done = min_done;
aoqi@0 317 NOT_PRODUCT(_last_explicit_min_done = _min_done);
aoqi@0 318 }
aoqi@0 319 #ifndef PRODUCT
aoqi@0 320 void set_last_bottom(HeapWord* last_bottom) {
aoqi@0 321 _last_bottom = last_bottom;
aoqi@0 322 }
aoqi@0 323 #endif
aoqi@0 324 };
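// Illustrative sketch (hypothetical card-scanning caller): a space hands out a
// suitably typed closure via new_dcto_cl(), and the caller applies it to each
// dirty region found in the card table. "oop_cl" (an ExtendedOopClosure*) and
// "dirty_mr" are assumed to be supplied by that caller.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::Precise);  // ResourceArea allocated
//   dcto_cl->do_MemRegion(dirty_mr);  // may visit oops beyond dirty_mr (imprecise barrier)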
aoqi@0 325
aoqi@0 326 // A structure to represent a point at which objects are being copied
aoqi@0 327 // during compaction.
aoqi@0 328 class CompactPoint : public StackObj {
aoqi@0 329 public:
aoqi@0 330 Generation* gen;
aoqi@0 331 CompactibleSpace* space;
aoqi@0 332 HeapWord* threshold;
tschatzl@7009 333
sjohanss@7131 334 CompactPoint(Generation* g = NULL) :
sjohanss@7131 335 gen(g), space(NULL), threshold(0) {}
aoqi@0 336 };
aoqi@0 337
aoqi@0 338 // A space that supports compaction operations. This is usually, but not
aoqi@0 339 // necessarily, a space that is normally contiguous. But, for example, a
aoqi@0 340 // free-list-based space whose normal collection is a mark-sweep without
aoqi@0 341 // compaction could still support compaction in full GC's.
aoqi@0 342
aoqi@0 343 class CompactibleSpace: public Space {
aoqi@0 344 friend class VMStructs;
aoqi@0 345 friend class CompactibleFreeListSpace;
aoqi@0 346 private:
aoqi@0 347 HeapWord* _compaction_top;
aoqi@0 348 CompactibleSpace* _next_compaction_space;
aoqi@0 349
aoqi@0 350 public:
aoqi@0 351 CompactibleSpace() :
aoqi@0 352 _compaction_top(NULL), _next_compaction_space(NULL) {}
aoqi@0 353
aoqi@0 354 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
aoqi@0 355 virtual void clear(bool mangle_space);
aoqi@0 356
aoqi@0 357 // Used temporarily during a compaction phase to hold the value
aoqi@0 358 // top should have when compaction is complete.
aoqi@0 359 HeapWord* compaction_top() const { return _compaction_top; }
aoqi@0 360
aoqi@0 361 void set_compaction_top(HeapWord* value) {
aoqi@0 362 assert(value == NULL || (value >= bottom() && value <= end()),
aoqi@0 363 "should point inside space");
aoqi@0 364 _compaction_top = value;
aoqi@0 365 }
aoqi@0 366
aoqi@0 367 // Perform operations on the space needed after a compaction
aoqi@0 368 // has been performed.
mgerdin@6981 369 virtual void reset_after_compaction() = 0;
aoqi@0 370
aoqi@0 371 // Returns the next space (in the current generation) to be compacted in
aoqi@0 372 // the global compaction order. Also is used to select the next
aoqi@0 373 // space into which to compact.
aoqi@0 374
aoqi@0 375 virtual CompactibleSpace* next_compaction_space() const {
aoqi@0 376 return _next_compaction_space;
aoqi@0 377 }
aoqi@0 378
aoqi@0 379 void set_next_compaction_space(CompactibleSpace* csp) {
aoqi@0 380 _next_compaction_space = csp;
aoqi@0 381 }
aoqi@0 382
aoqi@0 383 // MarkSweep support phase2
aoqi@0 384
aoqi@0 385 // Start the process of compaction of the current space: compute
aoqi@0 386 // post-compaction addresses, and insert forwarding pointers. The fields
aoqi@0 387 // "cp->gen" and "cp->compaction_space" are the generation and space into
aoqi@0 388 // which we are currently compacting. This call updates "cp" as necessary,
aoqi@0 389 // and leaves the "compaction_top" of the final value of
aoqi@0 390 // "cp->compaction_space" up-to-date. Offset tables may be updated in
aoqi@0 391 // this phase as if the final copy had occurred; if so, "cp->threshold"
aoqi@0 392 // indicates when the next such action should be taken.
aoqi@0 393 virtual void prepare_for_compaction(CompactPoint* cp);
aoqi@0 394 // MarkSweep support phase3
aoqi@0 395 virtual void adjust_pointers();
aoqi@0 396 // MarkSweep support phase4
aoqi@0 397 virtual void compact();
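// Illustrative sketch (simplified driver, not the actual mark-sweep code): the
// three virtuals above are meant to run in phase order during a full collection,
// with a CompactPoint carrying state across spaces. "space" and "target_gen"
// are assumptions (a CompactibleSpace* and the Generation* compacted into).
//
//   CompactPoint cp(target_gen);
//   space->prepare_for_compaction(&cp);   // phase 2: compute forwarding addresses
//   space->adjust_pointers();             // phase 3: update references
//   space->compact();                     // phase 4: move the objects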
aoqi@0 398
aoqi@0 399 // The maximum percentage of objects that can be dead in the compacted
aoqi@0 400 // live part of a compacted space ("deadwood" support.)
aoqi@0 401 virtual size_t allowed_dead_ratio() const { return 0; };
aoqi@0 402
aoqi@0 403 // Some contiguous spaces may maintain some data structures that should
aoqi@0 404 // be updated whenever an allocation crosses a boundary. This function
aoqi@0 405 // returns the first such boundary.
aoqi@0 406 // (The default implementation returns the end of the space, so the
aoqi@0 407 // boundary is never crossed.)
aoqi@0 408 virtual HeapWord* initialize_threshold() { return end(); }
aoqi@0 409
aoqi@0 410 // "q" is an object of the given "size" that should be forwarded;
aoqi@0 411 // "cp" names the generation ("gen") and containing "this" (which must
aoqi@0 412 // also equal "cp->space"). "compact_top" is where in "this" the
aoqi@0 413 // next object should be forwarded to. If there is room in "this" for
aoqi@0 414 // the object, insert an appropriate forwarding pointer in "q".
aoqi@0 415 // If not, go to the next compaction space (there must
aoqi@0 416 // be one, since compaction must succeed -- we go to the first space of
aoqi@0 417 // the previous generation if necessary, updating "cp"), reset compact_top
aoqi@0 418 // and then forward. In either case, returns the new value of "compact_top".
aoqi@0 419 // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
aoqi@0 420 // function of the then-current compaction space, and updates "cp->threshold"
aoqi@0 421 // accordingly.
aoqi@0 422 virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
aoqi@0 423 HeapWord* compact_top);
aoqi@0 424
aoqi@0 425 // Return a size with adjustments as required of the space.
aoqi@0 426 virtual size_t adjust_object_size_v(size_t size) const { return size; }
aoqi@0 427
aoqi@0 428 protected:
aoqi@0 429 // Used during compaction.
aoqi@0 430 HeapWord* _first_dead;
aoqi@0 431 HeapWord* _end_of_live;
aoqi@0 432
aoqi@0 433 // Minimum size of a free block.
mgerdin@6981 434 virtual size_t minimum_free_block_size() const { return 0; }
aoqi@0 435
aoqi@0 436 // This function is invoked when an allocation of an object covering
aoqi@0 437 // "start" to "end" crosses the threshold; returns the next
aoqi@0 438 // threshold. (The default implementation does nothing.)
aoqi@0 439 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
aoqi@0 440 return end();
aoqi@0 441 }
aoqi@0 442
aoqi@0 443 // Requires "allowed_deadspace_words > 0", that "q" is the start of a
aoqi@0 444 // free block of the given "word_len", and that "q", were it an object,
aoqi@0 445 // would not move if forwarded. If the size allows, fill the free
aoqi@0 446 // block with an object, to prevent excessive compaction. Returns "true"
aoqi@0 447 // iff the free region was made deadspace, and modifies
aoqi@0 448 // "allowed_deadspace_words" to reflect the number of available deadspace
aoqi@0 449 // words remaining after this operation.
aoqi@0 450 bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
aoqi@0 451 size_t word_len);
aoqi@0 452 };
aoqi@0 453
aoqi@0 454 class GenSpaceMangler;
aoqi@0 455
aoqi@0 456 // A space in which the free area is contiguous. It therefore supports
aoqi@0 457 // faster allocation and compaction.
aoqi@0 458 class ContiguousSpace: public CompactibleSpace {
aoqi@0 459 friend class OneContigSpaceCardGeneration;
aoqi@0 460 friend class VMStructs;
aoqi@0 461 protected:
aoqi@0 462 HeapWord* _top;
aoqi@0 463 HeapWord* _concurrent_iteration_safe_limit;
aoqi@0 464 // A helper for mangling the unused area of the space in debug builds.
aoqi@0 465 GenSpaceMangler* _mangler;
aoqi@0 466
aoqi@0 467 GenSpaceMangler* mangler() { return _mangler; }
aoqi@0 468
aoqi@0 469 // Allocation helpers (return NULL if full).
aoqi@0 470 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
aoqi@0 471 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
aoqi@0 472
aoqi@0 473 public:
aoqi@0 474 ContiguousSpace();
aoqi@0 475 ~ContiguousSpace();
aoqi@0 476
aoqi@0 477 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
aoqi@0 478 virtual void clear(bool mangle_space);
aoqi@0 479
aoqi@0 480 // Accessors
aoqi@0 481 HeapWord* top() const { return _top; }
aoqi@0 482 void set_top(HeapWord* value) { _top = value; }
aoqi@0 483
mgerdin@6981 484 void set_saved_mark() { _saved_mark_word = top(); }
aoqi@0 485 void reset_saved_mark() { _saved_mark_word = bottom(); }
aoqi@0 486
aoqi@0 487 WaterMark bottom_mark() { return WaterMark(this, bottom()); }
aoqi@0 488 WaterMark top_mark() { return WaterMark(this, top()); }
aoqi@0 489 WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
aoqi@0 490 bool saved_mark_at_top() const { return saved_mark_word() == top(); }
aoqi@0 491
aoqi@0 492 // In debug mode mangle (write it with a particular bit
aoqi@0 493 // pattern) the unused part of a space.
aoqi@0 494
aoqi@0 495 // Used to save an address in a space for later use during mangling.
aoqi@0 496 void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
aoqi@0 497 // Used to save the space's current top for later use during mangling.
aoqi@0 498 void set_top_for_allocations() PRODUCT_RETURN;
aoqi@0 499
aoqi@0 500 // Mangle regions in the space from the current top up to the
aoqi@0 501 // previously mangled part of the space.
aoqi@0 502 void mangle_unused_area() PRODUCT_RETURN;
aoqi@0 503 // Mangle [top, end)
aoqi@0 504 void mangle_unused_area_complete() PRODUCT_RETURN;
aoqi@0 505 // Mangle the given MemRegion.
aoqi@0 506 void mangle_region(MemRegion mr) PRODUCT_RETURN;
aoqi@0 507
aoqi@0 508 // Do some sparse checking on the area that should have been mangled.
aoqi@0 509 void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
aoqi@0 510 // Check the complete area that should have been mangled.
aoqi@0 511 // This code may be NULL depending on the macro DEBUG_MANGLING.
aoqi@0 512 void check_mangled_unused_area_complete() PRODUCT_RETURN;
aoqi@0 513
aoqi@0 514 // Size computations: sizes in bytes.
aoqi@0 515 size_t capacity() const { return byte_size(bottom(), end()); }
aoqi@0 516 size_t used() const { return byte_size(bottom(), top()); }
aoqi@0 517 size_t free() const { return byte_size(top(), end()); }
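// Illustrative invariant (follows from bottom() <= top() <= end()): the three
// byte sizes above partition a contiguous space, so for any ContiguousSpace "cs"
//
//   assert(cs->capacity() == cs->used() + cs->free(), "sizes must add up");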
aoqi@0 518
aoqi@0 519 virtual bool is_free_block(const HeapWord* p) const;
aoqi@0 520
aoqi@0 521 // In a contiguous space we have a more obvious bound on what parts
aoqi@0 522 // contain objects.
aoqi@0 523 MemRegion used_region() const { return MemRegion(bottom(), top()); }
aoqi@0 524
aoqi@0 525 // Allocation (return NULL if full)
aoqi@0 526 virtual HeapWord* allocate(size_t word_size);
aoqi@0 527 virtual HeapWord* par_allocate(size_t word_size);
jmasa@7031 528 HeapWord* allocate_aligned(size_t word_size);
aoqi@0 529
aoqi@0 530 // Iteration
aoqi@0 531 void oop_iterate(ExtendedOopClosure* cl);
aoqi@0 532 void object_iterate(ObjectClosure* blk);
aoqi@0 533 // For contiguous spaces this method will iterate safely over objects
aoqi@0 534 // in the space (i.e., between bottom and top) when at a safepoint.
aoqi@0 535 void safe_object_iterate(ObjectClosure* blk);
mgerdin@6980 536
mgerdin@6980 537 // Iterate over as many initialized objects in the space as possible,
mgerdin@6980 538 // calling "cl.do_object_careful" on each. Return NULL if all objects
mgerdin@6980 539 // in the space (at the start of the iteration) were iterated over.
mgerdin@6980 540 // Return an address indicating the extent of the iteration in the
mgerdin@6980 541 // event that the iteration had to return because of finding an
mgerdin@6980 542 // uninitialized object in the space, or if the closure "cl"
mgerdin@6980 543 // signaled early termination.
aoqi@0 544 HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
aoqi@0 545 HeapWord* concurrent_iteration_safe_limit() {
aoqi@0 546 assert(_concurrent_iteration_safe_limit <= top(),
aoqi@0 547 "_concurrent_iteration_safe_limit update missed");
aoqi@0 548 return _concurrent_iteration_safe_limit;
aoqi@0 549 }
aoqi@0 550 // Changes the safe limit; all objects from bottom() to the new
aoqi@0 551 // limit should be properly initialized.
aoqi@0 552 void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
aoqi@0 553 assert(new_limit <= top(), "uninitialized objects in the safe range");
aoqi@0 554 _concurrent_iteration_safe_limit = new_limit;
aoqi@0 555 }
aoqi@0 556
aoqi@0 557
aoqi@0 558 #if INCLUDE_ALL_GCS
aoqi@0 559 // In support of parallel oop_iterate.
aoqi@0 560 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
aoqi@0 561 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
aoqi@0 562
aoqi@0 563 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
aoqi@0 564 #undef ContigSpace_PAR_OOP_ITERATE_DECL
aoqi@0 565 #endif // INCLUDE_ALL_GCS
aoqi@0 566
aoqi@0 567 // Compaction support
aoqi@0 568 virtual void reset_after_compaction() {
aoqi@0 569 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
aoqi@0 570 set_top(compaction_top());
aoqi@0 571 // set new iteration safe limit
aoqi@0 572 set_concurrent_iteration_safe_limit(compaction_top());
aoqi@0 573 }
aoqi@0 574
aoqi@0 575 // Override.
aoqi@0 576 DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
aoqi@0 577 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 578 HeapWord* boundary = NULL);
aoqi@0 579
aoqi@0 580 // Apply "blk->do_oop" to the addresses of all reference fields in objects
aoqi@0 581 // starting with the _saved_mark_word, which was noted during a generation's
aoqi@0 582 // save_marks and is required to denote the head of an object.
aoqi@0 583 // Fields in objects allocated by applications of the closure
aoqi@0 584 // *are* included in the iteration.
aoqi@0 585 // Updates _saved_mark_word to point to just after the last object
aoqi@0 586 // iterated over.
aoqi@0 587 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
aoqi@0 588 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
aoqi@0 589
aoqi@0 590 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
aoqi@0 591 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
aoqi@0 592
aoqi@0 593 // Same as object_iterate, but starting from "mark", which is required
aoqi@0 594 // to denote the start of an object. Objects allocated by
aoqi@0 595 // applications of the closure *are* included in the iteration.
aoqi@0 596 virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
aoqi@0 597
aoqi@0 598 // Very inefficient implementation.
aoqi@0 599 virtual HeapWord* block_start_const(const void* p) const;
aoqi@0 600 size_t block_size(const HeapWord* p) const;
aoqi@0 601 // If a block is in the allocated area, it is an object.
aoqi@0 602 bool block_is_obj(const HeapWord* p) const { return p < top(); }
aoqi@0 603
aoqi@0 604 // Addresses for inlined allocation
aoqi@0 605 HeapWord** top_addr() { return &_top; }
aoqi@0 606 HeapWord** end_addr() { return &_end; }
aoqi@0 607
aoqi@0 608 // Overrides for more efficient compaction support.
aoqi@0 609 void prepare_for_compaction(CompactPoint* cp);
aoqi@0 610
aoqi@0 611 // PrintHeapAtGC support.
aoqi@0 612 virtual void print_on(outputStream* st) const;
aoqi@0 613
aoqi@0 614 // Checked dynamic downcasts.
aoqi@0 615 virtual ContiguousSpace* toContiguousSpace() {
aoqi@0 616 return this;
aoqi@0 617 }
aoqi@0 618
aoqi@0 619 // Debugging
aoqi@0 620 virtual void verify() const;
aoqi@0 621
aoqi@0 622 // Used to increase collection frequency. "factor" of 0 means entire
aoqi@0 623 // space.
aoqi@0 624 void allocate_temporary_filler(int factor);
aoqi@0 625
aoqi@0 626 };
aoqi@0 627
aoqi@0 628
aoqi@0 629 // A dirty card to oop closure that does filtering.
aoqi@0 630 // It knows how to filter out objects that are outside of the _boundary.
aoqi@0 631 class Filtering_DCTOC : public DirtyCardToOopClosure {
aoqi@0 632 protected:
aoqi@0 633 // Override.
aoqi@0 634 void walk_mem_region(MemRegion mr,
aoqi@0 635 HeapWord* bottom, HeapWord* top);
aoqi@0 636
aoqi@0 637 // Walk the given memory region, from bottom to top, applying
aoqi@0 638 // the given oop closure to (possibly) all objects found. The
aoqi@0 639 // given oop closure may or may not be the same as the oop
aoqi@0 640 // closure with which this closure was created, as it may
aoqi@0 641 // be a filtering closure which makes use of the _boundary.
aoqi@0 642 // We offer two signatures, so the FilteringClosure static type is
aoqi@0 643 // apparent.
aoqi@0 644 virtual void walk_mem_region_with_cl(MemRegion mr,
aoqi@0 645 HeapWord* bottom, HeapWord* top,
aoqi@0 646 ExtendedOopClosure* cl) = 0;
aoqi@0 647 virtual void walk_mem_region_with_cl(MemRegion mr,
aoqi@0 648 HeapWord* bottom, HeapWord* top,
aoqi@0 649 FilteringClosure* cl) = 0;
aoqi@0 650
aoqi@0 651 public:
aoqi@0 652 Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
aoqi@0 653 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 654 HeapWord* boundary) :
aoqi@0 655 DirtyCardToOopClosure(sp, cl, precision, boundary) {}
aoqi@0 656 };
aoqi@0 657
aoqi@0 658 // A dirty card to oop closure for contiguous spaces
aoqi@0 659 // (ContiguousSpace and sub-classes).
aoqi@0 660 // It is a Filtering_DCTOC, as defined above, and it knows:
aoqi@0 661 //
aoqi@0 662 // 1. That the actual top of any area in a memory region
aoqi@0 663 // contained by the space is bounded by the end of the contiguous
aoqi@0 664 // region of the space.
aoqi@0 665 // 2. That the space is really made up of objects and not just
aoqi@0 666 // blocks.
aoqi@0 667
aoqi@0 668 class ContiguousSpaceDCTOC : public Filtering_DCTOC {
aoqi@0 669 protected:
aoqi@0 670 // Overrides.
aoqi@0 671 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
aoqi@0 672
aoqi@0 673 virtual void walk_mem_region_with_cl(MemRegion mr,
aoqi@0 674 HeapWord* bottom, HeapWord* top,
aoqi@0 675 ExtendedOopClosure* cl);
aoqi@0 676 virtual void walk_mem_region_with_cl(MemRegion mr,
aoqi@0 677 HeapWord* bottom, HeapWord* top,
aoqi@0 678 FilteringClosure* cl);
aoqi@0 679
aoqi@0 680 public:
aoqi@0 681 ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
aoqi@0 682 CardTableModRefBS::PrecisionStyle precision,
aoqi@0 683 HeapWord* boundary) :
aoqi@0 684 Filtering_DCTOC(sp, cl, precision, boundary)
aoqi@0 685 {}
aoqi@0 686 };
aoqi@0 687
aoqi@0 688
aoqi@0 689 // Class EdenSpace describes the eden space in the new generation.
aoqi@0 690
aoqi@0 691 class DefNewGeneration;
aoqi@0 692
aoqi@0 693 class EdenSpace : public ContiguousSpace {
aoqi@0 694 friend class VMStructs;
aoqi@0 695 private:
aoqi@0 696 DefNewGeneration* _gen;
aoqi@0 697
aoqi@0 698 // _soft_end is used as a soft limit on allocation. As soft limits are
aoqi@0 699 // reached, the slow-path allocation code can invoke other actions and then
aoqi@0 700 // adjust _soft_end up to a new soft limit or to end().
aoqi@0 701 HeapWord* _soft_end;
aoqi@0 702
aoqi@0 703 public:
aoqi@0 704 EdenSpace(DefNewGeneration* gen) :
aoqi@0 705 _gen(gen), _soft_end(NULL) {}
aoqi@0 706
aoqi@0 707 // Get/set just the 'soft' limit.
aoqi@0 708 HeapWord* soft_end() { return _soft_end; }
aoqi@0 709 HeapWord** soft_end_addr() { return &_soft_end; }
aoqi@0 710 void set_soft_end(HeapWord* value) { _soft_end = value; }
aoqi@0 711
aoqi@0 712 // Override.
aoqi@0 713 void clear(bool mangle_space);
aoqi@0 714
aoqi@0 715 // Set both the 'hard' and 'soft' limits (_end and _soft_end).
aoqi@0 716 void set_end(HeapWord* value) {
aoqi@0 717 set_soft_end(value);
aoqi@0 718 ContiguousSpace::set_end(value);
aoqi@0 719 }
aoqi@0 720
aoqi@0 721 // Allocation (return NULL if full)
aoqi@0 722 HeapWord* allocate(size_t word_size);
aoqi@0 723 HeapWord* par_allocate(size_t word_size);
aoqi@0 724 };
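// Illustrative sketch (hypothetical, simplified slow path; "eden" and
// "word_size" are assumptions): allocation is bounded by the soft limit first;
// on failure the slow path can take whatever action the soft limit was
// guarding and then widen it toward the hard end().
//
//   HeapWord* obj = eden->par_allocate(word_size);      // bounded by soft_end()
//   if (obj == NULL && eden->soft_end() < eden->end()) {
//     // ... perform the bookkeeping the soft limit exists for (assumption) ...
//     eden->set_soft_end(eden->end());                   // raise to the hard limit
//     obj = eden->par_allocate(word_size);               // retry
//   }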
aoqi@0 725
aoqi@0 726 // Class ConcEdenSpace extends EdenSpace for the sake of safe
aoqi@0 727 // allocation while soft-end is being modified concurrently.
aoqi@0 728
aoqi@0 729 class ConcEdenSpace : public EdenSpace {
aoqi@0 730 public:
aoqi@0 731 ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
aoqi@0 732
aoqi@0 733 // Allocation (return NULL if full)
aoqi@0 734 HeapWord* par_allocate(size_t word_size);
aoqi@0 735 };
aoqi@0 736
aoqi@0 737
aoqi@0 738 // A ContiguousSpace that supports an efficient "block_start" operation via
aoqi@0 739 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
aoqi@0 740 // other spaces.) This is the abstract base class for old generation
aoqi@0 741 // (tenured) spaces.
aoqi@0 742
aoqi@0 743 class OffsetTableContigSpace: public ContiguousSpace {
aoqi@0 744 friend class VMStructs;
aoqi@0 745 protected:
aoqi@0 746 BlockOffsetArrayContigSpace _offsets;
aoqi@0 747 Mutex _par_alloc_lock;
aoqi@0 748
aoqi@0 749 public:
aoqi@0 750 // Constructor
aoqi@0 751 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
aoqi@0 752 MemRegion mr);
aoqi@0 753
aoqi@0 754 void set_bottom(HeapWord* value);
aoqi@0 755 void set_end(HeapWord* value);
aoqi@0 756
aoqi@0 757 void clear(bool mangle_space);
aoqi@0 758
aoqi@0 759 inline HeapWord* block_start_const(const void* p) const;
aoqi@0 760
aoqi@0 761 // Add offset table update.
aoqi@0 762 virtual inline HeapWord* allocate(size_t word_size);
aoqi@0 763 inline HeapWord* par_allocate(size_t word_size);
aoqi@0 764
aoqi@0 765 // MarkSweep support phase3
aoqi@0 766 virtual HeapWord* initialize_threshold();
aoqi@0 767 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
aoqi@0 768
aoqi@0 769 virtual void print_on(outputStream* st) const;
aoqi@0 770
aoqi@0 771 // Debugging
aoqi@0 772 void verify() const;
aoqi@0 773 };
aoqi@0 774
aoqi@0 775
aoqi@0 776 // Class TenuredSpace is used by TenuredGeneration
aoqi@0 777
aoqi@0 778 class TenuredSpace: public OffsetTableContigSpace {
aoqi@0 779 friend class VMStructs;
aoqi@0 780 protected:
aoqi@0 781 // Mark sweep support
aoqi@0 782 size_t allowed_dead_ratio() const;
aoqi@0 783 public:
aoqi@0 784 // Constructor
aoqi@0 785 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
aoqi@0 786 MemRegion mr) :
aoqi@0 787 OffsetTableContigSpace(sharedOffsetArray, mr) {}
aoqi@0 788 };
aoqi@0 789 #endif // SHARE_VM_MEMORY_SPACE_HPP
