Mon, 21 Jul 2014 10:00:31 +0200
8048112: G1 Full GC needs to support the case when the very first region is not available
Summary: Refactor preparation for compaction during Full GC so that it lazily initializes the first compaction point. This also avoids problems later when the first region may not be committed. Also reviewed by K. Barrett.
Reviewed-by: brutisso
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"
// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.
// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)
// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class ContiguousSpace;   // needed below for Space::toContiguousSpace()
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;
// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.
class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
 public:
  // Accessors
  HeapWord* bottom() const { return _bottom; }
  HeapWord* end() const    { return _end; }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }
  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;
  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }
  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}
  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the space contains the given address as part of
  // an allocated object. For certain kinds of spaces, this might be a
  // potentially expensive operation. To prevent performance problems
  // on account of its inadvertent use in product JVMs, we restrict its
  // use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }
  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }
  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;
  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);
207 // If "p" is in the space, returns the address of the start of the
208 // "block" that contains "p". We say "block" instead of "object" since
209 // some heaps may not pack objects densely; a chunk may either be an
210 // object or a non-object. If "p" is not in the space, return NULL.
211 virtual HeapWord* block_start_const(const void* p) const = 0;
213 // The non-const version may have benevolent side effects on the data
214 // structure supporting these calls, possibly speeding up future calls.
215 // The default implementation, however, is simply to call the const
216 // version.
217 inline virtual HeapWord* block_start(const void* p);
219 // Requires "addr" to be the start of a chunk, and returns its size.
220 // "addr + size" is required to be the start of a new chunk, or the end
221 // of the active area of the heap.
222 virtual size_t block_size(const HeapWord* addr) const = 0;
224 // Requires "addr" to be the start of a block, and returns "TRUE" iff
225 // the block is an object.
226 virtual bool block_is_obj(const HeapWord* addr) const = 0;
228 // Requires "addr" to be the start of a block, and returns "TRUE" iff
229 // the block is an object and the object is alive.
230 virtual bool obj_is_alive(const HeapWord* addr) const;
  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;
  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
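
// A minimal usage sketch (not part of the original header) of the two
// allocation entry points above: allocate() assumes the caller has already
// established exclusive access -- for example via HotSpot's global Heap_lock
// from mutexLocker.hpp, included above -- while par_allocate() may be called
// concurrently. 'allocate_locked_sketch' is a hypothetical helper, not
// HotSpot API.
static inline HeapWord* allocate_locked_sketch(Space* sp, size_t word_size) {
  MutexLocker ml(Heap_lock);       // caller-provided mutual exclusion
  return sp->allocate(word_size);  // returns NULL if the space is full
}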
// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.
class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _cl(cl), _sp(sp), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
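
// A simplified sketch (not the real implementation) of what a dirty-card
// walk does: clip the dirty region to the allocated part of the space, back
// the lower bound up to the start of the block containing the first dirty
// word, and hand the result to a block walker. The real do_MemRegion()
// additionally honors _boundary, _min_done and the precision style; both
// 'dirty_card_walk_sketch' and 'walker' are hypothetical names.
static void dirty_card_walk_sketch(Space* sp, MemRegion dirty,
                                   void (*walker)(MemRegion mr,
                                                  HeapWord* bottom,
                                                  HeapWord* top)) {
  MemRegion mr = dirty.intersection(sp->used_region());
  if (mr.is_empty()) {
    return;                                       // nothing allocated is dirty
  }
  HeapWord* bottom = sp->block_start(mr.start()); // block containing mr.start()
  HeapWord* top    = mr.end();                    // may be extended, cf. get_actual_top()
  walker(mr, bottom, top);                        // apply the oop closure block by block
}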
// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* _gen) :
    gen(_gen), space(NULL), threshold(NULL) {}
};
// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GCs.
class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }
  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }
  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }
411 // "q" is an object of the given "size" that should be forwarded;
412 // "cp" names the generation ("gen") and containing "this" (which must
413 // also equal "cp->space"). "compact_top" is where in "this" the
414 // next object should be forwarded to. If there is room in "this" for
415 // the object, insert an appropriate forwarding pointer in "q".
416 // If not, go to the next compaction space (there must
417 // be one, since compaction must succeed -- we go to the first space of
418 // the previous generation if necessary, updating "cp"), reset compact_top
419 // and then forward. In either case, returns the new value of "compact_top".
420 // If the forwarding crosses "cp->threshold", invokes the "cross_threhold"
421 // function of the then-current compaction space, and updates "cp->threshold
422 // accordingly".
423 virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
424 HeapWord* compact_top);
426 // Return a size with adjusments as required of the space.
427 virtual size_t adjust_object_size_v(size_t size) const { return size; }
 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return 0; }

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold. (The default implementation merely returns end().)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }
  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
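
// A sketch (simplified; 'forward_sketch' is a hypothetical free function,
// not the method declared above) of the forwarding step: while 'q' does not
// fit at 'compact_top' in the current compaction space, advance the
// compaction point to the next compaction space; then install the forwarding
// pointer. The real forward() additionally handles threshold crossing,
// deadwood, and the fallback to the previous generation; see space.cpp.
static HeapWord* forward_sketch(oop q, size_t size, CompactPoint* cp,
                                HeapWord* compact_top) {
  while (size > pointer_delta(cp->space->end(), compact_top)) {
    // The current space is full: record its final compaction top and
    // move on (compaction must succeed, so a next space is assumed here).
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
  }
  q->forward_to(oop(compact_top));  // insert the forwarding pointer in q
  return compact_top + size;        // the new value of compact_top
}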
class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top(); }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  WaterMark bottom_mark() { return WaterMark(this, bottom()); }
  WaterMark top_mark()    { return WaterMark(this, top()); }
  WaterMark saved_mark()  { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }
  // In debug mode, mangle (write a particular bit pattern into)
  // the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This check may be a no-op depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;
  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(), end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit; all objects from bottom() to the new
  // limit must be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }
#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;
  // Used to increase collection frequency. A "factor" of 0 means the
  // entire space.
  void allocate_temporary_filler(int factor);
};
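
// A minimal sketch (not part of the original header) of how a lock-free
// par_allocate over a contiguous space typically works: bump the top pointer
// with a compare-and-swap and retry on contention. The real helpers are
// allocate_impl()/par_allocate_impl() above; 'par_allocate_sketch' is a
// hypothetical name, and the sketch assumes runtime/atomic.hpp for
// Atomic::cmpxchg_ptr.
static inline HeapWord* par_allocate_sketch(HeapWord* volatile* top_addr,
                                            HeapWord* hard_end,
                                            size_t word_size) {
  while (true) {
    HeapWord* old_top = *top_addr;            // racy read of current top
    HeapWord* new_top = old_top + word_size;
    if (new_top > hard_end) {
      return NULL;                            // space is full
    }
    // Publish the new top; if another thread won the race, retry.
    if ((HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr, old_top) == old_top) {
      return old_top;                         // allocation succeeded
    }
  }
}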
// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};
// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.
class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};
// Class EdenSpace describes the eden space in the new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};
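
// A sketch (simplified and hypothetical; the real slow path can take other
// actions before raising the limit) of allocation against the soft limit
// described above: the fast path is bounded by soft_end(); hitting it lets
// the slow path move the soft limit toward the hard end() and retry.
// 'soft_limited_allocate_sketch' is not HotSpot API.
static HeapWord* soft_limited_allocate_sketch(EdenSpace* eden, size_t word_size) {
  while (true) {
    HeapWord* obj = eden->par_allocate(word_size);  // bounded by _soft_end
    if (obj != NULL) {
      return obj;                   // fast path succeeded
    }
    if (eden->soft_end() == eden->end()) {
      return NULL;                  // the hard limit itself is exhausted
    }
    // Slow path: relax the soft limit (real code may do work here first).
    eden->set_soft_end(eden->end());
  }
}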
// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};
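
// A sketch (simplified; 'allocate_with_offsets_sketch' is a hypothetical
// helper) of the "offset table update" this class adds around allocation:
// each successful bump allocation records the new block in the block offset
// array, which is what makes block_start() fast. alloc_block() is the
// BlockOffsetArray interface from memory/blockOffsetTable.hpp, included above.
static inline HeapWord* allocate_with_offsets_sketch(ContiguousSpace* sp,
                                                     BlockOffsetArrayContigSpace* offsets,
                                                     size_t word_size) {
  HeapWord* res = sp->allocate(word_size);  // ordinary contiguous allocation
  if (res != NULL) {
    offsets->alloc_block(res, word_size);   // record the block boundary
  }
  return res;
}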
// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};

#endif // SHARE_VM_MEMORY_SPACE_HPP