Wed, 14 Dec 2011 12:15:26 +0100
7121373: Clean up CollectedHeap::is_in
Summary: Fixed G1CollectedHeap::is_in, added tests, cleaned up comments and made Space::is_in pure virtual.
Reviewed-by: brutisso, tonyp, jcoomes
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)
//         - ContigPermSpace -- an offset table contiguous space for perm gen

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public OopClosure {
 private:
  OopClosure* _cl;
  MemRegion   _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
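
// A minimal usage sketch (illustrative only): wrap an existing closure so
// that only reference fields whose addresses lie inside "mr" are passed on;
// "my_cl" is a hypothetical OopClosure supplied by the caller.
//
//   SpaceMemRegionOopsIterClosure filter_cl(my_cl, mr);
//   some_space->oop_iterate(&filter_cl);  // my_cl sees only fields in mr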

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the space contains the given address as
  // part of an allocated object. For certain kinds of spaces,
  // this might be a potentially expensive operation. To prevent
  // performance problems on account of its inadvertent use in
  // product JVMs, we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
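
  // Example of the distinction (illustrative only): for a ContiguousSpace
  // "sp" with bottom() <= top() < end(), an address between top() and end()
  // lies in the reserved region but is not part of any allocated object:
  //
  //   assert(sp->is_in_reserved(sp->top()), "inside reserved bounds");
  //   // sp->is_in(sp->top()) is false -- no object is allocated there.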

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }
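
  // For example (with sizeof(double) == 8), is_aligned((void*)0x1000) is
  // true while is_aligned((void*)0x1004) is false, since the test simply
  // checks the low-order bits of the pointer.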

  // Size computations. Sizes are in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space. Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This
  // is most likely to happen in an "upwards" (ascending address) iteration
  // of MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;
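
  // Sketch of a typical block walk over a space (illustrative only),
  // relying on the contract that block_size(addr) advances to the next
  // block and that block_is_obj() distinguishes objects from free chunks:
  //
  //   HeapWord* cur   = sp->bottom();
  //   HeapWord* limit = sp->end();   // or top() for a contiguous space
  //   while (cur < limit) {
  //     if (sp->block_is_obj(cur)) {
  //       // process oop(cur), e.g. examine oop(cur)->size()
  //     }
  //     cur += sp->block_size(cur);
  //   }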

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
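
  // Allocation sketch (illustrative only): callers must be prepared for a
  // NULL result and take a slow path, e.g. expand the space or collect:
  //
  //   HeapWord* mem = sp->par_allocate(word_size);
  //   if (mem == NULL) {
  //     // slow path: expand the space, trigger a GC, or fail
  //   }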

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  OopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
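
// Typical use (a sketch, not the only caller): remembered-set scanning
// obtains a closure via Space::new_dcto_cl() and applies it to each dirty
// card region; "my_cl" and "dirty_mr" are hypothetical stand-ins here.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(my_cl, CardTableModRefBS::Precise);
//   dcto_cl->do_MemRegion(dirty_mr);  // applies my_cl to refs in the region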

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
  friend class CompactingPermGenGen;
  friend class CMSPermGenGen;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);
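
  // A sketch of the caller's side, taken from the phase-2 scan below
  // (SCAN_AND_FORWARD): each marked object is forwarded in address order,
  // with "compact_top" threaded through successive calls:
  //
  //   if (block_is_obj(q) && oop(q)->is_gc_marked()) {
  //     size_t size = block_size(q);
  //     compact_top = cp->space->forward(oop(q), size, cp, compact_top);
  //   }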

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold. (The default implementation simply returns the end of
  // the space, so the threshold is never crossed.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the    \
   * mark word. Used by MarkSweep::mark_sweep_phase2().                      \
   */                                                                         \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                              \
  /* We're sure to be here before any objects are compacted into this        \
   * space, so this is a good time to initialize this:                       \
   */                                                                         \
  set_compaction_top(bottom());                                               \
                                                                              \
  if (cp->space == NULL) {                                                    \
    assert(cp->gen != NULL, "need a generation");                             \
    assert(cp->threshold == NULL, "just checking");                           \
    assert(cp->gen->first_compaction_space() == this, "just checking");       \
    cp->space = cp->gen->first_compaction_space();                            \
    compact_top = cp->space->bottom();                                        \
    cp->space->set_compaction_top(compact_top);                               \
    cp->threshold = cp->space->initialize_threshold();                        \
  } else {                                                                    \
    compact_top = cp->space->compaction_top();                                \
  }                                                                           \
                                                                              \
  /* We allow some amount of garbage towards the bottom of the space, so     \
   * we don't start compacting before there is a significant gain to be made.\
   * Occasionally, we want to ensure a full compaction, which is determined  \
   * by the MarkSweepAlwaysCompactCount parameter.                           \
   */                                                                         \
  int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
  bool skip_dead = (MarkSweepAlwaysCompactCount < 1)                          \
    ||((invocations % MarkSweepAlwaysCompactCount) != 0);                     \
                                                                              \
  size_t allowed_deadspace = 0;                                               \
  if (skip_dead) {                                                            \
    const size_t ratio = allowed_dead_ratio();                                \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;            \
  }                                                                           \
                                                                              \
  HeapWord* q = bottom();                                                     \
  HeapWord* t = scan_limit();                                                 \
                                                                              \
  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
                                   live object. */                           \
  HeapWord*  first_dead = end();/* The first dead object. */                  \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
                                   first header of preceding free area. */   \
  _first_dead = first_dead;                                                   \
                                                                              \
  const intx interval = PrefetchScanIntervalInBytes;                          \
                                                                              \
  while (q < t) {                                                             \
    assert(!block_is_obj(q) ||                                                \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||    \
           oop(q)->mark()->has_bias_pattern(),                                \
           "these are the only valid states during a mark sweep");            \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                          \
      /* prefetch beyond q */                                                 \
      Prefetch::write(q, interval);                                           \
      /* size_t size = oop(q)->size();  changing this for cms for perm gen */ \
      size_t size = block_size(q);                                            \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);        \
      q += size;                                                              \
      end_of_live = q;                                                        \
    } else {                                                                  \
      /* run over all the contiguous dead objects */                          \
      HeapWord* end = q;                                                      \
      do {                                                                    \
        /* prefetch beyond end */                                             \
        Prefetch::write(end, interval);                                       \
        end += block_size(end);                                               \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked())); \
                                                                              \
      /* see if we might want to pretend this object is alive so that        \
       * we don't have to compact quite as often.                            \
       */                                                                     \
      if (allowed_deadspace > 0 && q == compact_top) {                        \
        size_t sz = pointer_delta(end, q);                                    \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                     \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);      \
          q = end;                                                            \
          end_of_live = end;                                                  \
          continue;                                                           \
        }                                                                     \
      }                                                                       \
                                                                              \
      /* otherwise, it really is a free region. */                            \
                                                                              \
      /* for the previous LiveRange, record the end of the live objects. */   \
      if (liveRange) {                                                        \
        liveRange->set_end(q);                                                \
      }                                                                       \
                                                                              \
      /* record the current LiveRange object.                                \
       * liveRange->start() is overlaid on the mark word.                    \
       */                                                                     \
      liveRange = (LiveRange*)q;                                              \
      liveRange->set_start(end);                                              \
      liveRange->set_end(end);                                                \
                                                                              \
      /* see if this is the first dead region. */                             \
      if (q < first_dead) {                                                   \
        first_dead = q;                                                       \
      }                                                                       \
                                                                              \
      /* move on to the next object */                                        \
      q = end;                                                                \
    }                                                                         \
  }                                                                           \
                                                                              \
  assert(q == t, "just checking");                                            \
  if (liveRange != NULL) {                                                    \
    liveRange->set_end(q);                                                    \
  }                                                                           \
  _end_of_live = end_of_live;                                                 \
  if (end_of_live < first_dead) {                                             \
    first_dead = end_of_live;                                                 \
  }                                                                           \
  _first_dead = first_dead;                                                   \
                                                                              \
  /* save the compaction_top of the compaction space. */                      \
  cp->space->set_compaction_top(compact_top);                                 \
}

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                           \
  /* adjust all the interior pointers to point at the new locations of objects\
   * Used by MarkSweep::mark_sweep_phase3() */                                \
                                                                              \
  HeapWord* q = bottom();                                                     \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
                                                                              \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");               \
                                                                              \
  if (q < t && _first_dead > q &&                                             \
      !oop(q)->is_gc_marked()) {                                              \
    /* we have a chunk of the space which hasn't moved and we've             \
     * reinitialized the mark word during the previous pass, so we can't     \
     * use is_gc_marked for the traversal. */                                \
    HeapWord* end = _first_dead;                                              \
                                                                              \
    while (q < end) {                                                         \
      /* I originally tried to conjoin "block_start(q) == q" to the          \
       * assertion below, but that doesn't work, because you can't           \
       * accurately traverse previous objects to get to the current one      \
       * after their pointers (including pointers into permGen) have been    \
       * updated, until the actual compaction is done.  dld, 4/00 */         \
      assert(block_is_obj(q),                                                 \
             "should be at block boundaries, and should be looking at objs");\
                                                                              \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));   \
                                                                              \
      /* point all the oops to the new location */                            \
      size_t size = oop(q)->adjust_pointers();                                \
      size = adjust_obj_size(size);                                           \
                                                                              \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());         \
                                                                              \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));   \
                                                                              \
      q += size;                                                              \
    }                                                                         \
                                                                              \
    if (_first_dead == t) {                                                   \
      q = t;                                                                  \
    } else {                                                                  \
      /* $$$ This is funky.  Using this to read the previously written       \
       * LiveRange.  See also use below. */                                  \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();              \
    }                                                                         \
  }                                                                           \
                                                                              \
  const intx interval = PrefetchScanIntervalInBytes;                          \
                                                                              \
  debug_only(HeapWord* prev_q = NULL);                                        \
  while (q < t) {                                                             \
    /* prefetch beyond q */                                                   \
    Prefetch::write(q, interval);                                             \
    if (oop(q)->is_gc_marked()) {                                             \
      /* q is alive */                                                        \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));   \
      /* point all the oops to the new location */                            \
      size_t size = oop(q)->adjust_pointers();                                \
      size = adjust_obj_size(size);                                           \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());         \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));   \
      debug_only(prev_q = q);                                                 \
      q += size;                                                              \
    } else {                                                                  \
      /* q is not a live object, so its mark should point at the next        \
       * live object */                                                      \
      debug_only(prev_q = q);                                                 \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                       \
      assert(q > prev_q, "we should be moving forward through memory");       \
    }                                                                         \
  }                                                                           \
                                                                              \
  assert(q == t, "just checking");                                            \
}

#define SCAN_AND_COMPACT(obj_size) {                                          \
  /* Copy all live objects to their new location                             \
   * Used by MarkSweep::mark_sweep_phase4() */                               \
                                                                              \
  HeapWord*       q = bottom();                                               \
  HeapWord* const t = _end_of_live;                                           \
  debug_only(HeapWord* prev_q = NULL);                                        \
                                                                              \
  if (q < t && _first_dead > q &&                                             \
      !oop(q)->is_gc_marked()) {                                              \
    debug_only(                                                               \
    /* we have a chunk of the space which hasn't moved and we've reinitialized\
     * the mark word during the previous pass, so we can't use is_gc_marked  \
     * for the traversal. */                                                 \
    HeapWord* const end = _first_dead;                                        \
                                                                              \
    while (q < end) {                                                         \
      size_t size = obj_size(q);                                              \
      assert(!oop(q)->is_gc_marked(),                                         \
             "should be unmarked (special dense prefix handling)");           \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));     \
      debug_only(prev_q = q);                                                 \
      q += size;                                                              \
    }                                                                         \
    )  /* debug_only */                                                       \
                                                                              \
    if (_first_dead == t) {                                                   \
      q = t;                                                                  \
    } else {                                                                  \
      /* $$$ Funky */                                                         \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();             \
    }                                                                         \
  }                                                                           \
                                                                              \
  const intx scan_interval = PrefetchScanIntervalInBytes;                     \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                     \
  while (q < t) {                                                             \
    if (!oop(q)->is_gc_marked()) {                                            \
      /* mark is pointer to next marked oop */                                \
      debug_only(prev_q = q);                                                 \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                       \
      assert(q > prev_q, "we should be moving forward through memory");       \
    } else {                                                                  \
      /* prefetch beyond q */                                                 \
      Prefetch::read(q, scan_interval);                                       \
                                                                              \
      /* size and destination */                                              \
      size_t size = obj_size(q);                                              \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();              \
                                                                              \
      /* prefetch beyond compaction_top */                                    \
      Prefetch::write(compaction_top, copy_interval);                         \
                                                                              \
      /* copy object and reinit its mark */                                   \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,          \
                                                            compaction_top)); \
      assert(q != compaction_top, "everything in this pass should be moving");\
      Copy::aligned_conjoint_words(q, compaction_top, size);                  \
      oop(compaction_top)->init_mark();                                       \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");    \
                                                                              \
      debug_only(prev_q = q);                                                 \
      q += size;                                                              \
    }                                                                         \
  }                                                                           \
                                                                              \
  /* Let's remember if we were empty before we did the compaction. */         \
  bool was_empty = used_region().is_empty();                                  \
  /* Reset space after compaction is complete */                              \
  reset_after_compaction();                                                   \
  /* We do this clear, below, since it has overloaded meanings for some */    \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */    \
  /* compacted into will have had their offset table thresholds updated */    \
  /* continuously, but those that weren't need to have their thresholds */    \
  /* re-initialized.  Also mangles unused area for debugging.           */    \
  if (used_region().is_empty()) {                                             \
    if (!was_empty) clear(SpaceDecorator::Mangle);                            \
  } else {                                                                    \
    if (ZapUnusedHeapArea) mangle_unused_area();                              \
  }                                                                           \
}

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  inline HeapWord* concurrent_iteration_safe_limit();
  // changes the safe limit, all objects from bottom() to the new
  // limit should be properly initialized
  inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);

#ifndef SERIALGC
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(),
           "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
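
  // Usage sketch (illustrative only): a scan typically loops until no
  // objects have been allocated past the saved mark, since the closure
  // itself may allocate (e.g. by copying objects into this space):
  //
  //   while (!sp->saved_mark_at_top()) {
  //     sp->oop_since_save_marks_iterate_v(blk);  // also advances the mark
  //   }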

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify(bool allow_dirty) const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes the eden space in the new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation.  As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
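
// Soft-limit sketch (illustrative only; the real policy lives in the
// slow-path allocation code): the fast path allocates against soft_end(),
// and when that is reached the slow path can take some action and then
// push the soft limit out, up to the hard end():
//
//   if (eden->soft_end() < eden->end()) {
//     // e.g. take a sample, then widen the window and retry allocation
//     eden->set_soft_end(eden->end());
//   }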

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.)  This is the abstract base class for old generation
// (tenured, perm) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify(bool allow_dirty) const;

  // Shared space support
  void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
};
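
// A sketch of what the offset table buys (illustrative only): instead of
// scanning blocks forward from bottom(), block_start_const() consults the
// BlockOffsetArray and runs in (amortized) constant time, e.g. when card
// scanning needs the object enclosing an arbitrary address:
//
//   HeapWord* start = otc_space->block_start_const(addr);
//   assert(start <= (HeapWord*)addr, "start of the enclosing block");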


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};


// Class ContigPermSpace is used by CompactingPermGen

class ContigPermSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};

#endif // SHARE_VM_MEMORY_SPACE_HPP