Mon, 09 Mar 2009 13:28:46 -0700
6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair
1 /*
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 // A space is an abstraction for the "storage units" backing
26 // up the generation abstraction. It includes specific
27 // implementations for keeping track of free and used space,
28 // for iterating over objects and free blocks, etc.
30 // Here's the Space hierarchy:
31 //
32 // - Space -- an abstract base class describing a heap area
33 // - CompactibleSpace -- a space supporting compaction
34 // - CompactibleFreeListSpace -- (used for CMS generation)
35 // - ContiguousSpace -- a compactible space in which all free space
36 // is contiguous
37 // - EdenSpace -- contiguous space used as nursery
38 // - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
39 // - OffsetTableContigSpace -- contiguous space with a block offset array
40 // that allows "fast" block_start calls
41 // - TenuredSpace -- (used for TenuredGeneration)
42 // - ContigPermSpace -- an offset table contiguous space for perm gen
44 // Forward decls.
45 class Space;
46 class BlockOffsetArray;
47 class BlockOffsetArrayContigSpace;
48 class Generation;
49 class CompactibleSpace;
50 class BlockOffsetTable;
51 class GenRemSet;
52 class CardTableRS;
53 class DirtyCardToOopClosure;
55 // An oop closure that is circumscribed by a filtering memory region.
56 class SpaceMemRegionOopsIterClosure: public OopClosure {
57 private:
58 OopClosure* _cl;
59 MemRegion _mr;
60 protected:
61 template <class T> void do_oop_work(T* p) {
62 if (_mr.contains(p)) {
63 _cl->do_oop(p);
64 }
65 }
66 public:
67 SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
68 _cl(cl), _mr(mr) {}
69 virtual void do_oop(oop* p);
70 virtual void do_oop(narrowOop* p);
71 };
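// Example (illustrative sketch, not part of the original header): wrapping an
// existing OopClosure so that only oop locations inside a given MemRegion are
// passed through; "my_cl", "dirty_mr" and "sp" are hypothetical.
//
//   OopClosure* my_cl    = ...;   // the closure to be filtered
//   MemRegion   dirty_mr = ...;   // region of interest, e.g. a dirty card
//   SpaceMemRegionOopsIterClosure filter_cl(my_cl, dirty_mr);
//   sp->oop_iterate(&filter_cl);  // only fields located in dirty_mr reach my_cl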
73 // A Space describes a heap area. Class Space is an abstract
74 // base class.
75 //
76 // A Space supports allocation and size computation, and provides GC support.
77 //
78 // Invariant: bottom() and end() are on page_size boundaries and
79 // bottom() <= top() <= end()
80 // top() is inclusive and end() is exclusive.
82 class Space: public CHeapObj {
83 friend class VMStructs;
84 protected:
85 HeapWord* _bottom;
86 HeapWord* _end;
88 // Used in support of save_marks()
89 HeapWord* _saved_mark_word;
91 MemRegionClosure* _preconsumptionDirtyCardClosure;
93 // A sequential tasks done structure. This supports
94 // parallel GC, where we have threads dynamically
95 // claiming sub-tasks from a larger parallel task.
96 SequentialSubTasksDone _par_seq_tasks;
98 Space():
99 _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
101 public:
102 // Accessors
103 HeapWord* bottom() const { return _bottom; }
104 HeapWord* end() const { return _end; }
105 virtual void set_bottom(HeapWord* value) { _bottom = value; }
106 virtual void set_end(HeapWord* value) { _end = value; }
108 virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }
109 void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
111 MemRegionClosure* preconsumptionDirtyCardClosure() const {
112 return _preconsumptionDirtyCardClosure;
113 }
114 void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
115 _preconsumptionDirtyCardClosure = cl;
116 }
118 // Returns a subregion of the space containing all the objects in
119 // the space.
120 virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
122 // Returns a region that is guaranteed to contain (at least) all objects
123 // allocated at the time of the last call to "save_marks". If the space
124 // initializes its DirtyCardToOopClosure's specifying the "contig" option
125 // (that is, if the space is contiguous), then this region must contain only
126 // such objects: the MemRegion will be from the bottom of the region to the
127 // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
128 // the space must distinguish between objects in the region allocated before
129 // and after the call to save marks.
130 virtual MemRegion used_region_at_save_marks() const {
131 return MemRegion(bottom(), saved_mark_word());
132 }
134 // Initialization.
135 // "initialize" should be called once on a space, before it is used for
136 // any purpose. The "mr" argument gives the bounds of the space, and
137 // the "clear_space" argument should be true unless the memory in "mr" is
138 // known to be zeroed.
139 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
141 // The "clear" method must be called on a region that may have
142 // had allocation performed in it, but is now to be considered empty.
143 virtual void clear(bool mangle_space);
145 // For detecting GC bugs. Should only be called at GC boundaries, since
146 // some unused space may be used as scratch space during GC's.
147 // Default implementation does nothing. We also call this when expanding
148 // a space to satisfy an allocation request. See bug #4668531
149 virtual void mangle_unused_area() {}
150 virtual void mangle_unused_area_complete() {}
151 virtual void mangle_region(MemRegion mr) {}
153 // Testers
154 bool is_empty() const { return used() == 0; }
155 bool not_empty() const { return used() > 0; }
157 // Returns true iff the space contains the
158 // given address as part of an allocated object. For
159 // certain kinds of spaces, this might be a potentially
160 // expensive operation. To prevent performance problems
161 // on account of its inadvertent use in product jvm's,
162 // we restrict its use to assertion checks only.
163 virtual bool is_in(const void* p) const;
165 // Returns true iff the given address is within the reserved memory of
166 // the space.
167 bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
169 // Returns true iff the given block is not allocated.
170 virtual bool is_free_block(const HeapWord* p) const = 0;
172 // Test whether p is double-aligned
173 static bool is_aligned(void* p) {
174 return ((intptr_t)p & (sizeof(double)-1)) == 0;
175 }
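// For example, assuming sizeof(double) == 8 (the common case), the test above
// reduces to checking the low three address bits:
//   is_aligned((void*)0x1000)  // true:  0x1000 & 0x7 == 0
//   is_aligned((void*)0x100c)  // false: 0x100c & 0x7 == 4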
177 // Size computations. Sizes are in bytes.
178 size_t capacity() const { return byte_size(bottom(), end()); }
179 virtual size_t used() const = 0;
180 virtual size_t free() const = 0;
182 // Iterate over all the ref-containing fields of all objects in the
183 // space, calling "cl.do_oop" on each. Fields in objects allocated by
184 // applications of the closure are not included in the iteration.
185 virtual void oop_iterate(OopClosure* cl);
187 // Same as above, restricted to the intersection of a memory region and
188 // the space. Fields in objects allocated by applications of the closure
189 // are not included in the iteration.
190 virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;
192 // Iterate over all objects in the space, calling "cl.do_object" on
193 // each. Objects allocated by applications of the closure are not
194 // included in the iteration.
195 virtual void object_iterate(ObjectClosure* blk) = 0;
196 // Similar to object_iterate() except only iterates over
197 // objects whose internal references point to objects in the space.
198 virtual void safe_object_iterate(ObjectClosure* blk) = 0;
200 // Iterate over all objects that intersect with mr, calling "cl->do_object"
201 // on each. There is an exception to this: if this closure has already
202 // been invoked on an object, it may skip such objects in some cases. This is
203 // most likely to happen in an "upwards" (ascending address) iteration of
204 // MemRegions.
205 virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
207 // Iterate over as many initialized objects in the space as possible,
208 // calling "cl.do_object_careful" on each. Return NULL if all objects
209 // in the space (at the start of the iteration) were iterated over.
210 // Return an address indicating the extent of the iteration in the
211 // event that the iteration had to return because of finding an
212 // uninitialized object in the space, or if the closure "cl"
213 // signalled early termination.
214 virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
215 virtual HeapWord* object_iterate_careful_m(MemRegion mr,
216 ObjectClosureCareful* cl);
218 // Create and return a new dirty card to oop closure. Can be
219 // overridden to return the appropriate type of closure
220 // depending on the type of space in which the closure will
221 // operate. ResourceArea allocated.
222 virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
223 CardTableModRefBS::PrecisionStyle precision,
224 HeapWord* boundary = NULL);
226 // If "p" is in the space, returns the address of the start of the
227 // "block" that contains "p". We say "block" instead of "object" since
228 // some heaps may not pack objects densely; a chunk may either be an
229 // object or a non-object. If "p" is not in the space, return NULL.
230 virtual HeapWord* block_start_const(const void* p) const = 0;
232 // The non-const version may have benevolent side effects on the data
233 // structure supporting these calls, possibly speeding up future calls.
234 // The default implementation, however, is simply to call the const
235 // version.
236 inline virtual HeapWord* block_start(const void* p);
238 // Requires "addr" to be the start of a chunk, and returns its size.
239 // "addr + size" is required to be the start of a new chunk, or the end
240 // of the active area of the heap.
241 virtual size_t block_size(const HeapWord* addr) const = 0;
243 // Requires "addr" to be the start of a block, and returns "TRUE" iff
244 // the block is an object.
245 virtual bool block_is_obj(const HeapWord* addr) const = 0;
247 // Requires "addr" to be the start of a block, and returns "TRUE" iff
248 // the block is an object and the object is alive.
249 virtual bool obj_is_alive(const HeapWord* addr) const;
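// Example (illustrative sketch): the contracts above allow a space to be
// walked block by block; "sp" is a hypothetical Space, and the walk is bounded
// by the used region so block_size() is only asked about active storage.
//
//   HeapWord* cur   = sp->bottom();
//   HeapWord* limit = sp->used_region().end();
//   while (cur < limit) {
//     if (sp->block_is_obj(cur)) {
//       // cur starts an object occupying sp->block_size(cur) heap words
//     }
//     cur += sp->block_size(cur);
//   }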
251 // Allocation (return NULL if full). Assumes the caller has established
252 // mutually exclusive access to the space.
253 virtual HeapWord* allocate(size_t word_size) = 0;
255 // Allocation (return NULL if full). Enforces mutual exclusion internally.
256 virtual HeapWord* par_allocate(size_t word_size) = 0;
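// Example (illustrative sketch): a caller that does not hold exclusive access
// to the space uses the parallel entry point and must be prepared for failure;
// "sp" and "word_size" are hypothetical.
//
//   HeapWord* result = sp->par_allocate(word_size);
//   if (result == NULL) {
//     // space is full: the caller must expand the space, try another space,
//     // or trigger a collection.
//   }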
258 // Returns true if this object has been allocated since a
259 // generation's "save_marks" call.
260 virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
262 // Mark-sweep-compact support: all spaces can update pointers to objects
263 // moving as a part of compaction.
264 virtual void adjust_pointers();
266 // PrintHeapAtGC support
267 virtual void print() const;
268 virtual void print_on(outputStream* st) const;
269 virtual void print_short() const;
270 virtual void print_short_on(outputStream* st) const;
273 // Accessor for parallel sequential tasks.
274 SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
276 // IF "this" is a ContiguousSpace, return it, else return NULL.
277 virtual ContiguousSpace* toContiguousSpace() {
278 return NULL;
279 }
281 // Debugging
282 virtual void verify(bool allow_dirty) const = 0;
283 };
285 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
286 // OopClosure to (the addresses of) all the ref-containing fields that could
287 // be modified by virtue of the given MemRegion being dirty. (Note that
288 // because of the imprecise nature of the write barrier, this may iterate
289 // over oops beyond the region.)
290 // This base type for dirty card to oop closures handles memory regions
291 // in non-contiguous spaces with no boundaries, and should be sub-classed
292 // to support other space types. See ContiguousDCTOC for a sub-class
293 // that works with ContiguousSpaces.
295 class DirtyCardToOopClosure: public MemRegionClosureRO {
296 protected:
297 OopClosure* _cl;
298 Space* _sp;
299 CardTableModRefBS::PrecisionStyle _precision;
300 HeapWord* _boundary; // If non-NULL, process only non-NULL oops
301 // pointing below boundary.
302 HeapWord* _min_done; // ObjHeadPreciseArray precision requires
303 // a downwards traversal; this is the
304 // lowest location already done (or,
305 // alternatively, the lowest address that
306 // shouldn't be done again. NULL means infinity.)
307 NOT_PRODUCT(HeapWord* _last_bottom;)
308 NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
310 // Get the actual top of the area on which the closure will
311 // operate, given where the top is assumed to be (the end of the
312 // memory region passed to do_MemRegion) and where the object
313 // at the top is assumed to start. For example, an object may
314 // start at the top but actually extend past the assumed top,
315 // in which case the top becomes the end of the object.
316 virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
318 // Walk the given memory region from bottom to (actual) top
319 // looking for objects and applying the oop closure (_cl) to
320 // them. The base implementation of this treats the area as
321 // blocks, where a block may or may not be an object. Sub-
322 // classes should override this to provide more accurate
323 // or possibly more efficient walking.
324 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
326 public:
327 DirtyCardToOopClosure(Space* sp, OopClosure* cl,
328 CardTableModRefBS::PrecisionStyle precision,
329 HeapWord* boundary) :
330 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
331 _min_done(NULL) {
332 NOT_PRODUCT(_last_bottom = NULL);
333 NOT_PRODUCT(_last_explicit_min_done = NULL);
334 }
336 void do_MemRegion(MemRegion mr);
338 void set_min_done(HeapWord* min_done) {
339 _min_done = min_done;
340 NOT_PRODUCT(_last_explicit_min_done = _min_done);
341 }
342 #ifndef PRODUCT
343 void set_last_bottom(HeapWord* last_bottom) {
344 _last_bottom = last_bottom;
345 }
346 #endif
347 };
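// Example (illustrative sketch): remembered-set scanning typically asks the
// space for a suitably-typed closure via new_dcto_cl() and then feeds it the
// dirty MemRegions found on the card table; "sp", "scan_cl" and "dirty_mr"
// are hypothetical, and ObjHeadPreciseArray is the PrecisionStyle value
// referred to in the _min_done comment above.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(scan_cl, CardTableModRefBS::ObjHeadPreciseArray);
//   dcto_cl->do_MemRegion(dirty_mr);   // applies scan_cl to oops on the card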
349 // A structure to represent a point at which objects are being copied
350 // during compaction.
351 class CompactPoint : public StackObj {
352 public:
353 Generation* gen;
354 CompactibleSpace* space;
355 HeapWord* threshold;
356 CompactPoint(Generation* _gen, CompactibleSpace* _space,
357 HeapWord* _threshold) :
358 gen(_gen), space(_space), threshold(_threshold) {}
359 };
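// Example (illustrative sketch): compaction planning usually starts from a
// CompactPoint whose space and threshold are still unset; the first call to
// prepare_for_compaction() (declared below) fills them in from the
// generation's first compaction space. "gen" is a hypothetical generation and
// "first_space" its first compaction space.
//
//   CompactPoint cp(gen, NULL, NULL);
//   first_space->prepare_for_compaction(&cp);   // mark-sweep phase 2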
362 // A space that supports compaction operations. This is usually, but not
363 // necessarily, a space that is normally contiguous. But, for example, a
364 // free-list-based space whose normal collection is a mark-sweep without
365 // compaction could still support compaction in full GC's.
367 class CompactibleSpace: public Space {
368 friend class VMStructs;
369 friend class CompactibleFreeListSpace;
370 friend class CompactingPermGenGen;
371 friend class CMSPermGenGen;
372 private:
373 HeapWord* _compaction_top;
374 CompactibleSpace* _next_compaction_space;
376 public:
377 CompactibleSpace() :
378 _compaction_top(NULL), _next_compaction_space(NULL) {}
380 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
381 virtual void clear(bool mangle_space);
383 // Used temporarily during a compaction phase to hold the value
384 // top should have when compaction is complete.
385 HeapWord* compaction_top() const { return _compaction_top; }
387 void set_compaction_top(HeapWord* value) {
388 assert(value == NULL || (value >= bottom() && value <= end()),
389 "should point inside space");
390 _compaction_top = value;
391 }
393 // Perform operations on the space needed after a compaction
394 // has been performed.
395 virtual void reset_after_compaction() {}
397 // Returns the next space (in the current generation) to be compacted in
398 // the global compaction order. Also is used to select the next
399 // space into which to compact.
401 virtual CompactibleSpace* next_compaction_space() const {
402 return _next_compaction_space;
403 }
405 void set_next_compaction_space(CompactibleSpace* csp) {
406 _next_compaction_space = csp;
407 }
409 // MarkSweep support phase2
411 // Start the process of compaction of the current space: compute
412 // post-compaction addresses, and insert forwarding pointers. The fields
413 // "cp->gen" and "cp->compaction_space" are the generation and space into
414 // which we are currently compacting. This call updates "cp" as necessary,
415 // and leaves the "compaction_top" of the final value of
416 // "cp->compaction_space" up-to-date. Offset tables may be updated in
417 // this phase as if the final copy had occurred; if so, "cp->threshold"
418 // indicates when the next such action should be taken.
419 virtual void prepare_for_compaction(CompactPoint* cp);
420 // MarkSweep support phase3
421 virtual void adjust_pointers();
422 // MarkSweep support phase4
423 virtual void compact();
425 // The maximum percentage of objects that can be dead in the compacted
426 // live part of a compacted space ("deadwood" support.)
427 virtual size_t allowed_dead_ratio() const { return 0; };
429 // Some contiguous spaces may maintain some data structures that should
430 // be updated whenever an allocation crosses a boundary. This function
431 // returns the first such boundary.
432 // (The default implementation returns the end of the space, so the
433 // boundary is never crossed.)
434 virtual HeapWord* initialize_threshold() { return end(); }
436 // "q" is an object of the given "size" that should be forwarded;
437 // "cp" names the generation ("gen") and containing "this" (which must
438 // also equal "cp->space"). "compact_top" is where in "this" the
439 // next object should be forwarded to. If there is room in "this" for
440 // the object, insert an appropriate forwarding pointer in "q".
441 // If not, go to the next compaction space (there must
442 // be one, since compaction must succeed -- we go to the first space of
443 // the previous generation if necessary, updating "cp"), reset compact_top
444 // and then forward. In either case, returns the new value of "compact_top".
445 // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
446 // function of the then-current compaction space, and updates "cp->threshold"
447 // accordingly.
448 virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
449 HeapWord* compact_top);
451 // Return a size with adjustments as required of the space.
452 virtual size_t adjust_object_size_v(size_t size) const { return size; }
454 protected:
455 // Used during compaction.
456 HeapWord* _first_dead;
457 HeapWord* _end_of_live;
459 // Minimum size of a free block.
460 virtual size_t minimum_free_block_size() const = 0;
462 // This function is invoked when an allocation of an object covering
463 // "start" to "end" crosses the threshold; returns the next
464 // threshold. (The default implementation does nothing.)
465 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
466 return end();
467 }
469 // Requires "allowed_deadspace_words > 0", that "q" is the start of a
470 // free block of the given "word_len", and that "q", were it an object,
471 // would not move if forwarded. If the size allows, fill the free
472 // block with an object, to prevent excessive compaction. Returns "true"
473 // iff the free region was made deadspace, and modifies
474 // "allowed_deadspace_words" to reflect the number of available deadspace
475 // words remaining after this operation.
476 bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
477 size_t word_len);
478 };
480 #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
481 /* Compute the new addresses for the live objects and store them in the mark \
482 * Used by universe::mark_sweep_phase2() \
483 */ \
484 HeapWord* compact_top; /* This is where we are currently compacting to. */ \
485 \
486 /* We're sure to be here before any objects are compacted into this \
487 * space, so this is a good time to initialize this: \
488 */ \
489 set_compaction_top(bottom()); \
490 \
491 if (cp->space == NULL) { \
492 assert(cp->gen != NULL, "need a generation"); \
493 assert(cp->threshold == NULL, "just checking"); \
494 assert(cp->gen->first_compaction_space() == this, "just checking"); \
495 cp->space = cp->gen->first_compaction_space(); \
496 compact_top = cp->space->bottom(); \
497 cp->space->set_compaction_top(compact_top); \
498 cp->threshold = cp->space->initialize_threshold(); \
499 } else { \
500 compact_top = cp->space->compaction_top(); \
501 } \
502 \
503 /* We allow some amount of garbage towards the bottom of the space, so \
504 * we don't start compacting before there is a significant gain to be made.\
505 * Occasionally, we want to ensure a full compaction, which is determined \
506 * by the MarkSweepAlwaysCompactCount parameter. \
507 */ \
508 int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
509 bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
510 \
511 size_t allowed_deadspace = 0; \
512 if (skip_dead) { \
513 const size_t ratio = allowed_dead_ratio(); \
514 allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
515 } \
516 \
517 HeapWord* q = bottom(); \
518 HeapWord* t = scan_limit(); \
519 \
520 HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
521 live object. */ \
522 HeapWord* first_dead = end();/* The first dead object. */ \
523 LiveRange* liveRange = NULL; /* The current live range, recorded in the \
524 first header of preceding free area. */ \
525 _first_dead = first_dead; \
526 \
527 const intx interval = PrefetchScanIntervalInBytes; \
528 \
529 while (q < t) { \
530 assert(!block_is_obj(q) || \
531 oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
532 oop(q)->mark()->has_bias_pattern(), \
533 "these are the only valid states during a mark sweep"); \
534 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
535 /* prefetch beyond q */ \
536 Prefetch::write(q, interval); \
537 /* size_t size = oop(q)->size(); changing this for cms for perm gen */\
538 size_t size = block_size(q); \
539 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
540 q += size; \
541 end_of_live = q; \
542 } else { \
543 /* run over all the contiguous dead objects */ \
544 HeapWord* end = q; \
545 do { \
546 /* prefetch beyond end */ \
547 Prefetch::write(end, interval); \
548 end += block_size(end); \
549 } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
550 \
551 /* see if we might want to pretend this object is alive so that \
552 * we don't have to compact quite as often. \
553 */ \
554 if (allowed_deadspace > 0 && q == compact_top) { \
555 size_t sz = pointer_delta(end, q); \
556 if (insert_deadspace(allowed_deadspace, q, sz)) { \
557 compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
558 q = end; \
559 end_of_live = end; \
560 continue; \
561 } \
562 } \
563 \
564 /* otherwise, it really is a free region. */ \
565 \
566 /* for the previous LiveRange, record the end of the live objects. */ \
567 if (liveRange) { \
568 liveRange->set_end(q); \
569 } \
570 \
571 /* record the current LiveRange object. \
572 * liveRange->start() is overlaid on the mark word. \
573 */ \
574 liveRange = (LiveRange*)q; \
575 liveRange->set_start(end); \
576 liveRange->set_end(end); \
577 \
578 /* see if this is the first dead region. */ \
579 if (q < first_dead) { \
580 first_dead = q; \
581 } \
582 \
583 /* move on to the next object */ \
584 q = end; \
585 } \
586 } \
587 \
588 assert(q == t, "just checking"); \
589 if (liveRange != NULL) { \
590 liveRange->set_end(q); \
591 } \
592 _end_of_live = end_of_live; \
593 if (end_of_live < first_dead) { \
594 first_dead = end_of_live; \
595 } \
596 _first_dead = first_dead; \
597 \
598 /* save the compaction_top of the compaction space. */ \
599 cp->space->set_compaction_top(compact_top); \
600 }
602 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
603 /* adjust all the interior pointers to point at the new locations of objects \
604 * Used by MarkSweep::mark_sweep_phase3() */ \
605 \
606 HeapWord* q = bottom(); \
607 HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
608 \
609 assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
610 \
611 if (q < t && _first_dead > q && \
612 !oop(q)->is_gc_marked()) { \
613 /* we have a chunk of the space which hasn't moved and we've \
614 * reinitialized the mark word during the previous pass, so we can't \
615 * use is_gc_marked for the traversal. */ \
616 HeapWord* end = _first_dead; \
617 \
618 while (q < end) { \
619 /* I originally tried to conjoin "block_start(q) == q" to the \
620 * assertion below, but that doesn't work, because you can't \
621 * accurately traverse previous objects to get to the current one \
622 * after their pointers (including pointers into permGen) have been \
623 * updated, until the actual compaction is done. dld, 4/00 */ \
624 assert(block_is_obj(q), \
625 "should be at block boundaries, and should be looking at objs"); \
626 \
627 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
628 \
629 /* point all the oops to the new location */ \
630 size_t size = oop(q)->adjust_pointers(); \
631 size = adjust_obj_size(size); \
632 \
633 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
634 \
635 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
636 \
637 q += size; \
638 } \
639 \
640 if (_first_dead == t) { \
641 q = t; \
642 } else { \
643 /* $$$ This is funky. Using this to read the previously written \
644 * LiveRange. See also use below. */ \
645 q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
646 } \
647 } \
648 \
649 const intx interval = PrefetchScanIntervalInBytes; \
650 \
651 debug_only(HeapWord* prev_q = NULL); \
652 while (q < t) { \
653 /* prefetch beyond q */ \
654 Prefetch::write(q, interval); \
655 if (oop(q)->is_gc_marked()) { \
656 /* q is alive */ \
657 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
658 /* point all the oops to the new location */ \
659 size_t size = oop(q)->adjust_pointers(); \
660 size = adjust_obj_size(size); \
661 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
662 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
663 debug_only(prev_q = q); \
664 q += size; \
665 } else { \
666 /* q is not a live object, so its mark should point at the next \
667 * live object */ \
668 debug_only(prev_q = q); \
669 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
670 assert(q > prev_q, "we should be moving forward through memory"); \
671 } \
672 } \
673 \
674 assert(q == t, "just checking"); \
675 }
677 #define SCAN_AND_COMPACT(obj_size) { \
678 /* Copy all live objects to their new location \
679 * Used by MarkSweep::mark_sweep_phase4() */ \
680 \
681 HeapWord* q = bottom(); \
682 HeapWord* const t = _end_of_live; \
683 debug_only(HeapWord* prev_q = NULL); \
684 \
685 if (q < t && _first_dead > q && \
686 !oop(q)->is_gc_marked()) { \
687 debug_only( \
688 /* we have a chunk of the space which hasn't moved and we've reinitialized \
689 * the mark word during the previous pass, so we can't use is_gc_marked for \
690 * the traversal. */ \
691 HeapWord* const end = _first_dead; \
692 \
693 while (q < end) { \
694 size_t size = obj_size(q); \
695 assert(!oop(q)->is_gc_marked(), \
696 "should be unmarked (special dense prefix handling)"); \
697 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
698 debug_only(prev_q = q); \
699 q += size; \
700 } \
701 ) /* debug_only */ \
702 \
703 if (_first_dead == t) { \
704 q = t; \
705 } else { \
706 /* $$$ Funky */ \
707 q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
708 } \
709 } \
710 \
711 const intx scan_interval = PrefetchScanIntervalInBytes; \
712 const intx copy_interval = PrefetchCopyIntervalInBytes; \
713 while (q < t) { \
714 if (!oop(q)->is_gc_marked()) { \
715 /* mark is pointer to next marked oop */ \
716 debug_only(prev_q = q); \
717 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
718 assert(q > prev_q, "we should be moving forward through memory"); \
719 } else { \
720 /* prefetch beyond q */ \
721 Prefetch::read(q, scan_interval); \
722 \
723 /* size and destination */ \
724 size_t size = obj_size(q); \
725 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
726 \
727 /* prefetch beyond compaction_top */ \
728 Prefetch::write(compaction_top, copy_interval); \
729 \
730 /* copy object and reinit its mark */ \
731 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
732 compaction_top)); \
733 assert(q != compaction_top, "everything in this pass should be moving"); \
734 Copy::aligned_conjoint_words(q, compaction_top, size); \
735 oop(compaction_top)->init_mark(); \
736 assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
737 \
738 debug_only(prev_q = q); \
739 q += size; \
740 } \
741 } \
742 \
743 /* Let's remember if we were empty before we did the compaction. */ \
744 bool was_empty = used_region().is_empty(); \
745 /* Reset space after compaction is complete */ \
746 reset_after_compaction(); \
747 /* We do this clear, below, since it has overloaded meanings for some */ \
748 /* space subtypes. For example, OffsetTableContigSpace's that were */ \
749 /* compacted into will have had their offset table thresholds updated */ \
750 /* continuously, but those that weren't need to have their thresholds */ \
751 /* re-initialized. Also mangles unused area for debugging. */ \
752 if (used_region().is_empty()) { \
753 if (!was_empty) clear(SpaceDecorator::Mangle); \
754 } else { \
755 if (ZapUnusedHeapArea) mangle_unused_area(); \
756 } \
757 }
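// Example (illustrative sketch): a concrete compactible space wires the three
// macros above into the three mark-sweep phases; "MySpace" and the helper
// names (top, block_is_always_obj, obj_size, adjust_obj_size) are stand-ins
// for whatever the space actually provides.
//
//   void MySpace::prepare_for_compaction(CompactPoint* cp) {   // phase 2
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }
//   void MySpace::adjust_pointers() {                          // phase 3
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }
//   void MySpace::compact() {                                  // phase 4
//     SCAN_AND_COMPACT(obj_size);
//   }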
759 class GenSpaceMangler;
761 // A space in which the free area is contiguous. It therefore supports
762 // faster allocation and compaction.
763 class ContiguousSpace: public CompactibleSpace {
764 friend class OneContigSpaceCardGeneration;
765 friend class VMStructs;
766 protected:
767 HeapWord* _top;
768 HeapWord* _concurrent_iteration_safe_limit;
769 // A helper for mangling the unused area of the space in debug builds.
770 GenSpaceMangler* _mangler;
772 GenSpaceMangler* mangler() { return _mangler; }
774 // Allocation helpers (return NULL if full).
775 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
776 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
778 public:
779 ContiguousSpace();
780 ~ContiguousSpace();
782 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
783 virtual void clear(bool mangle_space);
785 // Accessors
786 HeapWord* top() const { return _top; }
787 void set_top(HeapWord* value) { _top = value; }
789 virtual void set_saved_mark() { _saved_mark_word = top(); }
790 void reset_saved_mark() { _saved_mark_word = bottom(); }
792 WaterMark bottom_mark() { return WaterMark(this, bottom()); }
793 WaterMark top_mark() { return WaterMark(this, top()); }
794 WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
795 bool saved_mark_at_top() const { return saved_mark_word() == top(); }
797 // In debug mode, mangle (overwrite with a particular bit
798 // pattern) the unused part of a space.
800 // Used to save an address in a space for later use during mangling.
801 void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
802 // Used to save the space's current top for later use during mangling.
803 void set_top_for_allocations() PRODUCT_RETURN;
805 // Mangle regions in the space from the current top up to the
806 // previously mangled part of the space.
807 void mangle_unused_area() PRODUCT_RETURN;
808 // Mangle [top, end)
809 void mangle_unused_area_complete() PRODUCT_RETURN;
810 // Mangle the given MemRegion.
811 void mangle_region(MemRegion mr) PRODUCT_RETURN;
813 // Do some sparse checking on the area that should have been mangled.
814 void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
815 // Check the complete area that should have been mangled.
816 // This code may be NULL depending on the macro DEBUG_MANGLING.
817 void check_mangled_unused_area_complete() PRODUCT_RETURN;
819 // Size computations: sizes in bytes.
820 size_t capacity() const { return byte_size(bottom(), end()); }
821 size_t used() const { return byte_size(bottom(), top()); }
822 size_t free() const { return byte_size(top(), end()); }
824 // Override from space.
825 bool is_in(const void* p) const;
827 virtual bool is_free_block(const HeapWord* p) const;
829 // In a contiguous space we have a more obvious bound on what parts
830 // contain objects.
831 MemRegion used_region() const { return MemRegion(bottom(), top()); }
833 MemRegion used_region_at_save_marks() const {
834 return MemRegion(bottom(), saved_mark_word());
835 }
837 // Allocation (return NULL if full)
838 virtual HeapWord* allocate(size_t word_size);
839 virtual HeapWord* par_allocate(size_t word_size);
841 virtual bool obj_allocated_since_save_marks(const oop obj) const {
842 return (HeapWord*)obj >= saved_mark_word();
843 }
845 // Iteration
846 void oop_iterate(OopClosure* cl);
847 void oop_iterate(MemRegion mr, OopClosure* cl);
848 void object_iterate(ObjectClosure* blk);
849 // For contiguous spaces this method will iterate safely over objects
850 // in the space (i.e., between bottom and top) when at a safepoint.
851 void safe_object_iterate(ObjectClosure* blk);
852 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
853 // iterates on objects up to the safe limit
854 HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
855 inline HeapWord* concurrent_iteration_safe_limit();
856 // changes the safe limit; all objects from bottom() to the new
857 // limit should be properly initialized
858 inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);
860 #ifndef SERIALGC
861 // In support of parallel oop_iterate.
862 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
863 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
865 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
866 #undef ContigSpace_PAR_OOP_ITERATE_DECL
867 #endif // SERIALGC
869 // Compaction support
870 virtual void reset_after_compaction() {
871 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
872 set_top(compaction_top());
873 // set new iteration safe limit
874 set_concurrent_iteration_safe_limit(compaction_top());
875 }
876 virtual size_t minimum_free_block_size() const { return 0; }
878 // Override.
879 DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
880 CardTableModRefBS::PrecisionStyle precision,
881 HeapWord* boundary = NULL);
883 // Apply "blk->do_oop" to the addresses of all reference fields in objects
884 // starting with the _saved_mark_word, which was noted during a generation's
885 // save_marks and is required to denote the head of an object.
886 // Fields in objects allocated by applications of the closure
887 // *are* included in the iteration.
888 // Updates _saved_mark_word to point to just after the last object
889 // iterated over.
890 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
891 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
893 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
894 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
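// Example (illustrative sketch): scavenging code typically brackets promotion
// with save_marks so that only newly-allocated objects are scanned; the exact
// method name depends on the closure's "nv_suffix" above ("_v" here is
// illustrative), and "sp" and "cl" are hypothetical.
//
//   sp->set_saved_mark();                      // note the current top
//   /* ... objects are promoted/allocated into sp ... */
//   sp->oop_since_save_marks_iterate_v(cl);    // scan only the new objects;
//                                              // also advances the saved mark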
896 // Same as object_iterate, but starting from "mark", which is required
897 // to denote the start of an object. Objects allocated by
898 // applications of the closure *are* included in the iteration.
899 virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
901 // Very inefficient implementation.
902 virtual HeapWord* block_start_const(const void* p) const;
903 size_t block_size(const HeapWord* p) const;
904 // If a block is in the allocated area, it is an object.
905 bool block_is_obj(const HeapWord* p) const { return p < top(); }
907 // Addresses for inlined allocation
908 HeapWord** top_addr() { return &_top; }
909 HeapWord** end_addr() { return &_end; }
911 // Overrides for more efficient compaction support.
912 void prepare_for_compaction(CompactPoint* cp);
914 // PrintHeapAtGC support.
915 virtual void print_on(outputStream* st) const;
917 // Checked dynamic downcasts.
918 virtual ContiguousSpace* toContiguousSpace() {
919 return this;
920 }
922 // Debugging
923 virtual void verify(bool allow_dirty) const;
925 // Used to increase collection frequency. "factor" of 0 means entire
926 // space.
927 void allocate_temporary_filler(int factor);
929 };
932 // A dirty card to oop closure that does filtering.
933 // It knows how to filter out objects that are outside of the _boundary.
934 class Filtering_DCTOC : public DirtyCardToOopClosure {
935 protected:
936 // Override.
937 void walk_mem_region(MemRegion mr,
938 HeapWord* bottom, HeapWord* top);
940 // Walk the given memory region, from bottom to top, applying
941 // the given oop closure to (possibly) all objects found. The
942 // given oop closure may or may not be the same as the oop
943 // closure with which this closure was created, as it may
944 // be a filtering closure which makes use of the _boundary.
945 // We offer two signatures, so the FilteringClosure static type is
946 // apparent.
947 virtual void walk_mem_region_with_cl(MemRegion mr,
948 HeapWord* bottom, HeapWord* top,
949 OopClosure* cl) = 0;
950 virtual void walk_mem_region_with_cl(MemRegion mr,
951 HeapWord* bottom, HeapWord* top,
952 FilteringClosure* cl) = 0;
954 public:
955 Filtering_DCTOC(Space* sp, OopClosure* cl,
956 CardTableModRefBS::PrecisionStyle precision,
957 HeapWord* boundary) :
958 DirtyCardToOopClosure(sp, cl, precision, boundary) {}
959 };
961 // A dirty card to oop closure for contiguous spaces
962 // (ContiguousSpace and sub-classes).
963 // It is a FilteringClosure, as defined above, and it knows:
964 //
965 // 1. That the actual top of any area in a memory region
966 // contained by the space is bounded by the end of the contiguous
967 // region of the space.
968 // 2. That the space is really made up of objects and not just
969 // blocks.
971 class ContiguousSpaceDCTOC : public Filtering_DCTOC {
972 protected:
973 // Overrides.
974 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
976 virtual void walk_mem_region_with_cl(MemRegion mr,
977 HeapWord* bottom, HeapWord* top,
978 OopClosure* cl);
979 virtual void walk_mem_region_with_cl(MemRegion mr,
980 HeapWord* bottom, HeapWord* top,
981 FilteringClosure* cl);
983 public:
984 ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
985 CardTableModRefBS::PrecisionStyle precision,
986 HeapWord* boundary) :
987 Filtering_DCTOC(sp, cl, precision, boundary)
988 {}
989 };
992 // Class EdenSpace describes the eden space in the new generation.
994 class DefNewGeneration;
996 class EdenSpace : public ContiguousSpace {
997 friend class VMStructs;
998 private:
999 DefNewGeneration* _gen;
1001 // _soft_end is used as a soft limit on allocation. As soft limits are
1002 // reached, the slow-path allocation code can invoke other actions and then
1003 // adjust _soft_end up to a new soft limit or to end().
1004 HeapWord* _soft_end;
1006 public:
1007 EdenSpace(DefNewGeneration* gen) :
1008 _gen(gen), _soft_end(NULL) {}
1010 // Get/set just the 'soft' limit.
1011 HeapWord* soft_end() { return _soft_end; }
1012 HeapWord** soft_end_addr() { return &_soft_end; }
1013 void set_soft_end(HeapWord* value) { _soft_end = value; }
1015 // Override.
1016 void clear(bool mangle_space);
1018 // Set both the 'hard' and 'soft' limits (_end and _soft_end).
1019 void set_end(HeapWord* value) {
1020 set_soft_end(value);
1021 ContiguousSpace::set_end(value);
1022 }
1024 // Allocation (return NULL if full)
1025 HeapWord* allocate(size_t word_size);
1026 HeapWord* par_allocate(size_t word_size);
1027 };
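// Example (illustrative sketch): slow-path allocation can nudge the soft end
// toward the hard end once the current soft limit is reached; "eden" and
// "step" are hypothetical, and MIN2 is the usual HotSpot min macro.
//
//   if (eden->soft_end() < eden->end()) {
//     eden->set_soft_end(MIN2(eden->soft_end() + step, eden->end()));
//   }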
1029 // Class ConcEdenSpace extends EdenSpace for the sake of safe
1030 // allocation while the soft end is being modified concurrently.
1032 class ConcEdenSpace : public EdenSpace {
1033 public:
1034 ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
1036 // Allocation (return NULL if full)
1037 HeapWord* par_allocate(size_t word_size);
1038 };
1041 // A ContiguousSpace that supports an efficient "block_start" operation via
1042 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
1043 // other spaces.) This is the abstract base class for old generation
1044 // (tenured, perm) spaces.
1046 class OffsetTableContigSpace: public ContiguousSpace {
1047 friend class VMStructs;
1048 protected:
1049 BlockOffsetArrayContigSpace _offsets;
1050 Mutex _par_alloc_lock;
1052 public:
1053 // Constructor
1054 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
1055 MemRegion mr);
1057 void set_bottom(HeapWord* value);
1058 void set_end(HeapWord* value);
1060 void clear(bool mangle_space);
1062 inline HeapWord* block_start_const(const void* p) const;
1064 // Add offset table update.
1065 virtual inline HeapWord* allocate(size_t word_size);
1066 inline HeapWord* par_allocate(size_t word_size);
1068 // MarkSweep support phase3
1069 virtual HeapWord* initialize_threshold();
1070 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
1072 virtual void print_on(outputStream* st) const;
1074 // Debugging
1075 void verify(bool allow_dirty) const;
1077 // Shared space support
1078 void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
1079 };
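// Example (illustrative sketch): given an arbitrary interior address (say,
// from a dirty card), the block offset array lets the space find the start of
// the enclosing block cheaply; "old_space" and "addr" are hypothetical.
//
//   HeapWord* start = old_space->block_start_const(addr);
//   assert(start <= (HeapWord*)addr, "block must begin at or before addr");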
1082 // Class TenuredSpace is used by TenuredGeneration
1084 class TenuredSpace: public OffsetTableContigSpace {
1085 friend class VMStructs;
1086 protected:
1087 // Mark sweep support
1088 size_t allowed_dead_ratio() const;
1089 public:
1090 // Constructor
1091 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
1092 MemRegion mr) :
1093 OffsetTableContigSpace(sharedOffsetArray, mr) {}
1094 };
1097 // Class ContigPermSpace is used by CompactingPermGen
1099 class ContigPermSpace: public OffsetTableContigSpace {
1100 friend class VMStructs;
1101 protected:
1102 // Mark sweep support
1103 size_t allowed_dead_ratio() const;
1104 public:
1105 // Constructor
1106 ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
1107 OffsetTableContigSpace(sharedOffsetArray, mr) {}
1108 };