src/share/vm/memory/space.hpp

changeset 777:37f87013dfd8
parent    548:ba764ed4b6f2
child     782:60fb9c4db4e6

--- a/src/share/vm/memory/space.hpp	(624:0b27f3512f9e)
+++ b/src/share/vm/memory/space.hpp	(777:37f87013dfd8)
@@ -103,11 +103,11 @@
   HeapWord* bottom() const { return _bottom; }
   HeapWord* end() const { return _end; }
   virtual void set_bottom(HeapWord* value) { _bottom = value; }
   virtual void set_end(HeapWord* value) { _end = value; }
 
-  HeapWord* saved_mark_word() const { return _saved_mark_word; }
+  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }
   void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
 
   MemRegionClosure* preconsumptionDirtyCardClosure() const {
     return _preconsumptionDirtyCardClosure;
   }
@@ -129,12 +129,22 @@
   // and after the call to save marks.
   virtual MemRegion used_region_at_save_marks() const {
     return MemRegion(bottom(), saved_mark_word());
   }
 
-  // Initialization
+  // Initialization.
+  // "initialize" should be called once on a space, before it is used for
+  // any purpose. The "mr" arguments gives the bounds of the space, and
+  // the "clear_space" argument should be true unless the memory in "mr" is
+  // known to be zeroed.
   virtual void initialize(MemRegion mr, bool clear_space);
+
+  // Sets the bounds (bottom and end) of the current space to those of "mr."
+  void set_bounds(MemRegion mr);
+
+  // The "clear" method must be called on a region that may have
+  // had allocation performed in it, but is now to be considered empty.
   virtual void clear();
 
   // For detecting GC bugs. Should only be called at GC boundaries, since
   // some unused space may be used as scratch space during GC's.
   // Default implementation does nothing. We also call this when expanding
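Note: the new set_bounds() factors the bounds-setting step out of
initialize(). A minimal sketch of how the two plausibly relate, assuming
initialize() is layered on set_bounds() and clear() (the definitions live
in space.cpp and are not part of this header diff):

  // Sketch only; assumed layering, not the changeset's .cpp body.
  void Space::set_bounds(MemRegion mr) {
    set_bottom(mr.start());
    set_end(mr.end());
  }

  void Space::initialize(MemRegion mr, bool clear_space) {
    set_bounds(mr);
    if (clear_space) clear();  // callers pass false only when mr is known zeroed
  }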
@@ -214,11 +224,17 @@
 
   // If "p" is in the space, returns the address of the start of the
   // "block" that contains "p". We say "block" instead of "object" since
   // some heaps may not pack objects densely; a chunk may either be an
   // object or a non-object. If "p" is not in the space, return NULL.
-  virtual HeapWord* block_start(const void* p) const = 0;
+  virtual HeapWord* block_start_const(const void* p) const = 0;
+
+  // The non-const version may have benevolent side effects on the data
+  // structure supporting these calls, possibly speeding up future calls.
+  // The default implementation, however, is simply to call the const
+  // version.
+  inline virtual HeapWord* block_start(const void* p);
 
   // Requires "addr" to be the start of a chunk, and returns its size.
   // "addr + size" is required to be the start of a new chunk, or the end
   // of the active area of the heap.
   virtual size_t block_size(const HeapWord* addr) const = 0;
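Note: per the comment above, the base-class block_start() simply forwards
to the new const pure-virtual; only subclasses with lookup caches would do
more. A sketch of the assumed inline default (its definition is not shown
in this diff):

  // Assumed default: delegate to the const query, no side effects here.
  inline HeapWord* Space::block_start(const void* p) {
    return block_start_const(p);
  }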
@@ -280,16 +296,17 @@
   OopClosure* _cl;
   Space* _sp;
   CardTableModRefBS::PrecisionStyle _precision;
   HeapWord* _boundary;  // If non-NULL, process only non-NULL oops
                         // pointing below boundary.
   HeapWord* _min_done;  // ObjHeadPreciseArray precision requires
                         // a downwards traversal; this is the
                         // lowest location already done (or,
                         // alternatively, the lowest address that
                         // shouldn't be done again. NULL means infinity.)
   NOT_PRODUCT(HeapWord* _last_bottom;)
+  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
 
   // Get the actual top of the area on which the closure will
   // operate, given where the top is assumed to be (the end of the
   // memory region passed to do_MemRegion) and where the object
   // at the top is assumed to start. For example, an object may
@@ -309,17 +326,19 @@
   DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                         CardTableModRefBS::PrecisionStyle precision,
                         HeapWord* boundary) :
     _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
     _min_done(NULL) {
-    NOT_PRODUCT(_last_bottom = NULL;)
+    NOT_PRODUCT(_last_bottom = NULL);
+    NOT_PRODUCT(_last_explicit_min_done = NULL);
   }
 
   void do_MemRegion(MemRegion mr);
 
   void set_min_done(HeapWord* min_done) {
     _min_done = min_done;
+    NOT_PRODUCT(_last_explicit_min_done = _min_done);
   }
 #ifndef PRODUCT
   void set_last_bottom(HeapWord* last_bottom) {
     _last_bottom = last_bottom;
   }
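Note: the two debug-only fields record the last bottom() seen and the last
explicitly requested minimum, giving do_MemRegion() material for sanity
assertions in debug builds. An illustrative use of the closure (the space
and closure variable names here are hypothetical):

  // Illustration only: scan one space's dirty cards with object-head
  // precision and no boundary restriction.
  DirtyCardToOopClosure dcto(sp, &oop_cl,
                             CardTableModRefBS::ObjHeadPreciseArray,
                             /* boundary */ NULL);
  dcto.do_MemRegion(dirty_mr);  // debug builds also update the _last_* fields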
@@ -353,10 +372,11 @@
   HeapWord* _compaction_top;
   CompactibleSpace* _next_compaction_space;
 
 public:
   virtual void initialize(MemRegion mr, bool clear_space);
+  virtual void clear();
 
   // Used temporarily during a compaction phase to hold the value
   // top should have when compaction is complete.
   HeapWord* compaction_top() const { return _compaction_top; }
 
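Note: CompactibleSpace now overrides clear() alongside initialize(). A
hedged sketch of the likely intent, namely that clearing must also reset
the compaction bookkeeping (the actual body is in space.cpp, outside this
diff):

  // Assumed shape: reset compaction state along with the base clear.
  void CompactibleSpace::clear() {
    Space::clear();
    _compaction_top = bottom();  // next compaction fills from the bottom
  }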
509 "these are the only valid states during a mark sweep"); \ 529 "these are the only valid states during a mark sweep"); \
510 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ 530 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
511 /* prefetch beyond q */ \ 531 /* prefetch beyond q */ \
512 Prefetch::write(q, interval); \ 532 Prefetch::write(q, interval); \
513 /* size_t size = oop(q)->size(); changing this for cms for perm gen */\ 533 /* size_t size = oop(q)->size(); changing this for cms for perm gen */\
514 size_t size = block_size(q); \ 534 size_t size = block_size(q); \
515 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ 535 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
516 q += size; \ 536 q += size; \
517 end_of_live = q; \ 537 end_of_live = q; \
518 } else { \ 538 } else { \
519 /* run over all the contiguous dead objects */ \ 539 /* run over all the contiguous dead objects */ \
@@ -573,72 +593,72 @@
 \
   /* save the compaction_top of the compaction space. */ \
   cp->space->set_compaction_top(compact_top); \
 }
 
 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
   /* adjust all the interior pointers to point at the new locations of objects \
    * Used by MarkSweep::mark_sweep_phase3() */ \
 \
   HeapWord* q = bottom(); \
   HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */ \
 \
   assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
 \
   if (q < t && _first_dead > q && \
       !oop(q)->is_gc_marked()) { \
     /* we have a chunk of the space which hasn't moved and we've \
      * reinitialized the mark word during the previous pass, so we can't \
      * use is_gc_marked for the traversal. */ \
     HeapWord* end = _first_dead; \
 \
     while (q < end) { \
       /* I originally tried to conjoin "block_start(q) == q" to the \
        * assertion below, but that doesn't work, because you can't \
        * accurately traverse previous objects to get to the current one \
        * after their pointers (including pointers into permGen) have been \
        * updated, until the actual compaction is done. dld, 4/00 */ \
       assert(block_is_obj(q), \
              "should be at block boundaries, and should be looking at objs"); \
 \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
 \
       /* point all the oops to the new location */ \
       size_t size = oop(q)->adjust_pointers(); \
       size = adjust_obj_size(size); \
 \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
 \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
 \
       q += size; \
     } \
 \
     if (_first_dead == t) { \
       q = t; \
     } else { \
       /* $$$ This is funky. Using this to read the previously written \
        * LiveRange. See also use below. */ \
       q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
     } \
   } \
 \
   const intx interval = PrefetchScanIntervalInBytes; \
 \
   debug_only(HeapWord* prev_q = NULL); \
   while (q < t) { \
     /* prefetch beyond q */ \
     Prefetch::write(q, interval); \
     if (oop(q)->is_gc_marked()) { \
       /* q is alive */ \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
       /* point all the oops to the new location */ \
       size_t size = oop(q)->adjust_pointers(); \
       size = adjust_obj_size(size); \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
       debug_only(prev_q = q); \
       q += size; \
     } else { \
       /* q is not a live object, so its mark should point at the next \
        * live object */ \
       debug_only(prev_q = q); \
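Note: the "$$$ This is funky" comment refers to the trick of stashing a
LiveRange at the start of each dead run during the forwarding pass, so the
adjust pass can skip the whole run in one step. An illustration of the
assumed pairing (the setter name is hypothetical):

  // Forwarding pass (assumed): record where the next live object begins.
  LiveRange* lr = (LiveRange*)q;   // q = first word of a dead run
  lr->set_start(next_live);        // hypothetical setter
  // Adjust pass (as in the macro above): read it back via the mark word.
  HeapWord* next = (HeapWord*)oop(q)->mark()->decode_pointer();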
@@ -714,19 +734,21 @@
       debug_only(prev_q = q); \
       q += size; \
     } \
   } \
 \
+  /* Let's remember if we were empty before we did the compaction. */ \
+  bool was_empty = used_region().is_empty(); \
   /* Reset space after compaction is complete */ \
   reset_after_compaction(); \
   /* We do this clear, below, since it has overloaded meanings for some */ \
   /* space subtypes. For example, OffsetTableContigSpace's that were */ \
   /* compacted into will have had their offset table thresholds updated */ \
   /* continuously, but those that weren't need to have their thresholds */ \
   /* re-initialized. Also mangles unused area for debugging. */ \
-  if (is_empty()) { \
-    clear(); \
+  if (used_region().is_empty()) { \
+    if (!was_empty) clear(); \
   } else { \
     if (ZapUnusedHeapArea) mangle_unused_area(); \
   } \
 }
 
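Note: the was_empty capture changes the macro's tail so that clear() runs
only for a space that became empty during this compaction; a space that was
already empty is no longer redundantly re-cleared. The new control flow,
restated without the macro plumbing:

  // Simplified restatement of the tail above, not additional code.
  bool was_empty = used_region().is_empty();  // sampled before the reset
  reset_after_compaction();
  if (used_region().is_empty()) {
    if (!was_empty) clear();                  // re-init only if newly empty
  } else if (ZapUnusedHeapArea) {
    mangle_unused_area();                     // debug poisoning past new top
  }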
@@ -748,12 +770,12 @@
 
   // Accessors
   HeapWord* top() const { return _top; }
   void set_top(HeapWord* value) { _top = value; }
 
-  void set_saved_mark() { _saved_mark_word = top(); }
+  virtual void set_saved_mark() { _saved_mark_word = top(); }
   void reset_saved_mark() { _saved_mark_word = bottom(); }
 
   virtual void clear();
 
   WaterMark bottom_mark() { return WaterMark(this, bottom()); }
   WaterMark top_mark() { return WaterMark(this, top()); }
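Note: making set_saved_mark() virtual (together with the earlier
saved_mark_word() change) lets subclasses with their own allocation state
intercept the save. The usual protocol, illustrated (variable names
assumed):

  cs->set_saved_mark();            // records top() at this instant
  // ... subsequent allocations advance top() past the saved mark ...
  MemRegion before = cs->used_region_at_save_marks();  // [bottom, saved mark)
  // objects allocated since the save lie in [saved_mark_word(), top())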
@@ -841,11 +863,11 @@
   // to denote the start of an object. Objects allocated by
   // applications of the closure *are* included in the iteration.
   virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
 
   // Very inefficient implementation.
-  virtual HeapWord* block_start(const void* p) const;
+  virtual HeapWord* block_start_const(const void* p) const;
   size_t block_size(const HeapWord* p) const;
   // If a block is in the allocated area, it is an object.
   bool block_is_obj(const HeapWord* p) const { return p < top(); }
 
   // Addresses for inlined allocation
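Note: the "very inefficient implementation" flagged above is presumably a
linear walk from the bottom of the space; a sketch of that assumed behavior
(the definition is in space.cpp, not in this diff):

  // Assumed shape of the slow default: scan block by block from bottom().
  HeapWord* ContiguousSpace::block_start_const(const void* p) const {
    if (p < (const void*)bottom()) return NULL;
    HeapWord* q = bottom();
    while (q < top()) {
      HeapWord* next = q + block_size(q);
      if (p < (const void*)next) return q;  // p is inside block [q, next)
      q = next;
    }
    return NULL;                            // p is past the allocated area
  }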
@@ -998,13 +1020,14 @@
                          MemRegion mr);
 
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
+  virtual void initialize(MemRegion mr, bool clear_space);
   void clear();
 
-  inline HeapWord* block_start(const void* p) const;
+  inline HeapWord* block_start_const(const void* p) const;
 
   // Add offset table update.
   virtual inline HeapWord* allocate(size_t word_size);
   inline HeapWord* par_allocate(size_t word_size);
 
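Note: the "offset table update" mentioned above is the reason allocate() is
overridden here at all. A hedged sketch of what that update could look like
(the field and helper names are assumed, not taken from this diff):

  // Assumed shape: keep the block offset table in sync on each allocation
  // so that block_start_const() can answer card-based queries quickly.
  HeapWord* OffsetTableContigSpace::allocate(size_t word_size) {
    HeapWord* res = ContiguousSpace::allocate(word_size);  // bump pointer
    if (res != NULL) {
      _offsets.alloc_block(res, word_size);  // hypothetical helper name
    }
    return res;
  }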
