--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Aug 31 16:39:35 2012 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Sat Sep 01 13:25:18 2012 -0400
@@ -29,7 +29,7 @@
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/shared/liveRange.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
-#include "gc_interface/collectedHeap.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/resourceArea.hpp"
@@ -658,13 +658,13 @@
     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                        HeapWord* bottom, HeapWord* top, \
                                        ClosureType* cl)
-  walk_mem_region_with_cl_DECL(OopClosure);
+  walk_mem_region_with_cl_DECL(ExtendedOopClosure);
   walk_mem_region_with_cl_DECL(FilteringClosure);

 public:
   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                       CMSCollector* collector,
-                      OopClosure* cl,
+                      ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
     Filtering_DCTOC(sp, cl, precision, boundary),
@@ -746,11 +746,11 @@
 // (There are only two of these, rather than N, because the split is due
 // only to the introduction of the FilteringClosure, a local part of the
 // impl of this abstraction.)
-FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
+FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

 DirtyCardToOopClosure*
-CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
+CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                       CardTableModRefBS::PrecisionStyle precision,
                                       HeapWord* boundary) {
   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
@@ -781,7 +781,7 @@
 }

 // Apply the given closure to each oop in the space.
-void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
+void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
   assert_lock_strong(freelistLock());
   HeapWord *cur, *limit;
   size_t curSize;
@@ -795,7 +795,7 @@
 }

 // Apply the given closure to each oop in the space \intersect memory region.
-void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
+void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   assert_lock_strong(freelistLock());
   if (is_empty()) {
     return;
@@ -1006,13 +1006,12 @@
       }
     } else {
       // must read from what 'p' points to in each loop.
-      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
+      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
       if (k != NULL) {
-        assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
+        assert(k->is_klass(), "Should really be klass oop.");
         oop o = (oop)p;
-        assert(o->is_parsable(), "Should be parsable");
         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
-        size_t res = o->size_given_klass(k->klass_part());
+        size_t res = o->size_given_klass(k);
         res = adjustObjectSize(res);
         assert(res != 0, "Block size should not be 0");
         return res;
@@ -1021,6 +1020,7 @@
   }
 }

+// TODO: Now that is_parsable is gone, we should combine these two functions.
 // A variant of the above that uses the Printezis bits for
 // unparsable but allocated objects. This avoids any possible
 // stalls waiting for mutators to initialize objects, and is
@@ -1048,15 +1048,15 @@
       }
     } else {
       // must read from what 'p' points to in each loop.
-      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
+      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
       // We trust the size of any object that has a non-NULL
       // klass and (for those in the perm gen) is parsable
       // -- irrespective of its conc_safe-ty.
-      if (k != NULL && ((oopDesc*)p)->is_parsable()) {
-        assert(k->is_oop(), "Should really be klass oop.");
+      if (k != NULL) {
+        assert(k->is_klass(), "Should really be klass oop.");
         oop o = (oop)p;
         assert(o->is_oop(), "Should be an oop");
-        size_t res = o->size_given_klass(k->klass_part());
+        size_t res = o->size_given_klass(k);
         res = adjustObjectSize(res);
         assert(res != 0, "Block size should not be 0");
         return res;
@@ -1103,7 +1103,7 @@
   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
   //        "Should be a block boundary");
   if (FreeChunk::indicatesFreeChunk(p)) return false;
-  klassOop k = oop(p)->klass_or_null();
+  Klass* k = oop(p)->klass_or_null();
   if (k != NULL) {
     // Ignore mark word because it may have been used to
     // chain together promoted objects (the last one
@@ -1140,23 +1140,6 @@
   if (_collector->abstract_state() == CMSCollector::Sweeping) {
     CMSBitMap* live_map = _collector->markBitMap();
     return live_map->par_isMarked((HeapWord*) p);
-  } else {
-    // If we're not currently sweeping and we haven't swept the perm gen in
-    // the previous concurrent cycle then we may have dead but unswept objects
-    // in the perm gen. In this case, we use the "deadness" information
-    // that we had saved in perm_gen_verify_bit_map at the last sweep.
-    if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
-      if (_collector->verifying()) {
-        CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
-        // Object is marked in the dead_map bitmap at the previous sweep
-        // when we know that it's dead; if the bitmap is not allocated then
-        // the object is alive.
-        return (dead_map->sizeInBits() == 0) // bit_map has been allocated
-               || !dead_map->par_isMarked((HeapWord*) p);
-      } else {
-        return false; // We can't say for sure if it's live, so we say that it's dead.
-      }
-    }
   }
   return true;
 }
@@ -2442,7 +2425,7 @@
   VerifyAllOopsClosure(const CMSCollector* collector,
                        const CompactibleFreeListSpace* sp, MemRegion span,
                        bool past_remark, CMSBitMap* bit_map) :
-    OopClosure(), _collector(collector), _sp(sp), _span(span),
+    _collector(collector), _sp(sp), _span(span),
     _past_remark(past_remark), _bit_map(bit_map) { }

   virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
@@ -2478,8 +2461,10 @@
     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
                             _collector->markBitMap());
     CollectedHeap* ch = Universe::heap();
-    ch->oop_iterate(&cl);           // all oops in generations
-    ch->permanent_oop_iterate(&cl); // all oops in perm gen
+
+    // Iterate over all oops in the heap. Uses the _no_header version
+    // since we are not interested in following the klass pointers.
+    ch->oop_iterate_no_header(&cl);
   }

   if (VerifyObjectStartArray) {
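The subtlest part of the block-size hunks above is the race with concurrently allocating mutators: as the comments note, the code must re-read the klass field through a volatile pointer on every loop iteration and may only trust size_given_klass() after it observes a non-NULL Klass*. The following standalone C++ sketch illustrates that publish-then-trust pattern. It is not HotSpot code; MockKlass, MockObjHeader, and block_size_if_parsable are hypothetical stand-ins for Klass, oopDesc, and the block-size logic, and the sketch assumes the allocator publishes the class pointer only after the object body has been initialized.

#include <atomic>
#include <cstddef>

struct MockKlass {
  size_t instance_words;   // size, in words, of an instance of this class
};

struct MockObjHeader {
  // The allocator stores this pointer last, after the object body is set up,
  // so a non-NULL value implies the object is parsable.
  std::atomic<MockKlass*> klass{nullptr};
};

// Analogue of the re-read in the block-size code above: load the class
// pointer afresh on every call and trust the size only once it is published.
size_t block_size_if_parsable(const MockObjHeader* p) {
  MockKlass* k = p->klass.load(std::memory_order_acquire);
  if (k != nullptr) {
    return k->instance_words;  // publication is ordered after initialization
  }
  return 0;                    // not yet parsable; caller retries or uses side metadata
}

A caller that must not stall, like the CMS code in this diff, would either retry on a result of 0 or, as the Printezis-bits variant above does, fall back to side metadata instead of waiting for the mutator to finish initializing the object.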