--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Aug 27 10:56:33 2008 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Aug 27 11:20:46 2008 -0700
@@ -2761,13 +2761,14 @@
  public:
   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
 
-  void do_bit(size_t offset) {
+  bool do_bit(size_t offset) {
     HeapWord* addr = _marks->offsetToHeapWord(offset);
     if (!_marks->isMarked(addr)) {
       oop(addr)->print();
       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
       _failed = true;
     }
+    return true;
   }
 
   bool failed() { return _failed; }
@@ -4669,8 +4670,11 @@
       startTimer();
       sample_eden();
       // Get and clear dirty region from card table
-      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
-                      MemRegion(nextAddr, endAddr));
+      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
+                      MemRegion(nextAddr, endAddr),
+                      true,
+                      CardTableModRefBS::precleaned_card_val());
+
       assert(dirtyRegion.start() >= nextAddr,
              "returned region inconsistent?");
     }
@@ -5438,8 +5442,8 @@
                 &mrias_cl);
   {
     TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
-    // Iterate over the dirty cards, marking them precleaned, and
-    // setting the corresponding bits in the mod union table.
+    // Iterate over the dirty cards, setting the corresponding bits in the
+    // mod union table.
     {
       ModUnionClosure modUnionClosure(&_modUnionTable);
       _ct->ct_bs()->dirty_card_iterate(
@@ -6211,7 +6215,7 @@
 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
 // further below.
 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
-  _bm(NULL,0),
+  _bm(),
   _shifter(shifter),
   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
 {
@@ -6236,7 +6240,7 @@
   }
   assert(_virtual_space.committed_size() == brs.size(),
          "didn't reserve backing store for all of CMS bit map?");
-  _bm.set_map((uintptr_t*)_virtual_space.low());
+  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
          _bmWordSize, "inconsistency in bit map sizing");
   _bm.set_size(_bmWordSize >> _shifter);
@@ -6874,10 +6878,10 @@
 
 // Should revisit to see if this should be restructured for
 // greater efficiency.
-void MarkFromRootsClosure::do_bit(size_t offset) {
+bool MarkFromRootsClosure::do_bit(size_t offset) {
   if (_skipBits > 0) {
     _skipBits--;
-    return;
+    return true;
   }
   // convert offset into a HeapWord*
   HeapWord* addr = _bitMap->startWord() + offset;
@@ -6915,10 +6919,11 @@
       } // ...else the setting of klass will dirty the card anyway.
     }
     DEBUG_ONLY(})
-      return;
+      return true;
     }
   }
   scanOopsInOop(addr);
+  return true;
 }
 
 // We take a break if we've been at this for a while,
@@ -7052,10 +7057,10 @@
 
 // Should revisit to see if this should be restructured for
 // greater efficiency.
-void Par_MarkFromRootsClosure::do_bit(size_t offset) {
+bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
   if (_skip_bits > 0) {
     _skip_bits--;
-    return;
+    return true;
   }
   // convert offset into a HeapWord*
   HeapWord* addr = _bit_map->startWord() + offset;
@@ -7070,10 +7075,11 @@
     if (p->klass_or_null() == NULL || !p->is_parsable()) {
       // in the case of Clean-on-Enter optimization, redirty card
       // and avoid clearing card by increasing the threshold.
-      return;
+      return true;
     }
   }
   scan_oops_in_oop(addr);
+  return true;
 }
 
 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
@@ -7196,7 +7202,7 @@
 
 // Should revisit to see if this should be restructured for
 // greater efficiency.
-void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
+bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
   // convert offset into a HeapWord*
   HeapWord* addr = _verification_bm->startWord() + offset;
   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
@@ -7224,6 +7230,7 @@
     new_oop->oop_iterate(&_pam_verify_closure);
   }
   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
+  return true;
 }
 
 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
@@ -7469,8 +7476,12 @@
 // Grey object rescan during pre-cleaning and second checkpoint phases --
 // the non-parallel version (the parallel version appears further below.)
 void PushAndMarkClosure::do_oop(oop obj) {
-  // If _concurrent_precleaning, ignore mark word verification
-  assert(obj->is_oop_or_null(_concurrent_precleaning),
+  // Ignore mark word verification. If during concurrent precleaning,
+  // the object monitor may be locked. If during the checkpoint
+  // phases, the object may already have been reached by a different
+  // path and may be at the end of the global overflow list (so
+  // the mark word may be NULL).
+  assert(obj->is_oop_or_null(true /* ignore mark word */),
          "expected an oop or NULL");
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation