--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sat Aug 14 00:47:52 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Aug 16 15:58:42 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -402,6 +402,29 @@
   return res;
 }

+void LinearAllocBlock::print_on(outputStream* st) const {
+  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
+            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
+            _ptr, _word_size, _refillSize, _allocation_size_limit);
+}
+
+void CompactibleFreeListSpace::print_on(outputStream* st) const {
+  st->print_cr("COMPACTIBLE FREELIST SPACE");
+  st->print_cr(" Space:");
+  Space::print_on(st);
+
+  st->print_cr("promoInfo:");
+  _promoInfo.print_on(st);
+
+  st->print_cr("_smallLinearAllocBlock");
+  _smallLinearAllocBlock.print_on(st);
+
+  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
+
+  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
+               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
+}
+
 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 const {
   reportIndexedFreeListStatistics();
@@ -557,13 +580,15 @@
 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   HeapWord* prevEnd = end();
   assert(prevEnd != value, "unnecessary set_end call");
-  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
+  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
+        "New end is below unallocated block");
   _end = value;
   if (prevEnd != NULL) {
     // Resize the underlying block offset table.
     _bt.resize(pointer_delta(value, bottom()));
     if (value <= prevEnd) {
-      assert(value >= unallocated_block(), "New end is below unallocated block");
+      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
+             "New end is below unallocated block");
     } else {
       // Now, take this new chunk and add it to the free blocks.
       // Note that the BOT has not yet been updated for this block.
@@ -938,7 +963,6 @@

 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   NOT_PRODUCT(verify_objects_initialized());
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   // This must be volatile, or else there is a danger that the compiler
   // will compile the code below into a sometimes-infinite loop, by keeping
   // the value read the first time in a register.
@@ -957,7 +981,7 @@
       // must read from what 'p' points to in each loop.
       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
       if (k != NULL) {
-        assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
+        assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
         oop o = (oop)p;
         assert(o->is_parsable(), "Should be parsable");
         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
@@ -1231,7 +1255,6 @@
       // satisfy the request.  This is different that
       // evm.
       // Don't record chunk off a LinAB?  smallSplitBirth(size);
-
     } else {
       // Raid the exact free lists larger than size, even if they are not
       // overpopulated.
@@ -1449,6 +1472,7 @@
     // Update BOT last so that other (parallel) GC threads see a consistent
     // view of the BOT and free blocks.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     _bt.split_block(res, blk_size, size);  // adjust block offset table
   }
   return res;
@@ -1477,6 +1501,7 @@
     // Update BOT last so that other (parallel) GC threads see a consistent
     // view of the BOT and free blocks.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     _bt.split_block(res, blk_size, size);  // adjust block offset table
     _bt.allocated(res, size);
   }
@@ -1856,6 +1881,8 @@
   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
   // Above must occur before BOT is updated below.
   // adjust block offset table
+  OrderAccess::storestore();
+  assert(chunk->isFree() && ffc->isFree(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_size < SmallForDictionary) {
     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
@@ -1911,8 +1938,7 @@
   // mark the "end" of the used space at the time of this call;
   // note, however, that promoted objects from this point
   // on are tracked in the _promoInfo below.
-  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
-                      unallocated_block() : end());
+  set_saved_mark_word(unallocated_block());
   // inform allocator that promotions should be tracked.
   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   _promoInfo.startTrackingPromotions();
@@ -2238,8 +2264,7 @@
 }

 void CompactibleFreeListSpace::print() const {
-  tty->print(" CompactibleFreeListSpace");
-  Space::print();
+  Space::print_on(tty);
 }

 void CompactibleFreeListSpace::prepare_for_verify() {
@@ -2253,18 +2278,28 @@
  private:
   const CompactibleFreeListSpace* _sp;
   const MemRegion                 _span;
+  HeapWord*                       _last_addr;
+  size_t                          _last_size;
+  bool                            _last_was_obj;
+  bool                            _last_was_live;

  public:
   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
-    MemRegion span) :  _sp(sp), _span(span) { }
+    MemRegion span) :  _sp(sp), _span(span),
+                       _last_addr(NULL), _last_size(0),
+                       _last_was_obj(false), _last_was_live(false) { }

   virtual size_t do_blk(HeapWord* addr) {
     size_t res;
+    bool   was_obj  = false;
+    bool   was_live = false;
     if (_sp->block_is_obj(addr)) {
+      was_obj = true;
       oop p = oop(addr);
       guarantee(p->is_oop(), "Should be an oop");
       res = _sp->adjustObjectSize(p->size());
       if (_sp->obj_is_alive(addr)) {
+        was_live = true;
         p->verify();
       }
     } else {
@@ -2275,7 +2310,20 @@
                 "Chunk should be on a free list");
       }
     }
-    guarantee(res != 0, "Livelock: no rank reduction!");
+    if (res == 0) {
+      gclog_or_tty->print_cr("Livelock: no rank reduction!");
+      gclog_or_tty->print_cr(
+        " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
+        " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
+        addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
+        _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
+      _sp->print_on(gclog_or_tty);
+      guarantee(false, "Seppuku!");
+    }
+    _last_addr = addr;
+    _last_size = res;
+    _last_was_obj  = was_obj;
+    _last_was_live = was_live;
     return res;
   }
 };
@@ -2521,7 +2569,7 @@

 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
   FreeChunk* res;
-  word_sz = _cfls->adjustObjectSize(word_sz);
+  guarantee(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
     // This locking manages sync with other large object allocations.
     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
@@ -2667,12 +2715,12 @@
        (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
        (CMSSplitIndexedFreeListBlocks || k <= 1);
        k++, cur_sz = k * word_sz) {
-    FreeList* gfl = &_indexedFreeList[cur_sz];
     FreeList fl_for_cur_sz;  // Empty.
     fl_for_cur_sz.set_size(cur_sz);
     {
       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                       Mutex::_no_safepoint_check_flag);
+      FreeList* gfl = &_indexedFreeList[cur_sz];
       if (gfl->count() != 0) {
         // nn is the number of chunks of size cur_sz that
         // we'd need to split k-ways each, in order to create
@@ -2685,9 +2733,9 @@
         // we increment the split death count by the number of blocks
         // we just took from the cur_sz-size blocks list and which
         // we will be splitting below.
-        ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
+        ssize_t deaths = gfl->splitDeaths() +
                          fl_for_cur_sz.count();
-        _indexedFreeList[cur_sz].set_splitDeaths(deaths);
+        gfl->set_splitDeaths(deaths);
       }
     }
   }
@@ -2703,18 +2751,25 @@
       // access the main chunk sees it as a single free block until we
       // change it.
       size_t fc_size = fc->size();
+      assert(fc->isFree(), "Error");
       for (int i = k-1; i >= 0; i--) {
         FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+        assert((i != 0) ||
+               ((fc == ffc) && ffc->isFree() &&
+                (ffc->size() == k*word_sz) && (fc_size == word_sz)),
+               "Counting error");
         ffc->setSize(word_sz);
+        ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
         ffc->linkNext(NULL);
-        ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
         // Above must occur before BOT is updated below.
-        // splitting from the right, fc_size == (k - i + 1) * wordsize
-        _bt.mark_block((HeapWord*)ffc, word_sz);
+        OrderAccess::storestore();
+        // splitting from the right, fc_size == i * word_sz
+        _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
         fc_size -= word_sz;
-        _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+        assert(fc_size == i*word_sz, "Error");
+        _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
         _bt.verify_single_block((HeapWord*)fc, fc_size);
-        _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+        _bt.verify_single_block((HeapWord*)ffc, word_sz);
         // Push this on "fl".
         fl->returnChunkAtHead(ffc);
       }
@@ -2744,7 +2799,7 @@
                                   _dictionary->minSize()),
                                   FreeBlockDictionary::atLeast);
     if (fc != NULL) {
-      _bt.allocated((HeapWord*)fc, fc->size());        // update _unallocated_blk
+      _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
       dictionary()->dictCensusUpdate(fc->size(),
                                      true /*split*/,
                                      false /*birth*/);
@@ -2754,8 +2809,10 @@
     }
   }
   if (fc == NULL) return;
+  // Otherwise, split up that block.
   assert((ssize_t)n >= 1, "Control point invariant");
-  // Otherwise, split up that block.
+  assert(fc->isFree(), "Error: should be a free block");
+  _bt.verify_single_block((HeapWord*)fc, fc->size());
   const size_t nn = fc->size() / word_sz;
   n = MIN2(nn, n);
   assert((ssize_t)n >= 1, "Control point invariant");
@@ -2773,6 +2830,7 @@
   // dictionary and return, leaving "fl" empty.
   if (n == 0) {
     returnChunkToDictionary(fc);
+    assert(fl->count() == 0, "We never allocated any blocks");
     return;
   }

@@ -2785,11 +2843,14 @@
     size_t prefix_size = n * word_sz;
     rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
     rem_fc->setSize(rem);
+    rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
     rem_fc->linkNext(NULL);
-    rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
     // Above must occur before BOT is updated below.
     assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
+    OrderAccess::storestore();
     _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
+    assert(fc->isFree(), "Error");
+    fc->setSize(prefix_size);
     if (rem >= IndexSetSize) {
       returnChunkToDictionary(rem_fc);
       dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
@@ -2815,11 +2876,12 @@
   for (ssize_t i = n-1; i > 0; i--) {
     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
     ffc->setSize(word_sz);
+    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
     ffc->linkNext(NULL);
-    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     // splitting from the right, fc_size == (n - i + 1) * wordsize
-    _bt.mark_block((HeapWord*)ffc, word_sz);
+    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
     fc_size -= word_sz;
     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
@@ -2828,9 +2890,11 @@
     fl->returnChunkAtHead(ffc);
   }
   // First chunk
+  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
+  // The blocks above should show their new sizes before the first block below
   fc->setSize(word_sz);
+  fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
   fc->linkNext(NULL);
-  fc->linkPrev(NULL);
   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   _bt.verify_single_block((HeapWord*)fc, fc->size());
   fl->returnChunkAtHead(fc);
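The recurring fix in this change is an ordering protocol for block-parallel GC threads: a carved-off free chunk's header (its size and the NULL prev link that marks it free) must become visible before the block offset table (BOT) update that lets other threads find and parse the chunk, so each split site gains an OrderAccess::storestore() between the two groups of stores. Below is a minimal standalone sketch of that initialize-then-publish pattern, using C++11 atomics in place of HotSpot's OrderAccess; the Block type, bot_slot, and function names are hypothetical, not HotSpot code.

// Standalone illustration (not HotSpot code) of the ordering the patch
// enforces: fully initialize a block's header, fence, then publish it.
#include <atomic>
#include <cassert>
#include <cstddef>

struct Block {
  size_t size;   // word size of this free block
  Block* prev;   // prev == NULL marks the block as free
  Block* next;
};

// A toy "BOT": one shared slot that tells readers where the block starts.
static std::atomic<Block*> bot_slot{nullptr};

// Writer side: initialize the header, then fence, then publish in the BOT.
void publish_block(Block* b, size_t word_sz) {
  b->size = word_sz;
  b->prev = nullptr;   // mark free for other (parallel) GC threads
  b->next = nullptr;
  // Plays the role of OrderAccess::storestore(): no header store may be
  // reordered past the BOT update below.
  std::atomic_thread_fence(std::memory_order_release);
  bot_slot.store(b, std::memory_order_relaxed);
}

// Reader side: once the block is visible through the BOT, its header
// fields are guaranteed to be initialized.
size_t read_block_size() {
  Block* b = bot_slot.load(std::memory_order_acquire);
  if (b == nullptr) return 0;  // not yet published
  assert(b->prev == nullptr && "published block must look free");
  return b->size;
}

int main() {
  static Block storage;
  publish_block(&storage, 16);
  return read_block_size() == 16 ? 0 : 1;
}

The same reasoning explains why the patch reorders linkPrev(NULL) before linkNext(NULL) at each site: the store that marks the chunk free must be among the stores fenced off from the BOT update.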
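The corrected comment and the new assert in the k-way split loop pin down the loop invariant: when carving word_sz-sized pieces off the right end of a k*word_sz chunk, the remaining prefix after carving piece i is exactly i*word_sz words (the old comment claimed (k - i + 1) * wordsize). A tiny self-contained check of that arithmetic, with assumed sizes and no HotSpot types:

// Sketch of the split-from-the-right counting invariant; word_sz and k
// are arbitrary assumed values, not taken from the patch.
#include <cassert>
#include <cstddef>

int main() {
  const size_t word_sz = 8;      // words per carved piece (assumed)
  const int    k       = 5;      // number of pieces to carve (assumed)
  size_t fc_size = k * word_sz;  // size of the parent free chunk
  for (int i = k - 1; i >= 0; i--) {
    // piece i sits at offset i * word_sz; carve it off the right end
    fc_size -= word_sz;
    assert(fc_size == (size_t)i * word_sz);  // the patched invariant
  }
  assert(fc_size == 0);  // the whole chunk has been accounted for
  return 0;
}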
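VerifyAllBlksClosure now records the previous block's address, size, and kind so that a zero-size block, which would otherwise livelock the heap walk, is reported together with the block that preceded it before the guarantee fires. A minimal sketch of that remember-the-last-step diagnostic pattern, with a hypothetical walker rather than the HotSpot closure:

// Sketch: keep the last visited block's details so a failure report can
// show both the current and the previous step of the walk.
#include <cstdio>
#include <cstdlib>
#include <cstddef>

struct BlockWalker {
  const char* last_kind = "none";
  size_t      last_size = 0;

  // Returns the block's size; zero would mean no forward progress.
  size_t visit(const char* kind, size_t size) {
    if (size == 0) {
      // Report the current *and* previous block, then fail loudly
      // instead of silently looping forever on a zero-size block.
      std::fprintf(stderr,
                   "Livelock: no rank reduction!\n"
                   "  Current:  kind = %s, size = %zu\n"
                   "  Previous: kind = %s, size = %zu\n",
                   kind, size, last_kind, last_size);
      std::abort();
    }
    last_kind = kind;
    last_size = size;
    return size;
  }
};

int main() {
  BlockWalker w;
  w.visit("obj", 24);
  w.visit("free", 8);  // fine: sizes are nonzero, the walk advances
  return 0;
}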