src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

changeset:   2071:be3f9c242c9d
parent:      1934:e9ff18c4ace7
child:       2132:179464550c7d
1 /* 1 /*
2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 * 4 *
5 * This code is free software; you can redistribute it and/or modify it 5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as 6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
400 } 400 }
401 } 401 }
402 return res; 402 return res;
403 } 403 }
404 404
405 void LinearAllocBlock::print_on(outputStream* st) const {
406 st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
407 ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
408 _ptr, _word_size, _refillSize, _allocation_size_limit);
409 }
410
411 void CompactibleFreeListSpace::print_on(outputStream* st) const {
412 st->print_cr("COMPACTIBLE FREELIST SPACE");
413 st->print_cr(" Space:");
414 Space::print_on(st);
415
416 st->print_cr("promoInfo:");
417 _promoInfo.print_on(st);
418
419 st->print_cr("_smallLinearAllocBlock");
420 _smallLinearAllocBlock.print_on(st);
421
422 // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
423
424 st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
425 _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
426 }
427
405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st) 428 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
406 const { 429 const {
407 reportIndexedFreeListStatistics(); 430 reportIndexedFreeListStatistics();
408 gclog_or_tty->print_cr("Layout of Indexed Freelists"); 431 gclog_or_tty->print_cr("Layout of Indexed Freelists");
409 gclog_or_tty->print_cr("---------------------------"); 432 gclog_or_tty->print_cr("---------------------------");
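The two print_on() methods added above follow the usual dump-internal-state idiom. A minimal standalone sketch of the same idiom, with plain C stdio standing in for HotSpot's outputStream and its PTR_FORMAT/SIZE_FORMAT macros (the struct mirrors the fields the diff prints; everything else is illustrative):

    #include <cstdio>
    #include <cstddef>

    // Stand-in for LinearAllocBlock; field names copied from the diff,
    // the rest is illustrative.
    struct LinearAllocBlockSketch {
      void*  _ptr;
      size_t _word_size;
      size_t _refillSize;
      size_t _allocation_size_limit;

      void print_on(FILE* st) const {
        std::fprintf(st,
            " LinearAllocBlock: ptr = %p, word_size = %zu"
            ", refillsize = %zu, allocation_size_limit = %zu\n",
            _ptr, _word_size, _refillSize, _allocation_size_limit);
      }
    };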
555 } 578 }
556 579
557 void CompactibleFreeListSpace::set_end(HeapWord* value) { 580 void CompactibleFreeListSpace::set_end(HeapWord* value) {
558 HeapWord* prevEnd = end(); 581 HeapWord* prevEnd = end();
559 assert(prevEnd != value, "unnecessary set_end call"); 582 assert(prevEnd != value, "unnecessary set_end call");
560 assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block"); 583 assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
584 "New end is below unallocated block");
561 _end = value; 585 _end = value;
562 if (prevEnd != NULL) { 586 if (prevEnd != NULL) {
563 // Resize the underlying block offset table. 587 // Resize the underlying block offset table.
564 _bt.resize(pointer_delta(value, bottom())); 588 _bt.resize(pointer_delta(value, bottom()));
565 if (value <= prevEnd) { 589 if (value <= prevEnd) {
566 assert(value >= unallocated_block(), "New end is below unallocated block"); 590 assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
591 "New end is below unallocated block");
567 } else { 592 } else {
568 // Now, take this new chunk and add it to the free blocks. 593 // Now, take this new chunk and add it to the free blocks.
569 // Note that the BOT has not yet been updated for this block. 594 // Note that the BOT has not yet been updated for this block.
570 size_t newFcSize = pointer_delta(value, prevEnd); 595 size_t newFcSize = pointer_delta(value, prevEnd);
571 // XXX This is REALLY UGLY and should be fixed up. XXX 596 // XXX This is REALLY UGLY and should be fixed up. XXX
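The set_end() change makes the unallocated-block invariant conditional on BlockOffsetArrayUseUnallocatedBlock. The shape of that guard as a standalone sketch (the flag variable and pointer parameters are stand-ins, not the real signatures):

    #include <cassert>
    #include <cstddef>

    static bool BlockOffsetArrayUseUnallocatedBlock = true; // VM-flag stand-in

    void set_end_sketch(char* prevEnd, char* value, char* unallocated_block) {
      // With the flag off, the unallocated block is not maintained, so the
      // comparison would fire spuriously; the extra '!flag ||' term makes
      // the assert vacuously true in that configuration.
      assert(prevEnd == NULL ||
             !BlockOffsetArrayUseUnallocatedBlock ||
             value >= unallocated_block);
      // ... resize the BOT, then add any new tail as a free chunk ...
    }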
936 return _bt.block_start_careful(p); 961 return _bt.block_start_careful(p);
937 } 962 }
938 963
939 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const { 964 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
940 NOT_PRODUCT(verify_objects_initialized()); 965 NOT_PRODUCT(verify_objects_initialized());
941 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
942 // This must be volatile, or else there is a danger that the compiler 966 // This must be volatile, or else there is a danger that the compiler
943 // will compile the code below into a sometimes-infinite loop, by keeping 967 // will compile the code below into a sometimes-infinite loop, by keeping
944 // the value read the first time in a register. 968 // the value read the first time in a register.
945 while (true) { 969 while (true) {
946 // We must do this until we get a consistent view of the object. 970 // We must do this until we get a consistent view of the object.
955 } 979 }
956 } else { 980 } else {
957 // must read from what 'p' points to in each loop. 981 // must read from what 'p' points to in each loop.
958 klassOop k = ((volatile oopDesc*)p)->klass_or_null(); 982 klassOop k = ((volatile oopDesc*)p)->klass_or_null();
959 if (k != NULL) { 983 if (k != NULL) {
960 assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop."); 984 assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
961 oop o = (oop)p; 985 oop o = (oop)p;
962 assert(o->is_parsable(), "Should be parsable"); 986 assert(o->is_parsable(), "Should be parsable");
963 assert(o->is_oop(true /* ignore mark word */), "Should be an oop."); 987 assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
964 size_t res = o->size_given_klass(k->klass_part()); 988 size_t res = o->size_given_klass(k->klass_part());
965 res = adjustObjectSize(res); 989 res = adjustObjectSize(res);
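The comment retained at 966-968 is the heart of block_size(): without the volatile qualifier the compiler may legally hoist the klass load out of the loop and spin forever on a stale register. A reduced sketch of the idiom (Header and the returned size are illustrative; the real code additionally relies on HotSpot's own memory-ordering machinery):

    #include <cstddef>

    struct Header { void* klass; };   // stand-in for the object header

    size_t spin_until_parsable(const Header* p) {
      while (true) {
        // Must re-read what 'p' points to on every pass; the volatile cast
        // forces a fresh load, so a concurrent initializer's store to the
        // klass word eventually becomes visible here.
        void* k = ((volatile const Header*)p)->klass;
        if (k != NULL) {
          return 1; // real code: o->size_given_klass(k->klass_part())
        }
        // klass not yet published: object is mid-initialization; retry.
      }
    }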
1229 // if successful, the above also adjusts block offset table 1253 // if successful, the above also adjusts block offset table
1230 // Note that this call will refill the LinAB to 1254 // Note that this call will refill the LinAB to
1231 // satisfy the request. This is different than 1255 // satisfy the request. This is different than
1232 // evm. 1256 // evm.
1233 // Don't record chunk off a LinAB? smallSplitBirth(size); 1257 // Don't record chunk off a LinAB? smallSplitBirth(size);
1234
1235 } else { 1258 } else {
1236 // Raid the exact free lists larger than size, even if they are not 1259 // Raid the exact free lists larger than size, even if they are not
1237 // overpopulated. 1260 // overpopulated.
1238 res = (HeapWord*) getChunkFromGreater(size); 1261 res = (HeapWord*) getChunkFromGreater(size);
1239 } 1262 }
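"Raid the exact free lists larger than size" is classic segregated-fit behavior: walk the indexed array upward from the request size until some list is non-empty, take its head, and split. A generic sketch of that search, not the real getChunkFromGreater():

    #include <cstddef>

    struct Chunk { Chunk* next; size_t size; };

    // lists[sz] heads the exact free list of sz-word chunks, for
    // sz < index_set_size (all names illustrative).
    Chunk* get_chunk_from_greater_sketch(Chunk* lists[],
                                         size_t index_set_size,
                                         size_t size) {
      for (size_t sz = size + 1; sz < index_set_size; sz++) {
        if (lists[sz] != NULL) {
          Chunk* c = lists[sz];
          lists[sz] = c->next;   // unlink the head
          // ... split c into a size-word chunk plus a (sz - size)-word
          //     remainder, returning the remainder to lists[sz - size] ...
          return c;
        }
      }
      return NULL; // caller falls back to the dictionary
    }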
1447 splitBirth(size); 1470 splitBirth(size);
1448 repairLinearAllocBlock(blk); 1471 repairLinearAllocBlock(blk);
1449 // Update BOT last so that other (parallel) GC threads see a consistent 1472 // Update BOT last so that other (parallel) GC threads see a consistent
1450 // view of the BOT and free blocks. 1473 // view of the BOT and free blocks.
1451 // Above must occur before BOT is updated below. 1474 // Above must occur before BOT is updated below.
1475 OrderAccess::storestore();
1452 _bt.split_block(res, blk_size, size); // adjust block offset table 1476 _bt.split_block(res, blk_size, size); // adjust block offset table
1453 } 1477 }
1454 return res; 1478 return res;
1455 } 1479 }
1456 1480
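The new OrderAccess::storestore() calls make explicit what the comment "Above must occur before BOT is updated below" only promised: the chunk-header stores must drain before the BOT store that lets other GC threads find the block. The same ordering in portable C++11 terms (bot_entry and publish_free_chunk are illustrative stand-ins, not HotSpot API):

    #include <atomic>

    std::atomic<char*> bot_entry{nullptr}; // stand-in for one BOT slot

    void publish_free_chunk(char* chunk) {
      // 1. header stores: setSize(), linkPrev(NULL), linkNext(NULL)
      // 2. store-store fence: no store below may be reordered above here
      std::atomic_thread_fence(std::memory_order_release); // ~storestore()
      // 3. BOT update; a reader that loads this entry with matching
      //    acquire ordering also observes the header stores from step 1
      bot_entry.store(chunk, std::memory_order_relaxed);
    }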
1475 splitBirth(size); 1499 splitBirth(size);
1476 repairLinearAllocBlock(blk); 1500 repairLinearAllocBlock(blk);
1477 // Update BOT last so that other (parallel) GC threads see a consistent 1501 // Update BOT last so that other (parallel) GC threads see a consistent
1478 // view of the BOT and free blocks. 1502 // view of the BOT and free blocks.
1479 // Above must occur before BOT is updated below. 1503 // Above must occur before BOT is updated below.
1504 OrderAccess::storestore();
1480 _bt.split_block(res, blk_size, size); // adjust block offset table 1505 _bt.split_block(res, blk_size, size); // adjust block offset table
1481 _bt.allocated(res, size); 1506 _bt.allocated(res, size);
1482 } 1507 }
1483 return res; 1508 return res;
1484 } 1509 }
1854 ffc->setSize(rem_size); 1879 ffc->setSize(rem_size);
1855 ffc->linkNext(NULL); 1880 ffc->linkNext(NULL);
1856 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads. 1881 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
1857 // Above must occur before BOT is updated below. 1882 // Above must occur before BOT is updated below.
1858 // adjust block offset table 1883 // adjust block offset table
1884 OrderAccess::storestore();
1885 assert(chunk->isFree() && ffc->isFree(), "Error");
1859 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); 1886 _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1860 if (rem_size < SmallForDictionary) { 1887 if (rem_size < SmallForDictionary) {
1861 bool is_par = (SharedHeap::heap()->n_par_threads() > 0); 1888 bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1862 if (is_par) _indexedFreeListParLocks[rem_size]->lock(); 1889 if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1863 returnChunkToFreeList(ffc); 1890 returnChunkToFreeList(ffc);
1909 1936
1910 void CompactibleFreeListSpace::save_marks() { 1937 void CompactibleFreeListSpace::save_marks() {
1911 // mark the "end" of the used space at the time of this call; 1938 // mark the "end" of the used space at the time of this call;
1912 // note, however, that promoted objects from this point 1939 // note, however, that promoted objects from this point
1913 // on are tracked in the _promoInfo below. 1940 // on are tracked in the _promoInfo below.
1914 set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? 1941 set_saved_mark_word(unallocated_block());
1915 unallocated_block() : end());
1916 // inform allocator that promotions should be tracked. 1942 // inform allocator that promotions should be tracked.
1917 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); 1943 assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1918 _promoInfo.startTrackingPromotions(); 1944 _promoInfo.startTrackingPromotions();
1919 } 1945 }
1920 1946
2236 splitBirth(to1); 2262 splitBirth(to1);
2237 splitBirth(to2); 2263 splitBirth(to2);
2238 } 2264 }
2239 2265
2240 void CompactibleFreeListSpace::print() const { 2266 void CompactibleFreeListSpace::print() const {
2241 tty->print(" CompactibleFreeListSpace"); 2267 Space::print_on(tty);
2242 Space::print();
2243 } 2268 }
2244 2269
2245 void CompactibleFreeListSpace::prepare_for_verify() { 2270 void CompactibleFreeListSpace::prepare_for_verify() {
2246 assert_locked(); 2271 assert_locked();
2247 repairLinearAllocationBlocks(); 2272 repairLinearAllocationBlocks();
2251 2276
2252 class VerifyAllBlksClosure: public BlkClosure { 2277 class VerifyAllBlksClosure: public BlkClosure {
2253 private: 2278 private:
2254 const CompactibleFreeListSpace* _sp; 2279 const CompactibleFreeListSpace* _sp;
2255 const MemRegion _span; 2280 const MemRegion _span;
2281 HeapWord* _last_addr;
2282 size_t _last_size;
2283 bool _last_was_obj;
2284 bool _last_was_live;
2256 2285
2257 public: 2286 public:
2258 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, 2287 VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2259 MemRegion span) : _sp(sp), _span(span) { } 2288 MemRegion span) : _sp(sp), _span(span),
2289 _last_addr(NULL), _last_size(0),
2290 _last_was_obj(false), _last_was_live(false) { }
2260 2291
2261 virtual size_t do_blk(HeapWord* addr) { 2292 virtual size_t do_blk(HeapWord* addr) {
2262 size_t res; 2293 size_t res;
2294 bool was_obj = false;
2295 bool was_live = false;
2263 if (_sp->block_is_obj(addr)) { 2296 if (_sp->block_is_obj(addr)) {
2297 was_obj = true;
2264 oop p = oop(addr); 2298 oop p = oop(addr);
2265 guarantee(p->is_oop(), "Should be an oop"); 2299 guarantee(p->is_oop(), "Should be an oop");
2266 res = _sp->adjustObjectSize(p->size()); 2300 res = _sp->adjustObjectSize(p->size());
2267 if (_sp->obj_is_alive(addr)) { 2301 if (_sp->obj_is_alive(addr)) {
2302 was_live = true;
2268 p->verify(); 2303 p->verify();
2269 } 2304 }
2270 } else { 2305 } else {
2271 FreeChunk* fc = (FreeChunk*)addr; 2306 FreeChunk* fc = (FreeChunk*)addr;
2272 res = fc->size(); 2307 res = fc->size();
2273 if (FLSVerifyLists && !fc->cantCoalesce()) { 2308 if (FLSVerifyLists && !fc->cantCoalesce()) {
2274 guarantee(_sp->verifyChunkInFreeLists(fc), 2309 guarantee(_sp->verifyChunkInFreeLists(fc),
2275 "Chunk should be on a free list"); 2310 "Chunk should be on a free list");
2276 } 2311 }
2277 } 2312 }
2278 guarantee(res != 0, "Livelock: no rank reduction!"); 2313 if (res == 0) {
2314 gclog_or_tty->print_cr("Livelock: no rank reduction!");
2315 gclog_or_tty->print_cr(
2316 " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2317 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2318 addr, res, was_obj ?"true":"false", was_live ?"true":"false",
2319 _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2320 _sp->print_on(gclog_or_tty);
2321 guarantee(false, "Seppuku!");
2322 }
2323 _last_addr = addr;
2324 _last_size = res;
2325 _last_was_obj = was_obj;
2326 _last_was_live = was_live;
2279 return res; 2327 return res;
2280 } 2328 }
2281 }; 2329 };
2282 2330
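VerifyAllBlksClosure now remembers the previous block's address, size, and kind, so a zero-sized block — which would otherwise spin the block walk forever — is reported together with its neighbor, the usual culprit in heap corruption. How such a closure is driven, in sketch form (HeapWord and BlkClosure reduced to stand-ins):

    #include <cstddef>

    typedef unsigned long HeapWord;   // stand-in for HotSpot's HeapWord

    struct BlkClosure {
      virtual size_t do_blk(HeapWord* addr) = 0; // returns the block size
    };

    void blk_iterate_sketch(HeapWord* bottom, HeapWord* end, BlkClosure* cl) {
      for (HeapWord* cur = bottom; cur < end; ) {
        cur += cl->do_blk(cur); // a zero return would never advance: the
      }                         // livelock the closure above now reports
    }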
2283 class VerifyAllOopsClosure: public OopClosure { 2331 class VerifyAllOopsClosure: public OopClosure {
2519 } 2567 }
2520 } 2568 }
2521 2569
2522 HeapWord* CFLS_LAB::alloc(size_t word_sz) { 2570 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2523 FreeChunk* res; 2571 FreeChunk* res;
2524 word_sz = _cfls->adjustObjectSize(word_sz); 2572 guarantee(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2525 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) { 2573 if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
2526 // This locking manages sync with other large object allocations. 2574 // This locking manages sync with other large object allocations.
2527 MutexLockerEx x(_cfls->parDictionaryAllocLock(), 2575 MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2528 Mutex::_no_safepoint_check_flag); 2576 Mutex::_no_safepoint_check_flag);
2529 res = _cfls->getChunkFromDictionaryExact(word_sz); 2577 res = _cfls->getChunkFromDictionaryExact(word_sz);
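The replaced line at 2572 shifts a responsibility: alloc() used to normalize word_sz itself; it now requires callers to pass an already-adjusted size and merely checks that with guarantee(). In sketch form, with a stand-in for adjustObjectSize() (the MinChunkSize value is illustrative):

    #include <cassert>
    #include <cstddef>

    const size_t MinChunkSize = 4;           // illustrative minimum

    size_t adjust_object_size(size_t sz) {   // stand-in for adjustObjectSize()
      return sz < MinChunkSize ? MinChunkSize : sz;
    }

    void* lab_alloc(size_t word_sz) {
      // New contract: sizes arrive pre-adjusted. Adjusting is idempotent,
      // so the check is cheap and catches callers that skipped it.
      assert(word_sz == adjust_object_size(word_sz));
      // ... satisfy the request from the per-thread free lists ...
      return NULL; // sketch only
    }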
2665 size_t cur_sz; 2713 size_t cur_sz;
2666 for (k = 1, cur_sz = k * word_sz, found = false; 2714 for (k = 1, cur_sz = k * word_sz, found = false;
2667 (cur_sz < CompactibleFreeListSpace::IndexSetSize) && 2715 (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2668 (CMSSplitIndexedFreeListBlocks || k <= 1); 2716 (CMSSplitIndexedFreeListBlocks || k <= 1);
2669 k++, cur_sz = k * word_sz) { 2717 k++, cur_sz = k * word_sz) {
2670 FreeList* gfl = &_indexedFreeList[cur_sz];
2671 FreeList fl_for_cur_sz; // Empty. 2718 FreeList fl_for_cur_sz; // Empty.
2672 fl_for_cur_sz.set_size(cur_sz); 2719 fl_for_cur_sz.set_size(cur_sz);
2673 { 2720 {
2674 MutexLockerEx x(_indexedFreeListParLocks[cur_sz], 2721 MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2675 Mutex::_no_safepoint_check_flag); 2722 Mutex::_no_safepoint_check_flag);
2723 FreeList* gfl = &_indexedFreeList[cur_sz];
2676 if (gfl->count() != 0) { 2724 if (gfl->count() != 0) {
2677 // nn is the number of chunks of size cur_sz that 2725 // nn is the number of chunks of size cur_sz that
2678 // we'd need to split k-ways each, in order to create 2726 // we'd need to split k-ways each, in order to create
2679 // "n" chunks of size word_sz each. 2727 // "n" chunks of size word_sz each.
2680 const size_t nn = MAX2(n/k, (size_t)1); 2728 const size_t nn = MAX2(n/k, (size_t)1);
2683 if (k > 1) { 2731 if (k > 1) {
2684 // Update split death stats for the cur_sz-size blocks list: 2732 // Update split death stats for the cur_sz-size blocks list:
2685 // we increment the split death count by the number of blocks 2733 // we increment the split death count by the number of blocks
2686 // we just took from the cur_sz-size blocks list and which 2734 // we just took from the cur_sz-size blocks list and which
2687 // we will be splitting below. 2735 // we will be splitting below.
2688 ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() + 2736 ssize_t deaths = gfl->splitDeaths() +
2689 fl_for_cur_sz.count(); 2737 fl_for_cur_sz.count();
2690 _indexedFreeList[cur_sz].set_splitDeaths(deaths); 2738 gfl->set_splitDeaths(deaths);
2691 } 2739 }
2692 } 2740 }
2693 } 2741 }
2694 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1. 2742 // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
2695 if (found) { 2743 if (found) {
2701 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) { 2749 while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
2702 // Must do this in reverse order, so that anybody attempting to 2750 // Must do this in reverse order, so that anybody attempting to
2703 // access the main chunk sees it as a single free block until we 2751 // access the main chunk sees it as a single free block until we
2704 // change it. 2752 // change it.
2705 size_t fc_size = fc->size(); 2753 size_t fc_size = fc->size();
2754 assert(fc->isFree(), "Error");
2706 for (int i = k-1; i >= 0; i--) { 2755 for (int i = k-1; i >= 0; i--) {
2707 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz); 2756 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2757 assert((i != 0) ||
2758 ((fc == ffc) && ffc->isFree() &&
2759 (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2760 "Counting error");
2708 ffc->setSize(word_sz); 2761 ffc->setSize(word_sz);
2762 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2709 ffc->linkNext(NULL); 2763 ffc->linkNext(NULL);
2710 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2711 // Above must occur before BOT is updated below. 2764 // Above must occur before BOT is updated below.
2712 // splitting from the right, fc_size == (k - i + 1) * wordsize 2765 OrderAccess::storestore();
2713 _bt.mark_block((HeapWord*)ffc, word_sz); 2766 // splitting from the right, fc_size == i * word_sz
2767 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2714 fc_size -= word_sz; 2768 fc_size -= word_sz;
2715 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size()); 2769 assert(fc_size == i*word_sz, "Error");
2770 _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2716 _bt.verify_single_block((HeapWord*)fc, fc_size); 2771 _bt.verify_single_block((HeapWord*)fc, fc_size);
2717 _bt.verify_single_block((HeapWord*)ffc, ffc->size()); 2772 _bt.verify_single_block((HeapWord*)ffc, word_sz);
2718 // Push this on "fl". 2773 // Push this on "fl".
2719 fl->returnChunkAtHead(ffc); 2774 fl->returnChunkAtHead(ffc);
2720 } 2775 }
2721 // TRAP 2776 // TRAP
2722 assert(fl->tail()->next() == NULL, "List invariant."); 2777 assert(fl->tail()->next() == NULL, "List invariant.");
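Worked example of the corrected loop invariant (the new assert fc_size == i*word_sz): take k = 4 and word_sz = 8, so fc starts as one free block of 32 words. The loop carves from the right: at i = 3, ffc = fc + 24 and fc_size drops 32 -> 24 = 3*8; at i = 2, ffc = fc + 16 and fc_size drops to 16; at i = 1, ffc = fc + 8 and fc_size drops to 8; at i = 0, ffc coincides with fc itself. Carving right-to-left means any concurrent thread that reaches fc through the BOT still sees one well-formed free block covering the not-yet-carved prefix at every step — which is also why the free-marking store linkPrev(NULL) now precedes the BOT update behind a storestore().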
2742 while (n > 0) { 2797 while (n > 0) {
2743 fc = dictionary()->getChunk(MAX2(n * word_sz, 2798 fc = dictionary()->getChunk(MAX2(n * word_sz,
2744 _dictionary->minSize()), 2799 _dictionary->minSize()),
2745 FreeBlockDictionary::atLeast); 2800 FreeBlockDictionary::atLeast);
2746 if (fc != NULL) { 2801 if (fc != NULL) {
2747 _bt.allocated((HeapWord*)fc, fc->size()); // update _unallocated_blk 2802 _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
2748 dictionary()->dictCensusUpdate(fc->size(), 2803 dictionary()->dictCensusUpdate(fc->size(),
2749 true /*split*/, 2804 true /*split*/,
2750 false /*birth*/); 2805 false /*birth*/);
2751 break; 2806 break;
2752 } else { 2807 } else {
2753 n--; 2808 n--;
2754 } 2809 }
2755 } 2810 }
2756 if (fc == NULL) return; 2811 if (fc == NULL) return;
2812 // Otherwise, split up that block.
2757 assert((ssize_t)n >= 1, "Control point invariant"); 2813 assert((ssize_t)n >= 1, "Control point invariant");
2758 // Otherwise, split up that block. 2814 assert(fc->isFree(), "Error: should be a free block");
2815 _bt.verify_single_block((HeapWord*)fc, fc->size());
2759 const size_t nn = fc->size() / word_sz; 2816 const size_t nn = fc->size() / word_sz;
2760 n = MIN2(nn, n); 2817 n = MIN2(nn, n);
2761 assert((ssize_t)n >= 1, "Control point invariant"); 2818 assert((ssize_t)n >= 1, "Control point invariant");
2762 rem = fc->size() - n * word_sz; 2819 rem = fc->size() - n * word_sz;
2763 // If there is a remainder, and it's too small, allocate one fewer. 2820 // If there is a remainder, and it's too small, allocate one fewer.
2771 // enough to leave a viable remainder. We are unable to 2828 // enough to leave a viable remainder. We are unable to
2772 // allocate even one block. Return fc to the 2829 // allocate even one block. Return fc to the
2773 // dictionary and return, leaving "fl" empty. 2830 // dictionary and return, leaving "fl" empty.
2774 if (n == 0) { 2831 if (n == 0) {
2775 returnChunkToDictionary(fc); 2832 returnChunkToDictionary(fc);
2833 assert(fl->count() == 0, "We never allocated any blocks");
2776 return; 2834 return;
2777 } 2835 }
2778 2836
2779 // First return the remainder, if any. 2837 // First return the remainder, if any.
2780 // Note that we hold the lock until we decide if we're going to give 2838 // Note that we hold the lock until we decide if we're going to give
2783 // hit if the block is a small block.) 2841 // hit if the block is a small block.)
2784 if (rem > 0) { 2842 if (rem > 0) {
2785 size_t prefix_size = n * word_sz; 2843 size_t prefix_size = n * word_sz;
2786 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size); 2844 rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2787 rem_fc->setSize(rem); 2845 rem_fc->setSize(rem);
2846 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2788 rem_fc->linkNext(NULL); 2847 rem_fc->linkNext(NULL);
2789 rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2790 // Above must occur before BOT is updated below. 2848 // Above must occur before BOT is updated below.
2791 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error"); 2849 assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2850 OrderAccess::storestore();
2792 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size); 2851 _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2852 assert(fc->isFree(), "Error");
2853 fc->setSize(prefix_size);
2793 if (rem >= IndexSetSize) { 2854 if (rem >= IndexSetSize) {
2794 returnChunkToDictionary(rem_fc); 2855 returnChunkToDictionary(rem_fc);
2795 dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/); 2856 dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
2796 rem_fc = NULL; 2857 rem_fc = NULL;
2797 } 2858 }
2813 size_t fc_size = n * word_sz; 2874 size_t fc_size = n * word_sz;
2814 // All but first chunk in this loop 2875 // All but first chunk in this loop
2815 for (ssize_t i = n-1; i > 0; i--) { 2876 for (ssize_t i = n-1; i > 0; i--) {
2816 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz); 2877 FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2817 ffc->setSize(word_sz); 2878 ffc->setSize(word_sz);
2879 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2818 ffc->linkNext(NULL); 2880 ffc->linkNext(NULL);
2819 ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2820 // Above must occur before BOT is updated below. 2881 // Above must occur before BOT is updated below.
2882 OrderAccess::storestore();
2821 // splitting from the right, fc_size == i * word_sz 2883 // splitting from the right, fc_size == i * word_sz
2822 _bt.mark_block((HeapWord*)ffc, word_sz); 2884 _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2823 fc_size -= word_sz; 2885 fc_size -= word_sz;
2824 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size()); 2886 _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2825 _bt.verify_single_block((HeapWord*)ffc, ffc->size()); 2887 _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2826 _bt.verify_single_block((HeapWord*)fc, fc_size); 2888 _bt.verify_single_block((HeapWord*)fc, fc_size);
2827 // Push this on "fl". 2889 // Push this on "fl".
2828 fl->returnChunkAtHead(ffc); 2890 fl->returnChunkAtHead(ffc);
2829 } 2891 }
2830 // First chunk 2892 // First chunk
2893 assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
2894 // The blocks above should show their new sizes before the first block below
2831 fc->setSize(word_sz); 2895 fc->setSize(word_sz);
2896 fc->linkPrev(NULL); // idempotent wrt free-ness, see assert above
2832 fc->linkNext(NULL); 2897 fc->linkNext(NULL);
2833 fc->linkPrev(NULL);
2834 _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); 2898 _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2835 _bt.verify_single_block((HeapWord*)fc, fc->size()); 2899 _bt.verify_single_block((HeapWord*)fc, fc->size());
2836 fl->returnChunkAtHead(fc); 2900 fl->returnChunkAtHead(fc);
2837 2901
2838 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks"); 2902 assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
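For concreteness (numbers illustrative): if n = 10 blocks of word_sz = 8 are requested and the dictionary hands back an fc of 85 words, then nn = 85/8 = 10, n stays 10, and rem = 85 - 80 = 5 words. If 5 words is below the minimum viable chunk, the elided code allocates one fewer (n = 9, rem = 13); if n reaches 0 this way, the whole chunk goes back to the dictionary and fl stays empty, exactly what the new assert(fl->count() == 0) documents. A surviving remainder is carved off at fc + n*word_sz, published as free (linkPrev(NULL), then storestore()), and returned to the dictionary when rem >= IndexSetSize; smaller remainders presumably go back to an indexed list under its parallel lock, as in the split path shown earlier.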
