Wed, 23 Dec 2009 09:23:54 -0800
6631166: CMS: better heuristics when combatting fragmentation
Summary: Autonomic per-worker free block cache sizing, tunable coalescing policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking.
Reviewed-by: jmasa
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//
// Free block maintenance for Concurrent Mark Sweep Generation
//
// The main data structures for free blocks are
// . an indexed array of small free blocks, and
// . a dictionary of large free blocks
//

// No virtuals in FreeChunk (don't want any vtables).

// A FreeChunk is merely a chunk that can be in a doubly linked list
// and has a size field. NOTE: FreeChunks are distinguished from allocated
// objects in one of two ways (by the sweeper), depending on whether the VM
// is running in 32-bit or 64-bit mode.
// In 32-bit mode, or in 64-bit mode without CompressedOops, the second word
// (prev) has its LSB set to indicate a free chunk; allocated objects' klass()
// pointers don't have their LSB set. The corresponding bit in the CMSBitMap
// is set when the chunk is allocated. There are also blocks that "look free"
// but are not part of the free list and should not be coalesced into larger
// free blocks. These free blocks have their two LSBs set.
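//
// An illustrative sketch (not part of this header) of the tag bits in the
// non-CompressedOops encoding, assuming chunk addresses are at least 4-byte
// aligned so the two low bits of the prev word are available as flags:
//
//   intptr_t bits = *((intptr_t*)chunk + 1);          // second word (prev)
//   bool is_free       = (bits & 0x1) != 0;           // LSB: a free chunk
//   bool cant_coalesce = (bits & 0x2) != 0;           // bit 1: "looks free"
//                                                     //   but don't coalesce
//   FreeChunk* prev    = (FreeChunk*)(bits & ~0x3);   // strip both flag bits
//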

class FreeChunk VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  // For 64-bit compressed oops, the markOop encodes both the size and the
  // indication that this is a FreeChunk and not an object.
  volatile size_t _size;
  FreeChunk* _prev;
  FreeChunk* _next;

  // Reinterpret the _size word as the chunk's mark word; used only when
  // compressed oops are in use (see the note above).
  markOop mark()     const volatile { return (markOop)_size; }
  void set_mark(markOop m)          { _size = (size_t)m; }

 public:
  NOT_PRODUCT(static const size_t header_size();)

  // Returns "true" if the address indicates that the block represents
  // a free chunk.
  static bool indicatesFreeChunk(const HeapWord* addr) {
    // Force a volatile read from addr, because the value might change
    // between calls. We really want the reads of _mark and _prev through
    // this pointer to be volatile, but making the fields volatile causes
    // all sorts of compilation errors.
    return ((volatile FreeChunk*)addr)->isFree();
  }
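
  // Illustrative use (hypothetical caller; the sweeper does something
  // similar): probe an arbitrary block address before treating it as a chunk:
  //
  //   if (FreeChunk::indicatesFreeChunk(addr)) {
  //     FreeChunk* fc = (FreeChunk*)addr;
  //     size_t sz = fc->size();
  //     ...
  //   }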

  bool isFree() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
    return (((intptr_t)_prev) & 0x1) == 0x1;
  }
  bool cantCoalesce() const {
    assert(isFree(), "can't get coalesce bit on not free");
    return (((intptr_t)_prev) & 0x2) == 0x2;
  }
  void dontCoalesce() {
    // the block should be free
    assert(isFree(), "Should look like a free block");
    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
  }
  FreeChunk* prev() const {
    return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
  }

  debug_only(void* prev_addr() const { return (void*)&_prev; })
  debug_only(void* next_addr() const { return (void*)&_next; })
  debug_only(void* size_addr() const { return (void*)&_size; })

  size_t size() const volatile {
    LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
    return _size;
  }
  void setSize(size_t sz) {
    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
    _size = sz;
  }
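
  // Illustrative round trip (assumed behavior; the real encoding lives in
  // markOop): under 64-bit compressed oops the first word carries both the
  // size and the "CMS free" indication, so after
  //
  //   fc->setSize(sz);   // stores markOopDesc::set_size_and_free(sz)
  //
  // both fc->isFree() and fc->size() == sz are expected to hold.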

  FreeChunk* next()   const { return _next; }

  void linkAfter(FreeChunk* ptr) {
    linkNext(ptr);
    if (ptr != NULL) ptr->linkPrev(this);
  }
  void linkAfterNonNull(FreeChunk* ptr) {
    assert(ptr != NULL, "precondition violation");
    linkNext(ptr);
    ptr->linkPrev(this);
  }
  void linkNext(FreeChunk* ptr) { _next = ptr; }
  void linkPrev(FreeChunk* ptr) {
    LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
    _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
  }
  void clearPrev()  { _prev = NULL; }
  void clearNext()  { _next = NULL; }
  void markNotFree() {
    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::prototype());)
    // Also set _prev to null (in the non-CompressedOops case clearing _prev
    // also clears the is-free LSB).
    _prev = NULL;
  }

  // Return the address past the end of this chunk
  HeapWord* end() const { return ((HeapWord*) this) + size(); }

  // debugging
  void verify()             const PRODUCT_RETURN;
  void verifyList()         const PRODUCT_RETURN;
  void mangleAllocated(size_t size) PRODUCT_RETURN;
  void mangleFreed(size_t size)     PRODUCT_RETURN;

  void print_on(outputStream* st);
};
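
// Illustrative sketch (not part of this header): threading two free chunks
// onto a doubly linked list, assuming a and b already point at suitably
// sized, aligned free storage:
//
//   FreeChunk* a = ...;  FreeChunk* b = ...;
//   a->setSize(sz_a);          // under 64-bit compressed oops this also
//   b->setSize(sz_b);          //   sets the free indication in the mark
//   a->linkAfterNonNull(b);    // a->_next = b; b's prev tagged to point at a
//   assert(a->next() == b && b->prev() == a, "list should be doubly linked");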

// Alignment helpers etc.
#define numQuanta(x, y) (((x) + (y) - 1) / (y))
enum AlignmentConstants {
  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
};
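
// Worked example (illustrative; the actual values depend on the build): on a
// 64-bit VM, sizeof(FreeChunk) is 24 bytes (three pointer-sized fields) and
// MinObjAlignmentInBytes is typically 8, so
//
//   numQuanta(24, 8) = (24 + 8 - 1) / 8 = 3 quanta,
//
// and with MinObjAlignment == 1 heap word, MinChunkSize == 3 heap words:
// no block smaller than a FreeChunk header can stand alone on a free list.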