Mon, 09 Mar 2009 13:28:46 -0700
6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair
/*
 * Copyright 2006-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
/*
 * The NUMA-aware allocator (MutableNUMASpace) is basically a modification
 * of MutableSpace which preserves interfaces but implements different
 * functionality. The space is split into chunks, one for each locality
 * group (resizing for adaptive size policy is also supported). For each
 * thread allocations are performed in the chunk corresponding to the home
 * locality group of the thread. Whenever any chunk fills in, a young
 * generation collection occurs.
 * The chunks can also be adaptively resized. The idea behind the adaptive
 * sizing is to reduce the loss of space in the eden due to fragmentation.
 * The main cause of fragmentation is uneven allocation rates of threads.
 * The allocation rate difference between locality groups may be caused
 * either by application specifics or by uneven LWP distribution by the OS.
 * Besides, the application can have fewer threads than the number of
 * locality groups.
 * In order to resize the chunks we measure the allocation rate of the
 * application between collections. After that we reshape the chunks to
 * reflect the allocation rate pattern. The AdaptiveWeightedAverage
 * exponentially decaying average is used to smooth the measurements. The
 * NUMASpaceResizeRate parameter is used to control the adaptation speed by
 * restricting the number of bytes that can be moved during the adaptation
 * phase.
 * Chunks may contain pages from a wrong locality group. The page scanner
 * has been introduced to address this problem. Remote pages typically
 * appear due to memory shortage in the target locality group. Besides,
 * Solaris would allocate a large page from a remote locality group even if
 * there are small local pages available. The page scanner scans the pages
 * right after the collection and frees remote pages in the hope that
 * subsequent reallocation will be more successful. This approach proved to
 * be useful on systems with high load where multiple processes are
 * competing for memory.
 */
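
/*
 * A simplified sketch of one adaptive sizing step described above. The
 * helper names (total_size, cur_size, chunk_rate, total_rate, lgrp_count)
 * are illustrative only; the actual computation lives in
 * adaptive_chunk_size() below.
 *
 *   // The desired chunk size is this group's share of the smoothed
 *   // allocation rate, applied to the whole space.
 *   size_t desired = (size_t)(total_size * (chunk_rate / total_rate));
 *   // NUMASpaceResizeRate caps the number of bytes that may move in one
 *   // adaptation phase, so the layout converges gradually.
 *   size_t max_step = NUMASpaceResizeRate / lgrp_count;
 *   size_t diff = desired > cur_size ? desired - cur_size : cur_size - desired;
 *   size_t step = MIN2(max_step, diff);
 *   size_t new_size = desired > cur_size ? cur_size + step : cur_size - step;
 */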
class MutableNUMASpace : public MutableSpace {
  friend class VMStructs;

  class LGRPSpace : public CHeapObj {
    int _lgrp_id;
    MutableSpace* _space;
    MemRegion _invalid_region;
    AdaptiveWeightedAverage* _alloc_rate;
    bool _allocation_failed;

    struct SpaceStats {
      size_t _local_space, _remote_space, _unbiased_space, _uncommited_space;
      size_t _large_pages, _small_pages;

      SpaceStats() {
        _local_space = 0;
        _remote_space = 0;
        _unbiased_space = 0;
        _uncommited_space = 0;
        _large_pages = 0;
        _small_pages = 0;
      }
    };

    SpaceStats _space_stats;

    char* _last_page_scanned;
    char* last_page_scanned() { return _last_page_scanned; }
    void set_last_page_scanned(char* p) { _last_page_scanned = p; }
   public:
    LGRPSpace(int l, size_t alignment) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
      _space = new MutableSpace(alignment);
      _alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
    }
    ~LGRPSpace() {
      delete _space;
      delete _alloc_rate;
    }
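
    // Record a region of this chunk whose backing pages may be invalid
    // (e.g. placed on a remote locality group). Successive reports are
    // coalesced into a single MemRegion covering all of them.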
    void add_invalid_region(MemRegion r) {
      if (!_invalid_region.is_empty()) {
        _invalid_region.set_start(MIN2(_invalid_region.start(), r.start()));
        _invalid_region.set_end(MAX2(_invalid_region.end(), r.end()));
      } else {
        _invalid_region = r;
      }
    }
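
    // Matcher used to look up a space by its lgrp id, e.g. with
    // GrowableArray<LGRPSpace*>::find().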
    static bool equals(void* lgrp_id_value, LGRPSpace* p) {
      return *(int*)lgrp_id_value == p->lgrp_id();
    }

    // Report a failed allocation.
    void set_allocation_failed() { _allocation_failed = true; }

    void sample() {
      // If there was a failed allocation make the allocation rate equal
      // to the size of the whole chunk. This ensures the progress of
      // the adaptation process.
      size_t alloc_rate_sample;
      if (_allocation_failed) {
        alloc_rate_sample = space()->capacity_in_bytes();
        _allocation_failed = false;
      } else {
        alloc_rate_sample = space()->used_in_bytes();
      }
      alloc_rate()->sample(alloc_rate_sample);
    }

    MemRegion invalid_region() const { return _invalid_region; }
    void set_invalid_region(MemRegion r) { _invalid_region = r; }
    int lgrp_id() const { return _lgrp_id; }
    MutableSpace* space() const { return _space; }
    AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; }
    void clear_alloc_rate() { _alloc_rate->clear(); }
    SpaceStats* space_stats() { return &_space_stats; }
    void clear_space_stats() { _space_stats = SpaceStats(); }

    void accumulate_statistics(size_t page_size);
    void scan_pages(size_t page_size, size_t page_count);
  };

  GrowableArray<LGRPSpace*>* _lgrp_spaces;
  size_t _page_size;
  unsigned _adaptation_cycles, _samples_count;

  void set_page_size(size_t psz) { _page_size = psz; }
  size_t page_size() const { return _page_size; }

  unsigned adaptation_cycles() { return _adaptation_cycles; }
  void set_adaptation_cycles(int v) { _adaptation_cycles = v; }

  unsigned samples_count() { return _samples_count; }
  void increment_samples_count() { ++_samples_count; }

  size_t _base_space_size;
  void set_base_space_size(size_t v) { _base_space_size = v; }
  size_t base_space_size() const { return _base_space_size; }

  // Check if the NUMA topology has changed. Add and remove spaces if needed.
  // The update can be forced by setting the force parameter equal to true.
  bool update_layout(bool force);
  // Bias region towards the lgrp.
  void bias_region(MemRegion mr, int lgrp_id);
  // Free pages in a given region.
  void free_region(MemRegion mr);
  // Get the current chunk size.
  size_t current_chunk_size(int i);
  // Get the default chunk size (equally divide the space).
  size_t default_chunk_size();
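  // (For example, a 512M space on a machine with four locality groups has a
  //  128M default chunk per group, modulo page-size alignment.)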
  // Adapt the chunk size to follow the allocation rate.
  size_t adaptive_chunk_size(int i, size_t limit);
  // Scan and free invalid pages.
  void scan_pages(size_t page_count);
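  // (The per-group _last_page_scanned cursor above suggests the scan is
  //  incremental: at most page_count pages are visited per call, bounding
  //  the work done after each collection.)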
  // Return the bottom_region and the top_region, aligned to the page_size()
  // boundary.
  // |------------------new_region---------------------------------|
  // |----bottom_region--|---intersection---|------top_region------|
  void select_tails(MemRegion new_region, MemRegion intersection,
                    MemRegion* bottom_region, MemRegion* top_region);
  // Try to merge the invalid region with the bottom or top region by
  // decreasing the intersection area. The invalid_region is returned
  // non-empty (aligned to the page_size() boundary) only if it lies strictly
  // inside the intersection; otherwise it is merged away and returned empty.
  // |------------------new_region---------------------------------|
  // |----------------|-------invalid---|--------------------------|
  // |----bottom_region--|---intersection---|------top_region------|
  void merge_regions(MemRegion new_region, MemRegion* intersection,
                     MemRegion* invalid_region);

 public:
  GrowableArray<LGRPSpace*>* lgrp_spaces() const { return _lgrp_spaces; }
  MutableNUMASpace(size_t alignment);
  virtual ~MutableNUMASpace();
  // Space initialization.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space, bool setup_pages = SetupPages);
  // Update the space layout if necessary. Do all the adaptive resizing work.
  virtual void update();
  // Update allocation rate averages.
  virtual void accumulate_statistics();

  virtual void clear(bool mangle_space);
  virtual void mangle_unused_area() PRODUCT_RETURN;
  virtual void mangle_unused_area_complete() PRODUCT_RETURN;
  virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
  virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
  virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  virtual void set_top_for_allocations() PRODUCT_RETURN;

  virtual void ensure_parsability();
  virtual size_t used_in_words() const;
  virtual size_t free_in_words() const;

  using MutableSpace::capacity_in_words;
  virtual size_t capacity_in_words(Thread* thr) const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Allocation (return NULL if full).
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* cas_allocate(size_t word_size);
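
  // The fast path of cas_allocate() is conceptually a bump-pointer CAS on
  // the chunk of the caller's home locality group. A minimal sketch in
  // terms of MutableSpace's top()/end()/top_addr() accessors (the real
  // version additionally selects the chunk and reports failures via
  // LGRPSpace::set_allocation_failed()):
  //
  //   HeapWord* obj;
  //   do {
  //     obj = s->top();
  //     if (pointer_delta(s->end(), obj) < word_size) return NULL; // full
  //   } while ((HeapWord*)Atomic::cmpxchg_ptr(obj + word_size,
  //                                           s->top_addr(), obj) != obj);
  //   return obj;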

  // Debugging
  virtual void print_on(outputStream* st) const;
  virtual void print_short_on(outputStream* st) const;
  virtual void verify(bool allow_dirty);

  virtual void set_top(HeapWord* value);
};
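
// A sketch of how a collector might drive this space. The call sequence is
// illustrative, inferred from the interface above rather than prescribed:
//
//   MutableNUMASpace* eden = new MutableNUMASpace(alignment);
//   eden->initialize(reserved, true /* clear */, true /* mangle */);
//
//   HeapWord* p = eden->cas_allocate(word_size);  // mutator fast path
//
//   eden->accumulate_statistics();  // sample per-group allocation rates
//   // ... young generation collection ...
//   eden->update();                 // re-layout chunks; scan remote pages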