Fri, 10 Oct 2014 15:51:58 +0200
8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, because they do not touch that memory at all, so the operating system never actually commits those pages. The fix is to skip the explicit initialization whenever the requested initialization value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/heapRegion.hpp"
27 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
28 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
30 #include "gc_implementation/g1/concurrentG1Refine.hpp"
31 #include "memory/allocation.hpp"
33 void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
34 G1RegionToSpaceMapper* prev_bitmap,
35 G1RegionToSpaceMapper* next_bitmap,
36 G1RegionToSpaceMapper* bot,
37 G1RegionToSpaceMapper* cardtable,
38 G1RegionToSpaceMapper* card_counts) {
39 _allocated_heapregions_length = 0;
41 _heap_mapper = heap_storage;
43 _prev_bitmap_mapper = prev_bitmap;
44 _next_bitmap_mapper = next_bitmap;
46 _bot_mapper = bot;
47 _cardtable_mapper = cardtable;
49 _card_counts_mapper = card_counts;
51 MemRegion reserved = heap_storage->reserved();
52 _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
54 _available_map.resize(_regions.length(), false);
55 _available_map.clear();
56 }
// Returns true iff the region at the given index is committed and may be
// dereferenced via at()/addr_to_region().
bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}
#ifdef ASSERT
// Debug-only helper: true iff the given region is currently on the free list.
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif
68 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
69 G1CollectedHeap* g1h = G1CollectedHeap::heap();
70 HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
71 MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
72 assert(reserved().contains(mr), "invariant");
73 return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
74 }
// Commits the memory backing num_regions regions starting at index, for the
// heap itself and for every auxiliary data structure that covers it.
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions);
  _next_bitmap_mapper->commit_regions(index, num_regions);

  _bot_mapper->commit_regions(index, num_regions);
  _cardtable_mapper->commit_regions(index, num_regions);

  _card_counts_mapper->commit_regions(index, num_regions);
}
94 void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
95 guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
96 guarantee(_num_committed >= num_regions, "pre-condition");
98 // Print before uncommitting.
99 if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
100 for (uint i = start; i < start + num_regions; i++) {
101 HeapRegion* hr = at(i);
102 G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
103 }
104 }
106 _num_committed -= (uint)num_regions;
108 _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
109 _heap_mapper->uncommit_regions(start, num_regions);
111 // Also uncommit auxiliary data
112 _prev_bitmap_mapper->uncommit_regions(start, num_regions);
113 _next_bitmap_mapper->uncommit_regions(start, num_regions);
115 _bot_mapper->uncommit_regions(start, num_regions);
116 _cardtable_mapper->uncommit_regions(start, num_regions);
118 _card_counts_mapper->uncommit_regions(start, num_regions);
119 }
121 void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
122 guarantee(num_regions > 0, "No point in calling this for zero regions");
123 commit_regions(start, num_regions);
124 for (uint i = start; i < start + num_regions; i++) {
125 if (_regions.get_by_index(i) == NULL) {
126 HeapRegion* new_hr = new_heap_region(i);
127 _regions.set_by_index(i, new_hr);
128 _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
129 }
130 }
132 _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
134 for (uint i = start; i < start + num_regions; i++) {
135 assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
136 HeapRegion* hr = at(i);
137 if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
138 G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
139 }
140 HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
141 MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
143 hr->initialize(mr);
144 insert_into_free_list(at(i));
145 }
146 }
// Expands the heap by at most num_regions, searching for uncommitted regions
// from the beginning of the region array. Returns the number of regions
// actually made available.
uint HeapRegionManager::expand_by(uint num_regions) {
  return expand_at(0, num_regions);
}
152 uint HeapRegionManager::expand_at(uint start, uint num_regions) {
153 if (num_regions == 0) {
154 return 0;
155 }
157 uint cur = start;
158 uint idx_last_found = 0;
159 uint num_last_found = 0;
161 uint expanded = 0;
163 while (expanded < num_regions &&
164 (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
165 uint to_expand = MIN2(num_regions - expanded, num_last_found);
166 make_regions_available(idx_last_found, to_expand);
167 expanded += to_expand;
168 cur = idx_last_found + num_last_found + 1;
169 }
171 verify_optional();
172 return expanded;
173 }
175 uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
176 uint found = 0;
177 size_t length_found = 0;
178 uint cur = 0;
180 while (length_found < num && cur < max_length()) {
181 HeapRegion* hr = _regions.get_by_index(cur);
182 if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
183 // This region is a potential candidate for allocation into.
184 length_found++;
185 } else {
186 // This region is not a candidate. The next region is the next possible one.
187 found = cur + 1;
188 length_found = 0;
189 }
190 cur++;
191 }
193 if (length_found == num) {
194 for (uint i = found; i < (found + num); i++) {
195 HeapRegion* hr = _regions.get_by_index(i);
196 // sanity check
197 guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
198 err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
199 " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
200 }
201 return found;
202 } else {
203 return G1_NO_HRM_INDEX;
204 }
205 }
207 HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
208 guarantee(r != NULL, "Start region must be a valid region");
209 guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
210 for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
211 HeapRegion* hr = _regions.get_by_index(i);
212 if (is_available(i)) {
213 return hr;
214 }
215 }
216 return NULL;
217 }
219 void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
220 uint len = max_length();
222 for (uint i = 0; i < len; i++) {
223 if (!is_available(i)) {
224 continue;
225 }
226 guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
227 bool res = blk->doHeapRegion(at(i));
228 if (res) {
229 blk->incomplete();
230 return;
231 }
232 }
233 }
// Finds the next run of unavailable (uncommitted) regions at or after
// start_idx. Returns the length of the run and stores its first index in
// *res_idx; returns 0 if every region from start_idx onwards is available.
uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  // Skip the leading run of available regions.
  while (cur < max_length() && is_available(cur)) {
    cur++;
  }
  if (cur == max_length()) {
    return num_regions;
  }
  *res_idx = cur;
  // Measure the run of unavailable regions starting here.
  while (cur < max_length() && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;
#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == max_length() || num_regions == 0 || is_available(cur),
         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
#endif
  return num_regions;
}
// Returns the region index at which worker worker_i should start iterating so
// that num_regions regions are distributed evenly across num_workers workers.
// NOTE(review): assumes num_regions * worker_i fits in a uint — holds for
// realistic region counts and worker numbers, but worth keeping in mind.
uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
  return num_regions * worker_i / num_workers;
}
// Applies the closure to committed regions in parallel. Each worker starts at
// its own offset (to spread contention) but wraps around and visits the whole
// index range, racing with the other workers to claim each region via its
// claim value; only the claim winner invokes the closure on a region.
// "Starts humongous" regions are processed after all their "continues
// humongous" tails — see the comment below for why the order matters.
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);

  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  for (uint count = 0; count < _allocated_heapregions_length; count++) {
    const uint index = (start_index + count) % _allocated_heapregions_length;
    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "start humongous"
    // region) and regions already claimed.
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it
    if (!r->claimHeapRegion(claim_value)) {
      continue;
    }
    // Success!
    if (r->startsHumongous()) {
      // If the region is "starts humongous" we'll iterate over its
      // "continues humongous" first; in fact we'll do them
      // first. The order is important. In one case, calling the
      // closure on the "starts humongous" region might de-allocate
      // and clear all its "continues humongous" regions and, as a
      // result, we might end up processing them twice. So, we'll do
      // them first (note: most closures will ignore them anyway) and
      // then we'll do the "starts humongous" region.
      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
        HeapRegion* chr = _regions.get_by_index(ch_index);

        assert(chr->continuesHumongous(), "Must be humongous region");
        assert(chr->humongous_start_region() == r,
               err_msg("Must work on humongous continuation of the original start region "
                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
        assert(chr->claim_value() != claim_value,
               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");

        bool claim_result = chr->claimHeapRegion(claim_value);
        // We should always be able to claim it; no one else should
        // be trying to claim this region.
        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");

        bool res2 = blk->doHeapRegion(chr);
        if (res2) {
          return;
        }

        // Right now, this holds (i.e., no closure that actually
        // does something with "continues humongous" regions
        // clears them). We might have to weaken it in the future,
        // but let's leave these two asserts here for extra safety.
        assert(chr->continuesHumongous(), "should still be the case");
        assert(chr->humongous_start_region() == r, "sanity");
      }
    }

    bool res = blk->doHeapRegion(r);
    if (res) {
      return;
    }
  }
}
// Uncommits up to num_regions_to_remove regions, scanning backwards from the
// highest allocated index and removing only committed, empty regions.
// Returns the number of regions actually uncommitted.
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    // Uncommit from the top of the found run, so the lower-indexed part of a
    // partially-removed run stays committed.
    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);

    cur -= num_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}
// Scanning backwards from start_idx, finds the closest run of committed,
// empty regions. Returns the run's length and stores its lowest index in
// *res_idx; returns 0 if no committed empty region exists at or below
// start_idx.
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  // Signed cursor so the scan can step past index 0 and stop at -1.
  jlong cur = start_idx;
  // Skip regions that are not committed-and-empty.
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}
397 void HeapRegionManager::verify() {
398 guarantee(length() <= _allocated_heapregions_length,
399 err_msg("invariant: _length: %u _allocated_length: %u",
400 length(), _allocated_heapregions_length));
401 guarantee(_allocated_heapregions_length <= max_length(),
402 err_msg("invariant: _allocated_length: %u _max_length: %u",
403 _allocated_heapregions_length, max_length()));
405 bool prev_committed = true;
406 uint num_committed = 0;
407 HeapWord* prev_end = heap_bottom();
408 for (uint i = 0; i < _allocated_heapregions_length; i++) {
409 if (!is_available(i)) {
410 prev_committed = false;
411 continue;
412 }
413 num_committed++;
414 HeapRegion* hr = _regions.get_by_index(i);
415 guarantee(hr != NULL, err_msg("invariant: i: %u", i));
416 guarantee(!prev_committed || hr->bottom() == prev_end,
417 err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
418 i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
419 guarantee(hr->hrm_index() == i,
420 err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
421 // Asserts will fire if i is >= _length
422 HeapWord* addr = hr->bottom();
423 guarantee(addr_to_region(addr) == hr, "sanity");
424 // We cannot check whether the region is part of a particular set: at the time
425 // this method may be called, we have only completed allocation of the regions,
426 // but not put into a region set.
427 prev_committed = true;
428 if (hr->startsHumongous()) {
429 prev_end = hr->orig_end();
430 } else {
431 prev_end = hr->end();
432 }
433 }
434 for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
435 guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
436 }
438 guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
439 _free_list.verify();
440 }
#ifndef PRODUCT
// Debug-build-only hook: runs the full invariant check; compiled out in
// product builds where the call sites become no-ops.
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT