Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
1 /*
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/collectionSetChooser.hpp"
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
28 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
29 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
30 #include "memory/space.inline.hpp"
32 CSetChooserCache::CSetChooserCache() {
33 for (int i = 0; i < CacheLength; ++i)
34 _cache[i] = NULL;
35 clear();
36 }
38 void CSetChooserCache::clear() {
39 _occupancy = 0;
40 _first = 0;
41 for (int i = 0; i < CacheLength; ++i) {
42 HeapRegion *hr = _cache[i];
43 if (hr != NULL)
44 hr->set_sort_index(-1);
45 _cache[i] = NULL;
46 }
47 }
#ifndef PRODUCT
// Consistency check for the circular buffer: the _occupancy entries
// starting at _first must be non-NULL, non-young, ordered by
// non-increasing GC efficiency, and carry the encoded sort index of
// their slot; every remaining slot must be NULL.  Returns true on
// success (any violation fires a guarantee()).
bool CSetChooserCache::verify() {
  int index = _first;
  HeapRegion *prev = NULL;
  // Walk the occupied portion of the circular buffer.
  for (int i = 0; i < _occupancy; ++i) {
    guarantee(_cache[index] != NULL, "cache entry should not be empty");
    HeapRegion *hr = _cache[index];
    guarantee(!hr->is_young(), "should not be young!");
    if (prev != NULL) {
      // Entries are kept sorted by decreasing GC efficiency.
      guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
                "cache should be correctly ordered");
    }
    guarantee(hr->sort_index() == get_sort_index(index),
              "sort index should be correct");
    index = trim_index(index + 1);
    prev = hr;
  }

  // The remainder of the buffer must be empty.
  for (int i = 0; i < (CacheLength - _occupancy); ++i) {
    guarantee(_cache[index] == NULL, "cache entry should be empty");
    index = trim_index(index + 1);
  }

  // Having wrapped all the way around, we must be back at the start.
  guarantee(index == _first, "we should have reached where we started from");
  return true;
}
#endif // PRODUCT
77 void CSetChooserCache::insert(HeapRegion *hr) {
78 assert(!is_full(), "cache should not be empty");
79 hr->calc_gc_efficiency();
81 int empty_index;
82 if (_occupancy == 0) {
83 empty_index = _first;
84 } else {
85 empty_index = trim_index(_first + _occupancy);
86 assert(_cache[empty_index] == NULL, "last slot should be empty");
87 int last_index = trim_index(empty_index - 1);
88 HeapRegion *last = _cache[last_index];
89 assert(last != NULL,"as the cache is not empty, last should not be empty");
90 while (empty_index != _first &&
91 last->gc_efficiency() < hr->gc_efficiency()) {
92 _cache[empty_index] = last;
93 last->set_sort_index(get_sort_index(empty_index));
94 empty_index = last_index;
95 last_index = trim_index(last_index - 1);
96 last = _cache[last_index];
97 }
98 }
99 _cache[empty_index] = hr;
100 hr->set_sort_index(get_sort_index(empty_index));
102 ++_occupancy;
103 assert(verify(), "cache should be consistent");
104 }
106 HeapRegion *CSetChooserCache::remove_first() {
107 if (_occupancy > 0) {
108 assert(_cache[_first] != NULL, "cache should have at least one region");
109 HeapRegion *ret = _cache[_first];
110 _cache[_first] = NULL;
111 ret->set_sort_index(-1);
112 --_occupancy;
113 _first = trim_index(_first + 1);
114 assert(verify(), "cache should be consistent");
115 return ret;
116 } else {
117 return NULL;
118 }
119 }
// this is a bit expensive... but we expect that it should not be called
// too often.
// Removes hr from an arbitrary position in the cache, closing the gap
// by shifting all entries after it one slot towards the front and
// re-encoding their sort indices.  hr must currently be in the cache.
void CSetChooserCache::remove(HeapRegion *hr) {
  assert(_occupancy > 0, "cache should not be empty");
  // A sort index < -1 encodes a slot within the cache.
  assert(hr->sort_index() < -1, "should already be in the cache");
  int index = get_index(hr->sort_index());
  assert(_cache[index] == hr, "index should be correct");
  int next_index = trim_index(index + 1);
  int last_index = trim_index(_first + _occupancy - 1);
  // Shift the tail of the buffer one slot towards the front.
  while (index != last_index) {
    assert(_cache[next_index] != NULL, "should not be null");
    _cache[index] = _cache[next_index];
    _cache[index]->set_sort_index(get_sort_index(index));

    index = next_index;
    next_index = trim_index(next_index+1);
  }
  assert(index == last_index, "should have reached the last one");
  _cache[index] = NULL;
  // hr is no longer tracked by the cache (or the marked list).
  hr->set_sort_index(-1);
  --_occupancy;
  assert(verify(), "cache should be consistent");
}
145 static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
146 if (hr1 == NULL) {
147 if (hr2 == NULL) return 0;
148 else return 1;
149 } else if (hr2 == NULL) {
150 return -1;
151 }
152 if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
153 else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
154 else return 0;
155 }
// Adapter for GrowableArray::sort(), which hands the comparator
// pointers to the array elements.
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return orderRegions(*hr1p, *hr2p);
}
CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
  // allow to pass the assert in GenericGrowableArray() which checks
  // that a growable array object must be on C heap if elements are.
  //
  // Note: containing object is allocated on C heap since it is CHeapObj.
  //
  _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
                                                   ResourceObj::C_HEAP),
                  100),
                 true),
  _curMarkedIndex(0),      // next candidate slot to hand to the cache
  _numMarkedRegions(0),    // number of regions appended/set so far
  _unmarked_age_1_returned_as_new(false),
  _first_par_unreserved_idx(0)  // high-water mark for parallel chunk reservation
{}
#ifndef PRODUCT
// Consistency check for the sorted candidate list: slots already
// consumed (below _curMarkedIndex) must be NULL; remaining non-NULL
// entries must be non-young, sorted by non-increasing GC efficiency,
// with each region's sort index matching its list position.  Ends by
// verifying the cache as well.
bool CollectionSetChooser::verify() {
  int index = 0;
  guarantee(_curMarkedIndex <= _numMarkedRegions,
            "_curMarkedIndex should be within bounds");
  // Slots handed to the cache (or removed) have been NULLed out.
  while (index < _curMarkedIndex) {
    guarantee(_markedRegions.at(index++) == NULL,
              "all entries before _curMarkedIndex should be NULL");
  }
  HeapRegion *prev = NULL;
  while (index < _numMarkedRegions) {
    HeapRegion *curr = _markedRegions.at(index++);
    if (curr != NULL) {
      int si = curr->sort_index();
      guarantee(!curr->is_young(), "should not be young!");
      guarantee(si > -1 && si == (index-1), "sort index invariant");
      if (prev != NULL) {
        // orderRegions() == 1 would mean prev is less efficient than curr.
        guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
      }
      prev = curr;
    }
  }
  return _cache.verify();
}
#endif
214 bool
215 CollectionSetChooser::addRegionToCache() {
216 assert(!_cache.is_full(), "cache should not be full");
218 HeapRegion *hr = NULL;
219 while (hr == NULL && _curMarkedIndex < _numMarkedRegions) {
220 hr = _markedRegions.at(_curMarkedIndex++);
221 }
222 if (hr == NULL)
223 return false;
224 assert(!hr->is_young(), "should not be young!");
225 assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
226 _markedRegions.at_put(hr->sort_index(), NULL);
227 _cache.insert(hr);
228 assert(!_cache.is_empty(), "cache should not be empty");
229 assert(verify(), "cache should be consistent");
230 return false;
231 }
233 void
234 CollectionSetChooser::fillCache() {
235 while (!_cache.is_full() && addRegionToCache()) {
236 }
237 }
// Sorts the candidate list by decreasing GC efficiency (NULL entries
// sink to the back), records each region's list position in its sort
// index, and optionally prints per-region liveness info.  Called once
// the (possibly parallel) collection of candidate regions is complete.
void
CollectionSetChooser::sortMarkedHeapRegions() {
  guarantee(_cache.is_empty(), "cache should be empty");
  // First trim any unused portion of the top in the parallel case.
  if (_first_par_unreserved_idx > 0) {
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
                          _markedRegions.length(), _first_par_unreserved_idx);
    }
    assert(_first_par_unreserved_idx <= _markedRegions.length(),
           "Or we didn't reserved enough length");
    _markedRegions.trunc_to(_first_par_unreserved_idx);
  }
  _markedRegions.sort(orderRegions);
  assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
  // After sorting, the first _numMarkedRegions entries are non-NULL
  // and everything past them is NULL (NULLs sort to the back).
  assert(_numMarkedRegions == 0
         || _markedRegions.at(_numMarkedRegions-1) != NULL,
         "Testing _numMarkedRegions");
  assert(_numMarkedRegions == _markedRegions.length()
         || _markedRegions.at(_numMarkedRegions) == NULL,
         "Testing _numMarkedRegions");
  if (G1PrintParCleanupStats) {
    gclog_or_tty->print_cr(" Sorted %d marked regions.", _numMarkedRegions);
  }
  // Record each region's position so removeRegion() can find it later.
  for (int i = 0; i < _numMarkedRegions; i++) {
    assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
    _markedRegions.at(i)->set_sort_index(i);
  }
  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
    for (int i = 0; i < _numMarkedRegions; ++i) {
      HeapRegion* r = _markedRegions.at(i);
      cl.doHeapRegion(r);
    }
  }
  assert(verify(), "should now be sorted");
}
277 void
278 CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
279 assert(!hr->isHumongous(),
280 "Humongous regions shouldn't be added to the collection set");
281 assert(!hr->is_young(), "should not be young!");
282 _markedRegions.append(hr);
283 _numMarkedRegions++;
284 hr->calc_gc_efficiency();
285 }
// Pre-sizes the candidate list for parallel filling: rounds n_regions
// up to a multiple of chunkSize and grows the list so that every GC
// worker can reserve whole chunks (allowing up to one partly-used
// chunk of waste per thread) without any further resizing.
void
CollectionSetChooser::
prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
  _first_par_unreserved_idx = 0;
  size_t max_waste = ParallelGCThreads * chunkSize;
  // it should be aligned with respect to chunkSize
  size_t aligned_n_regions =
                     (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
  assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
  // Grow to the last needed index; slots are initialized to NULL.
  _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
}
// Atomically reserves a chunk of n_regions consecutive slots in the
// candidate list and returns the index of the first reserved slot.
// Atomic::add returns the post-add value, so the chunk starts at
// res - n_regions.
jint
CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
  jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
  assert(_markedRegions.length() > res + n_regions - 1,
         "Should already have been expanded");
  return res - n_regions;
}
// Parallel counterpart of addMarkedHeapRegion(): stores hr into a slot
// previously reserved via getParMarkedHeapRegionChunk().  The caller
// bumps the count separately with incNumMarkedHeapRegions().
void
CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
  assert(_markedRegions.at(index) == NULL, "precondition");
  assert(!hr->is_young(), "should not be young!");
  _markedRegions.at_put(index, hr);
  hr->calc_gc_efficiency();
}
// Atomically bumps the candidate-region count; used by parallel GC
// workers after filling their reserved chunks.
void
CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
  (void)Atomic::add(inc_by, &_numMarkedRegions);
}
320 void
321 CollectionSetChooser::clearMarkedHeapRegions(){
322 for (int i = 0; i < _markedRegions.length(); i++) {
323 HeapRegion* r = _markedRegions.at(i);
324 if (r != NULL) r->set_sort_index(-1);
325 }
326 _markedRegions.clear();
327 _curMarkedIndex = 0;
328 _numMarkedRegions = 0;
329 _cache.clear();
330 };
// After a full collection the information used to rank candidate
// regions is stale, so drop all of them.
void
CollectionSetChooser::updateAfterFullCollection() {
  clearMarkedHeapRegions();
}
// Removes hr from wherever the chooser currently tracks it.  The sort
// index encodes the location: >= 0 is a slot in _markedRegions, < -1
// encodes a slot in the cache, and -1 means the region is in neither.
void CollectionSetChooser::removeRegion(HeapRegion *hr) {
  int si = hr->sort_index();
  assert(si == -1 || hr->is_marked(), "Sort index not valid.");
  if (si > -1) {
    // Region is in the sorted list: just NULL out its slot.
    assert(_markedRegions.at(si) == hr, "Sort index not valid." );
    _markedRegions.at_put(si, NULL);
  } else if (si < -1) {
    // Region is in the cache: remove it there.
    assert(_cache.region_in_cache(hr), "should be in the cache");
    _cache.remove(hr);
    assert(hr->sort_index() == -1, "sort index invariant");
  }
  hr->set_sort_index(-1);
}
// if time_remaining < 0.0, then this method should try to return
// a region, whether it fits within the remaining time or not
// Returns the most GC-efficient remaining candidate region, or NULL if
// no candidates are left or the best one is predicted to be too
// expensive for this pause.  avg_prediction is only consulted when the
// young list length is not adaptive.
HeapRegion*
CollectionSetChooser::getNextMarkedRegion(double time_remaining,
                                          double avg_prediction) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();
  // Top up the cache from the sorted list before peeking at it.
  fillCache();
  if (_cache.is_empty()) {
    assert(_curMarkedIndex == _numMarkedRegions,
           "if cache is empty, list should also be empty");
    ergo_verbose0(ErgoCSetConstruction,
                  "stop adding old regions to CSet",
                  ergo_format_reason("cache is empty"));
    return NULL;
  }

  // Peek first; the region is only removed once it has been accepted.
  HeapRegion *hr = _cache.get_first();
  assert(hr != NULL, "if cache not empty, first entry should be non-null");
  double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);

  if (g1p->adaptive_young_list_length()) {
    // Adaptive case: reject the region if it does not fit in the
    // remaining pause-time budget.
    if (time_remaining - predicted_time < 0.0) {
      g1h->check_if_region_is_too_expensive(predicted_time);
      ergo_verbose2(ErgoCSetConstruction,
                    "stop adding old regions to CSet",
                    ergo_format_reason("predicted old region time higher than remaining time")
                    ergo_format_ms("predicted old region time")
                    ergo_format_ms("remaining time"),
                    predicted_time, time_remaining);
      return NULL;
    }
  } else {
    // Non-adaptive case: reject regions predicted to cost more than
    // twice the average prediction.
    double threshold = 2.0 * avg_prediction;
    if (predicted_time > threshold) {
      ergo_verbose2(ErgoCSetConstruction,
                    "stop adding old regions to CSet",
                    ergo_format_reason("predicted old region time higher than threshold")
                    ergo_format_ms("predicted old region time")
                    ergo_format_ms("threshold"),
                    predicted_time, threshold);
      return NULL;
    }
  }

  // Accepted: actually remove the region we peeked at.
  HeapRegion *hr2 = _cache.remove_first();
  assert(hr == hr2, "cache contents should not have changed");

  return hr;
}