Mon, 01 Dec 2008 23:25:24 -0800
6778647: snap(), snap_policy() should be renamed setup(), setup_policy()
Summary: Renamed Reference{Policy,Processor} methods from snap{,_policy}() to setup{,_policy}()
Reviewed-by: apetrusenko
/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1MarkSweep.cpp.incl"

class HeapRegion;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
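  // setup_policy() (renamed from snap_policy() by this change) selects the
  // SoftReference clearing policy for this collection: passing true installs
  // the always-clear policy, false leaves the default LRU-based policy.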
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  SharedHeap* sh = SharedHeap::heap();
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (G1VerifyConcMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt: compaction
  // has moved objects, so the remembered set's existing entries are stale.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}

void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
  GenMarkSweep::_preserved_mark_stack = NULL;
  GenMarkSweep::_preserved_oop_stack = NULL;

  GenMarkSweep::_marking_stack =
    new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
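
  // Size the revisit stack from the current number of loaded classes;
  // the factor of two is presumably headroom.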
  size_t size = SystemDictionary::number_of_classes() * 2;
  GenMarkSweep::_revisit_klass_stack =
    new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true);
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
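  // The soft reference policy is set up again here (the same call is made
  // at the start of invoke_at_safepoint()) so that it is freshly
  // initialized when the discovered references are processed below.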
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);
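  // (The final NULL argument is presumably the reference-processing task
  // executor; NULL selects serial reference processing.)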

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");
}
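
// Prepare each region for compaction, clearing the now-unused parts of the
// card table. Phase 2 below runs this closure twice: once with
// popular_only == true over the popular regions at the bottom of the heap,
// then with popular_only == false over everything else.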
class G1PrepareCompactClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  bool _popular_only;

  void free_humongous_region(HeapRegion* hr) {
    HeapWord* bot = hr->bottom();
    HeapWord* end = hr->end();
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    G1CollectedHeap::heap()->free_region(hr);
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs, bool popular_only) :
    _cp(NULL, cs, cs->initialize_threshold()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _popular_only(popular_only)
  {}
  bool doHeapRegion(HeapRegion* hr) {
    if (_popular_only && !hr->popular())
      return true; // terminate early
    else if (!_popular_only && hr->popular())
      return false; // skip this one.

    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Stolen verbatim from g1CollectedHeap.cpp
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
  bool _find_popular;
public:
  FindFirstRegionClosure(bool find_popular) :
    _a_region(NULL), _find_popular(find_popular) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->popular() == _find_popular) {
      _a_region = r;
      return true;
    } else {
      return false;
    }
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");
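
  // The popular regions are handled first; they occupy the first
  // compactible space at the bottom of the heap (see the asserts below).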
  // First we compact the popular regions.
  if (G1NumPopularRegions > 0) {
    CompactibleSpace* sp = g1h->first_compactible_space();
    FindFirstRegionClosure cl(true /*find_popular*/);
    g1h->heap_region_iterate(&cl);
    HeapRegion *r = cl.result();
    assert(r->popular(), "should have found a popular region.");
    assert(r == sp, "first popular heap region should "
                    "== first compactible space");
    G1PrepareCompactClosure blk(sp, true/*popular_only*/);
    g1h->heap_region_iterate(&blk);
  }

  // Now we do the regular regions.
  FindFirstRegionClosure cl(false /*find_popular*/);
  g1h->heap_region_iterate(&cl);
  HeapRegion *r = cl.result();
  assert(!r->popular(), "should have found a non-popular region.");
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp, false/*popular_only*/);
  g1h->heap_region_iterate(&blk);
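
  // Finally the permanent generation, which (per the comment above) must
  // be traversed last.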
  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}

class G1AdjustPointersClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           &GenMarkSweep::adjust_pointer_closure);
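
  // Adjust the weak roots held by the reference processor itself (the
  // heads of its discovered reference lists).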
  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}

class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
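          // The object was forwarded to itself in phase 2, so it does not
          // move; just reinstall its mark word.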
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");
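
  // Compact perm_gen first (see the ordering comment above), then the
  // G1 regions.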
  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***