/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;
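
// Serial full collection of the G1 heap, performed at a safepoint. It reuses
// the four classic mark-compact phases from GenMarkSweep: phase 1 marks live
// objects, phase 2 computes forwarding addresses, phase 3 adjusts pointers to
// the new locations, and phase 4 moves the objects. The permanent generation
// is collected alongside the region-based Java heap.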
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // Hook up weak ref data so it can be used during Mark-Sweep.
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3.
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen remembered set, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // Refs processing: clean slate.
  GenMarkSweep::_ref_processor = NULL;
}
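
// G1 does not reserve a preallocated buffer for preserved header marks.
// Clearing these fields means GenMarkSweep's preserve_marks() /
// restore_marks() machinery records every preserved mark on its growable
// mark/oop stacks instead. (Explanatory note added here; it assumes
// GenMarkSweep's usual buffer-then-stack fallback behavior.)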
void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them.
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking.
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes.
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after the system dictionary,
  // since it assumes all live klasses are marked).
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses.
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit memoized MDOs and clear any unmarked weak refs.
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops.
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");
}
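
// Phase 2 helper: visits every region and forwards its live objects to the
// current compaction point. A live humongous object is not moved (it is
// forwarded to itself); a dead humongous object has its region freed on the
// spot, with the freed regions accumulated in the local free list.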
class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  size_t _pre_used;
  FreeRegionList _free_list;
  HumongousRegionSet _humongous_proxy_set;

  void free_humongous_region(HeapRegion* hr) {
    HeapWord* end = hr->end();
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
                                &_humongous_proxy_set, false /* par */);
    // Do we also need to do this for the continues humongous regions
    // we just collapsed?
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), end));
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs)
  : _g1h(G1CollectedHeap::heap()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _pre_used(0),
    _free_list("Local Free List for G1MarkSweep"),
    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

  void update_sets() {
    // We'll recalculate total used bytes and recreate the free list
    // at the end of the GC, so no point in updating those values here.
    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                            NULL, /* free_list */
                                            &_humongous_proxy_set,
                                            false /* par */);
    _free_list.remove_all();
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");
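
  // Compaction normally starts at the first region. But if that region holds
  // a live humongous object, the object will not move, so compaction starts
  // at the first compaction space after it instead.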
  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion* r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);
  blk.update_sets();

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
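
// Phase 3 helper: updates each reference in a region's live objects to point
// at the forwarding address computed in phase 2. For a humongous object only
// the starts-humongous region holds the object, so only it is processed.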
class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // Point all the oops to the new location.
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations.
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
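
// Phase 4 helper: performs the sliding compaction itself. Regular regions
// copy their live objects to the new locations; a live humongous object
// stays in place and just gets a fresh mark word via init_mark().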
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly.

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***