Tue, 15 Sep 2009 21:53:47 -0700
6863023: need non-perm oops in code cache for JSR 292
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_g1MarkSweep.cpp.incl"

class HeapRegion;
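
// Perform a full, stop-the-world mark-sweep collection of the G1 heap,
// including the permanent generation, at a safepoint.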
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  SharedHeap* sh = SharedHeap::heap();
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}
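
// Set up the stacks used by the mark-sweep phases.  The marking stack and
// the revisit-klass stack are allocated up front in the C heap; the
// preserved-mark fields are simply reset here and are managed by the
// shared GenMarkSweep code during the collection.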
void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
  GenMarkSweep::_preserved_mark_stack = NULL;
  GenMarkSweep::_preserved_oop_stack = NULL;

  GenMarkSweep::_marking_stack =
    new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  size_t size = SystemDictionary::number_of_classes() * 2;
  GenMarkSweep::_revisit_klass_stack =
    new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true);
}
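
// Phase 1: recursively mark all live objects from the strong roots,
// process discovered references, unload dead classes and nmethods, and
// prune the symbol and interned-string tables.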
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");
}
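
// Closure used in phase 2 to compute forwarding addresses for live
// objects, region by region.  Live humongous objects are not moved (they
// are forwarded to themselves); a humongous region whose object is dead
// is freed, and the card table is cleared for the space left unused
// after compaction.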
class G1PrepareCompactClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;

  void free_humongous_region(HeapRegion* hr) {
    HeapWord* bot = hr->bottom();
    HeapWord* end = hr->end();
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    G1CollectedHeap::heap()->free_region(hr);
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs) :
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold())
  {}
  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};
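
// Phase 2: compute the new address of every live object, first in the G1
// heap regions and then in the permanent generation.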
void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");
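
  // Find the first heap region.  If it contains a live humongous object,
  // that object will not be moved, so compaction starts at the next
  // compaction space instead.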
  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion *r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
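
// Closure used in phase 3 to update every reference in each region so
// that it points at the forwarding address computed in phase 2.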
class G1AdjustPointersClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};
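
// Phase 3: adjust references in the strong and weak roots, the preserved
// marks, every heap region, and the permanent generation so they point
// at the new object locations.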
void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
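
// Closure used in phase 4 to slide the live objects in each region down
// to their new locations.  A live humongous object stays in place; its
// mark word is reinitialized and the region's compaction state is reset.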
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***