Sat, 01 Sep 2012 13:25:18 -0400
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
28 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
29 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
30 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
31 #include "gc_implementation/shared/liveRange.hpp"
32 #include "gc_implementation/shared/markSweep.inline.hpp"
33 #include "gc_implementation/shared/spaceDecorator.hpp"
34 #include "oops/oop.inline.hpp"
// The decorator currently receiving forwarded/compacted objects.  Initialized
// by set_destination_decorator_tenured() at the start of a collection and
// bumped to the next space by advance_destination_decorator() whenever the
// current destination fills up.
PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
39 void PSMarkSweepDecorator::set_destination_decorator_tenured() {
40 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
41 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
43 _destination_decorator = heap->old_gen()->object_mark_sweep();
44 }
46 void PSMarkSweepDecorator::advance_destination_decorator() {
47 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
48 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
50 assert(_destination_decorator != NULL, "Sanity");
52 PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
53 PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
54 PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
55 PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
57 if ( _destination_decorator == first ) {
58 _destination_decorator = second;
59 } else if ( _destination_decorator == second ) {
60 _destination_decorator = third;
61 } else if ( _destination_decorator == third ) {
62 _destination_decorator = fourth;
63 } else {
64 fatal("PSMarkSweep attempting to advance past last compaction area");
65 }
66 }
68 PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
69 assert(_destination_decorator != NULL, "Sanity");
71 return _destination_decorator;
72 }
74 // FIX ME FIX ME FIX ME FIX ME!!!!!!!!!
75 // The object forwarding code is duplicated. Factor this out!!!!!
76 //
77 // This method "precompacts" objects inside its space to dest. It places forwarding
78 // pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
79 // finish by compacting into our own space.
81 void PSMarkSweepDecorator::precompact() {
82 // Reset our own compact top.
83 set_compaction_top(space()->bottom());
85 /* We allow some amount of garbage towards the bottom of the space, so
86 * we don't start compacting before there is a significant gain to be made.
87 * Occasionally, we want to ensure a full compaction, which is determined
88 * by the MarkSweepAlwaysCompactCount parameter. This is a significant
89 * performance improvement!
90 */
91 bool skip_dead = (MarkSweepAlwaysCompactCount < 1)
92 || ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
94 size_t allowed_deadspace = 0;
95 if (skip_dead) {
96 const size_t ratio = allowed_dead_ratio();
97 allowed_deadspace = space()->capacity_in_words() * ratio / 100;
98 }
100 // Fetch the current destination decorator
101 PSMarkSweepDecorator* dest = destination_decorator();
102 ObjectStartArray* start_array = dest->start_array();
104 HeapWord* compact_top = dest->compaction_top();
105 HeapWord* compact_end = dest->space()->end();
107 HeapWord* q = space()->bottom();
108 HeapWord* t = space()->top();
110 HeapWord* end_of_live= q; /* One byte beyond the last byte of the last
111 live object. */
112 HeapWord* first_dead = space()->end(); /* The first dead object. */
113 LiveRange* liveRange = NULL; /* The current live range, recorded in the
114 first header of preceding free area. */
115 _first_dead = first_dead;
117 const intx interval = PrefetchScanIntervalInBytes;
119 while (q < t) {
120 assert(oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
121 oop(q)->mark()->has_bias_pattern(),
122 "these are the only valid states during a mark sweep");
123 if (oop(q)->is_gc_marked()) {
124 /* prefetch beyond q */
125 Prefetch::write(q, interval);
126 size_t size = oop(q)->size();
128 size_t compaction_max_size = pointer_delta(compact_end, compact_top);
130 // This should only happen if a space in the young gen overflows the
131 // old gen. If that should happen, we null out the start_array, because
132 // the young spaces are not covered by one.
133 while(size > compaction_max_size) {
134 // First record the last compact_top
135 dest->set_compaction_top(compact_top);
137 // Advance to the next compaction decorator
138 advance_destination_decorator();
139 dest = destination_decorator();
141 // Update compaction info
142 start_array = dest->start_array();
143 compact_top = dest->compaction_top();
144 compact_end = dest->space()->end();
145 assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
146 assert(compact_end > compact_top, "Must always be space remaining");
147 compaction_max_size =
148 pointer_delta(compact_end, compact_top);
149 }
151 // store the forwarding pointer into the mark word
152 if (q != compact_top) {
153 oop(q)->forward_to(oop(compact_top));
154 assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
155 } else {
156 // if the object isn't moving we can just set the mark to the default
157 // mark and handle it specially later on.
158 oop(q)->init_mark();
159 assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
160 }
162 // Update object start array
163 if (start_array) {
164 start_array->allocate_block(compact_top);
165 }
167 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
168 compact_top += size;
169 assert(compact_top <= dest->space()->end(),
170 "Exceeding space in destination");
172 q += size;
173 end_of_live = q;
174 } else {
175 /* run over all the contiguous dead objects */
176 HeapWord* end = q;
177 do {
178 /* prefetch beyond end */
179 Prefetch::write(end, interval);
180 end += oop(end)->size();
181 } while (end < t && (!oop(end)->is_gc_marked()));
183 /* see if we might want to pretend this object is alive so that
184 * we don't have to compact quite as often.
185 */
186 if (allowed_deadspace > 0 && q == compact_top) {
187 size_t sz = pointer_delta(end, q);
188 if (insert_deadspace(allowed_deadspace, q, sz)) {
189 size_t compaction_max_size = pointer_delta(compact_end, compact_top);
191 // This should only happen if a space in the young gen overflows the
192 // old gen. If that should happen, we null out the start_array, because
193 // the young spaces are not covered by one.
194 while (sz > compaction_max_size) {
195 // First record the last compact_top
196 dest->set_compaction_top(compact_top);
198 // Advance to the next compaction decorator
199 advance_destination_decorator();
200 dest = destination_decorator();
202 // Update compaction info
203 start_array = dest->start_array();
204 compact_top = dest->compaction_top();
205 compact_end = dest->space()->end();
206 assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
207 assert(compact_end > compact_top, "Must always be space remaining");
208 compaction_max_size =
209 pointer_delta(compact_end, compact_top);
210 }
212 // store the forwarding pointer into the mark word
213 if (q != compact_top) {
214 oop(q)->forward_to(oop(compact_top));
215 assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
216 } else {
217 // if the object isn't moving we can just set the mark to the default
218 // mark and handle it specially later on.
219 oop(q)->init_mark();
220 assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
221 }
223 // Update object start array
224 if (start_array) {
225 start_array->allocate_block(compact_top);
226 }
228 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
229 compact_top += sz;
230 assert(compact_top <= dest->space()->end(),
231 "Exceeding space in destination");
233 q = end;
234 end_of_live = end;
235 continue;
236 }
237 }
239 /* for the previous LiveRange, record the end of the live objects. */
240 if (liveRange) {
241 liveRange->set_end(q);
242 }
244 /* record the current LiveRange object.
245 * liveRange->start() is overlaid on the mark word.
246 */
247 liveRange = (LiveRange*)q;
248 liveRange->set_start(end);
249 liveRange->set_end(end);
251 /* see if this is the first dead region. */
252 if (q < first_dead) {
253 first_dead = q;
254 }
256 /* move on to the next object */
257 q = end;
258 }
259 }
261 assert(q == t, "just checking");
262 if (liveRange != NULL) {
263 liveRange->set_end(q);
264 }
265 _end_of_live = end_of_live;
266 if (end_of_live < first_dead) {
267 first_dead = end_of_live;
268 }
269 _first_dead = first_dead;
271 // Update compaction top
272 dest->set_compaction_top(compact_top);
273 }
275 bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
276 HeapWord* q, size_t deadlength) {
277 if (allowed_deadspace_words >= deadlength) {
278 allowed_deadspace_words -= deadlength;
279 CollectedHeap::fill_with_object(q, deadlength);
280 oop(q)->set_mark(oop(q)->mark()->set_marked());
281 assert((int) deadlength == oop(q)->size(), "bad filler object size");
282 // Recall that we required "q == compaction_top".
283 return true;
284 } else {
285 allowed_deadspace_words = 0;
286 return false;
287 }
288 }
// Phase 3 of mark-compact: walk the objects in this space and rewrite every
// interior oop to the new (post-compaction) address recorded in the target's
// mark word during precompact().
void PSMarkSweepDecorator::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space()->bottom();
  HeapWord* t = _end_of_live;  // Established by "prepare_for_compaction".

  assert(_first_dead <= _end_of_live, "Stands to reason, no?");

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = _first_dead;

    // Walk the non-moving (dense) prefix object by object via size().
    while (q < end) {
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      q += size;
    }

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ This is funky.  Using this to read the previously written
      // LiveRange.  See also use below.
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();
    }
  }
  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
      debug_only(prev_q = q);
      q += size;
    } else {
      // q is not a live object, so its mark should point at the next
      // live object (installed via the LiveRange overlay in precompact()).
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }

  assert(q == t, "just checking");
}
// Phase 4 of mark-compact: slide every live object to the forwarding address
// installed in its mark word by precompact(), then reset the space's top.
// When mangle_free_space is set, the now-unused tail is overwritten with a
// debug pattern.
void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord* q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));
      debug_only(prev_q = q);
      q += size;
    }
#endif

    if (_first_dead == t) {
      q = t;
    } else {
      // $$$ Funky
      // The dense prefix did not move; skip straight to the first object
      // after it (address stashed in _first_dead's mark word).
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, compaction_top));
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) {
    space()->mangle_unused_area();
  }
}