Thu, 24 Mar 2011 15:47:01 -0700
7029036: Card-table verification hangs with all framework collectors, except G1, even before the first GC
Summary: When verifying clean card ranges, use memory-range-bounded iteration over oops of objects overlapping that range, thus avoiding the otherwise quadratic worst-case cost of scanning large object arrays.
Reviewed-by: jmasa, jwilhelm, tonyp
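
The idea behind the fix, in outline: when verifying that a range of cards is clean, visit only the oops of each overlapping object that actually fall inside that range, rather than every oop in the object. The sketch below illustrates the technique; the helper name (verify_clean_card_range) and the MemRegion-bounded oop_iterate overload are illustrative assumptions, not the verbatim patch:

  // Sketch only: names and signatures are illustrative, not the exact patch.
  void verify_clean_card_range(Space* sp, MemRegion mr, OopClosure* cl) {
    // Start at the first object whose extent overlaps the card range.
    HeapWord* cur = sp->block_start(mr.start());
    while (cur < mr.end()) {
      oop obj = oop(cur);
      // Bounded iteration: a large objArray spanning many cards contributes
      // only the elements inside 'mr' to this range's verification, instead
      // of all of its elements for every card it spans. This avoids the
      // quadratic worst case (cards x array length) named in the summary.
      obj->oop_iterate(cl, mr);
      cur += obj->size();
    }
  }
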
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/compactingPermGenGen.hpp"
#include "memory/filemap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#endif

// An ObjectClosure helper: adjust all pointers in an object and clear
// its mark so that no object is processed twice. This helper is used
// when the RedefineClasses() API has been called, which requires a
// brute-force walk of the entire shared read-write space.

class AdjustSharedObjectClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->adjust_pointers();   // Adjust this object's references.
      }
    }
  }
};

// An OopClosure helper: Recursively adjust all pointers in an object
// and all objects referenced by it. Clear marks on objects in order
// to prevent visiting any object twice.

class RecursiveAdjustSharedObjectClosure : public OopClosure {
protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
        obj->adjust_pointers();   // Adjust this object's references.

        // Special case: if a class has a read-only constant pool,
        // then the read-write objects referenced by the pool must
        // have their marks reset.

        if (obj->klass() == Universe::instanceKlassKlassObj()) {
          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
          constantPoolOop cp = ik->constants();
          if (cp->is_shared_readonly()) {
            cp->oop_iterate(this);
          }
        }
      }
    }
  }
public:
  virtual void do_oop(oop* p)       { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};

// We need to go through all placeholders in the system dictionary and
// try to resolve them into shared classes. Other threads might be in
// the process of loading a shared class and have strong roots on
// their stack to the class without having added the class to the
// dictionary yet. This means the class will be marked during phase 1
// but will not be unmarked during the application of the
// RecursiveAdjustSharedObjectClosure to the SystemDictionary.
class TraversePlaceholdersClosure {
public:
  static void placeholders_do(Symbol* sym, oop loader) {
    if (CompactingPermGenGen::is_shared(sym)) {
      oop k = SystemDictionary::find_shared_class(sym);
      if (k != NULL) {
        RecursiveAdjustSharedObjectClosure clo;
        clo.do_oop(&k);
      }
    }
  }
};

void CompactingPermGenGen::initialize_performance_counters() {

  const char* gen_name = "perm";

  // Generation Counters - generation 2, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void CompactingPermGenGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.

  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
    misccode_end = shared_end;
    misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
    miscdata_end = misccode_bottom;
    miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
    readwrite_end = miscdata_bottom;
    readwrite_bottom =
      readwrite_end - heap_word_size(spec()->read_write_size());
    readonly_end = readwrite_bottom;
    readonly_bottom =
      readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
    misccode_end = shared_end;
    misccode_bottom = shared_end;
    miscdata_end = shared_end;
    miscdata_bottom = shared_end;
    readwrite_end = shared_end;
    readwrite_bottom = shared_end;
    readonly_end = shared_end;
    readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify that the shared and unshared spaces are adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");
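
  // Sketch of the resulting address map when shared spaces are enabled
  // (low addresses first), as implied by the arithmetic above:
  //
  //   unshared_bottom  .. unshared_end  == shared_bottom == readonly_bottom
  //   readonly_bottom  .. readonly_end  == readwrite_bottom
  //   readwrite_bottom .. readwrite_end == miscdata_bottom
  //   miscdata_bottom  .. miscdata_end  == misccode_bottom
  //   misccode_bottom  .. misccode_end  == shared_end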

  // Split reserved memory into pieces.

  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                               UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
    MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces.
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed; don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)

    // Commit the memory behind the shared spaces if dumping (not
    // mapping).
    if (DumpSharedSpaces) {
      _ro_vs.initialize(ro_rs, spec()->read_only_size());
      _rw_vs.initialize(rw_rs, spec()->read_write_size());
      _md_vs.initialize(md_rs, spec()->misc_data_size());
      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
    }

    // Allocate the shared spaces.
    _ro_bts = new BlockOffsetSharedArray(
                  MemRegion(readonly_bottom,
                            heap_word_size(spec()->read_only_size())),
                  heap_word_size(spec()->read_only_size()));
    _ro_space = new OffsetTableContigSpace(_ro_bts,
                  MemRegion(readonly_bottom, readonly_end));
    _rw_bts = new BlockOffsetSharedArray(
                  MemRegion(readwrite_bottom,
                            heap_word_size(spec()->read_write_size())),
                  heap_word_size(spec()->read_write_size()));
    _rw_space = new OffsetTableContigSpace(_rw_bts,
                  MemRegion(readwrite_bottom, readwrite_end));

    // Restore mangling flag.
    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)

    if (_ro_space == NULL || _rw_space == NULL)
      vm_exit_during_initialization("Could not allocate a shared space");

    // Cover both shared spaces entirely with cards.
    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));

    if (UseSharedSpaces) {

      // Map in the regions in the shared file.
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      size_t image_alignment = mapinfo->alignment();
      CollectedHeap* ch = Universe::heap();
      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
          (!mapinfo->map_space(md, md_rs, NULL))      ||
          (!mapinfo->map_space(mc, mc_rs, NULL))      ||
          // check the alignment constraints
          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
           image_alignment !=
           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
        // Base addresses didn't match; skip sharing, but continue.
        shared_rs.release();
        spec()->disable_sharing();
        // If -Xshare:on was specified, print the error message and exit the
        // VM; otherwise, set UseSharedSpaces to false and continue.
        if (RequireSharedSpaces) {
          vm_exit_during_initialization("Unable to use shared archive.", NULL);
        } else {
          FLAG_SET_DEFAULT(UseSharedSpaces, false);
        }

        // Note: freeing the block offset array objects does not
        // currently free up the underlying storage.
        delete _ro_bts;
        _ro_bts = NULL;
        delete _ro_space;
        _ro_space = NULL;
        delete _rw_bts;
        _rw_bts = NULL;
        delete _rw_space;
        _rw_space = NULL;
        shared_end = (HeapWord*)(rs.base() + rs.size());
        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
      }
    }

    // Reserved region includes shared spaces for oop.is_in_reserved().
    _reserved.set_end(shared_end);

  } else {
    _ro_space = NULL;
    _rw_space = NULL;
  }
}

// Do a complete scan of the shared read-write space to catch all
// objects which contain references to any younger generation. Forward
// the pointers. Avoid space_iterate, as actually visiting all the
// objects in the space will page in more objects than we need.
// Instead, use the system dictionary as strong roots into the read-write
// space.
//
// If a RedefineClasses() call has been made, then we have to iterate
// over the entire shared read-write space in order to find all the
// objects that need to be forwarded. For example, it is possible for
// an nmethod to be found and marked in GC phase-1 only for the nmethod
// to be freed by the time we reach GC phase-3. The underlying method
// is still marked, but we can't (easily) find it in GC phase-3 so we
// blow up in GC phase-4. With RedefineClasses() we want replaced code
// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
// longer being executed by any thread, so we keep minimal attachments
// to the replaced code. However, we can't guarantee when those EMCP
// or obsolete methods will be collected, so they may still be out there
// even after we've severed our minimal attachments.

void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    if (JvmtiExport::has_redefined_a_class()) {
      // RedefineClasses() requires a brute force approach.
      AdjustSharedObjectClosure blk;
      rw_space()->object_iterate(&blk);
    } else {
      RecursiveAdjustSharedObjectClosure blk;
      Universe::oops_do(&blk);
      StringTable::oops_do(&blk);
      SystemDictionary::always_strong_classes_do(&blk);
      SystemDictionary::placeholders_do(TraversePlaceholdersClosure::placeholders_do);
    }
  }
}

#ifdef ASSERT
class VerifyMarksClearedClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    assert(SharedSkipVerify || !obj->mark()->is_marked(),
           "Shared oop still marked?");
  }
};
#endif

void CompactingPermGenGen::post_compact() {
#ifdef ASSERT
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    VerifyMarksClearedClosure blk;
    rw_space()->object_iterate(&blk);
  }
#endif
}

// Do not use in time-critical operations due to the possibility of paging
// in otherwise untouched or previously unread portions of the perm gen,
// for instance, the shared spaces. NOTE: Because CompactingPermGenGen
// derives from OneContigSpaceCardGeneration, which is supposed to have a
// single space, and does not override its object_iterate() method,
// object iteration via that interface does not look at the objects in
// the shared spaces when using CDS. This should be fixed; see CR 6897798.
void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
  if (spec()->enable_shared_spaces()) {
    // Making the rw_space walkable will page in the entire space, and
    // is to be avoided in the case of time-critical operations.
    // However, this is required for Verify and heap dump operations.
    blk->do_space(ro_space());
    blk->do_space(rw_space());
  }
}

void CompactingPermGenGen::print_on(outputStream* st) const {
  OneContigSpaceCardGeneration::print_on(st);
  if (spec()->enable_shared_spaces()) {
    st->print(" ro");
    ro_space()->print_on(st);
    st->print(" rw");
    rw_space()->print_on(st);
  } else {
    st->print_cr("No shared spaces configured.");
  }
}

// References from the perm gen to younger generation objects may
// occur in static fields in Java classes or in constant pool references
// to String objects.

void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
  if (spec()->enable_shared_spaces()) {
    blk->set_generation(this);
    // ro_space has no younger gen refs.
    _rs->younger_refs_in_space_iterate(rw_space(), blk);
    blk->reset_generation();
  }
}

// Shared spaces are addressed in pre_adjust_pointers.
void CompactingPermGenGen::adjust_pointers() {
  the_space()->adjust_pointers();
}

void CompactingPermGenGen::compact() {
  the_space()->compact();
}

size_t CompactingPermGenGen::contiguous_available() const {
  // Don't include shared spaces.
  return OneContigSpaceCardGeneration::contiguous_available()
         - _shared_space_size;
}

size_t CompactingPermGenGen::max_capacity() const {
  // Don't include shared spaces.
  assert(UseSharedSpaces || (_shared_space_size == 0),
         "If not used, the size of shared spaces should be 0");
  return OneContigSpaceCardGeneration::max_capacity()
         - _shared_space_size;
}

// When there are no young generation references, clear this generation's
// main space's card table entries. Do NOT clear the card table entries
// for the read-only space (they are always clear) or the read-write
// space (they hold valuable information).

void CompactingPermGenGen::clear_remembered_set() {
  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
}

// Objects in this generation's main space may have moved, so invalidate
// that space's cards. Do NOT invalidate the card table entries for the
// read-only or read-write spaces, as those objects never move.

void CompactingPermGenGen::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}

void CompactingPermGenGen::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    ro_space()->verify(allow_dirty);
    rw_space()->verify(allow_dirty);
  }
}

HeapWord* CompactingPermGenGen::unshared_bottom;
HeapWord* CompactingPermGenGen::unshared_end;
HeapWord* CompactingPermGenGen::shared_bottom;
HeapWord* CompactingPermGenGen::shared_end;
HeapWord* CompactingPermGenGen::readonly_bottom;
HeapWord* CompactingPermGenGen::readonly_end;
HeapWord* CompactingPermGenGen::readwrite_bottom;
HeapWord* CompactingPermGenGen::readwrite_end;
HeapWord* CompactingPermGenGen::miscdata_bottom;
HeapWord* CompactingPermGenGen::miscdata_end;
HeapWord* CompactingPermGenGen::misccode_bottom;
HeapWord* CompactingPermGenGen::misccode_end;

// JVM/TI RedefineClasses() support:
bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // Remap the shared read-only space to shared read-write, private.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}