/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/compactingPermGenGen.hpp"
#include "memory/filemap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#endif


// An ObjectClosure helper: Adjust all pointers in an object. Clear the
// mark on the object so that it is not visited twice. This helper is
// used when the RedefineClasses() API has been called.

class AdjustSharedObjectClosure : public ObjectClosure {
 public:
  void do_object(oop obj) {
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->adjust_pointers();   // Adjust this object's references.
      }
    }
  }
};


// An OopClosure helper: Recursively adjust all pointers in an object
// and in all objects referenced by it. Clear marks on objects in order
// to prevent visiting any object twice.

class RecursiveAdjustSharedObjectClosure : public OopClosure {
 protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
        obj->adjust_pointers();   // Adjust this object's references.

        // Special case: if a class has a read-only constant pool,
        // then the read-write objects referenced by the pool must
        // have their marks reset.

        if (obj->klass() == Universe::instanceKlassKlassObj()) {
          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
          constantPoolOop cp = ik->constants();
          if (cp->is_shared_readonly()) {
            cp->oop_iterate(this);
          }
        }
      }
    }
  }
 public:
  virtual void do_oop(oop* p)       { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};


// We need to go through all placeholders in the system dictionary and
// try to resolve them into shared classes. Other threads might be in
// the process of loading a shared class and have strong roots on
// their stack to the class without having added the class to the
// dictionary yet. This means the class will be marked during phase 1
// but will not be unmarked during the application of the
// RecursiveAdjustSharedObjectClosure to the SystemDictionary.
class TraversePlaceholdersClosure {
 public:
  static void placeholders_do(Symbol* sym, oop loader) {
    if (CompactingPermGenGen::is_shared(sym)) {
      oop k = SystemDictionary::find_shared_class(sym);
      if (k != NULL) {
        RecursiveAdjustSharedObjectClosure clo;
        clo.do_oop(&k);
      }
    }
  }
};

void CompactingPermGenGen::initialize_performance_counters() {

  const char* gen_name = "perm";

  // Generation Counters - generation 2, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void CompactingPermGenGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.
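  // For orientation, a sketch of the address layout the assignments
  // below establish when sharing is enabled (low addresses at left);
  // the assignments themselves are authoritative:
  //
  //   unshared_bottom                                              shared_end
  //   +-----------------+-----------+------------+-----------+-----------+
  //   | unshared (perm) | read-only | read-write | misc data | misc code |
  //   +-----------------+-----------+------------+-----------+-----------+
  //                     ^
  //                     unshared_end == shared_bottom == readonly_bottom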

  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
    misccode_end = shared_end;
    misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
    miscdata_end = misccode_bottom;
    miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
    readwrite_end = miscdata_bottom;
    readwrite_bottom =
      readwrite_end - heap_word_size(spec()->read_write_size());
    readonly_end = readwrite_bottom;
    readonly_bottom =
      readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
    misccode_end = shared_end;
    misccode_bottom = shared_end;
    miscdata_end = shared_end;
    miscdata_bottom = shared_end;
    readwrite_end = shared_end;
    readwrite_bottom = shared_end;
    readonly_end = shared_end;
    readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                               UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
           MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)

    // Commit the memory behind the shared spaces if dumping (not
    // mapping).
    if (DumpSharedSpaces) {
      _ro_vs.initialize(ro_rs, spec()->read_only_size());
      _rw_vs.initialize(rw_rs, spec()->read_write_size());
      _md_vs.initialize(md_rs, spec()->misc_data_size());
      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
    }

    // Allocate the shared spaces.
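    // Note: each BlockOffsetSharedArray below covers one contiguous
    // shared space; it lets card-table scanning locate object starts
    // within that space.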
    _ro_bts = new BlockOffsetSharedArray(
                  MemRegion(readonly_bottom,
                            heap_word_size(spec()->read_only_size())),
                  heap_word_size(spec()->read_only_size()));
    _ro_space = new OffsetTableContigSpace(_ro_bts,
                  MemRegion(readonly_bottom, readonly_end));
    _rw_bts = new BlockOffsetSharedArray(
                  MemRegion(readwrite_bottom,
                            heap_word_size(spec()->read_write_size())),
                  heap_word_size(spec()->read_write_size()));
    _rw_space = new OffsetTableContigSpace(_rw_bts,
                  MemRegion(readwrite_bottom, readwrite_end));

    // Restore mangling flag.
    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)

    if (_ro_space == NULL || _rw_space == NULL)
      vm_exit_during_initialization("Could not allocate a shared space");

    // Cover both shared spaces entirely with cards.
    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));

    if (UseSharedSpaces) {

      // Map in the regions in the shared file.
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      size_t image_alignment = mapinfo->alignment();
      CollectedHeap* ch = Universe::heap();
      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
          (!mapinfo->map_space(md, md_rs, NULL))      ||
          (!mapinfo->map_space(mc, mc_rs, NULL))      ||
          // check the alignment constraints
          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
           image_alignment !=
           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
        // Base addresses didn't match; skip sharing, but continue
        shared_rs.release();
        spec()->disable_sharing();
        // If -Xshare:on is specified, print out the error message and exit VM,
        // otherwise, set UseSharedSpaces to false and continue.
        if (RequireSharedSpaces) {
          vm_exit_during_initialization("Unable to use shared archive.", NULL);
        } else {
          FLAG_SET_DEFAULT(UseSharedSpaces, false);
        }

        // Note: freeing the block offset array objects does not
        // currently free up the underlying storage.
        delete _ro_bts;
        _ro_bts = NULL;
        delete _ro_space;
        _ro_space = NULL;
        delete _rw_bts;
        _rw_bts = NULL;
        delete _rw_space;
        _rw_space = NULL;
        shared_end = (HeapWord*)(rs.base() + rs.size());
        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
      }
    }

    // Reserved region includes shared spaces for oop.is_in_reserved().
    _reserved.set_end(shared_end);

  } else {
    _ro_space = NULL;
    _rw_space = NULL;
  }
}


// We need to catch all objects in the shared read-write space that
// contain references to any younger generation, and forward those
// pointers. Avoid space_iterate, as actually visiting all the objects
// in the space would page in more objects than we need. Instead, use
// the system dictionary as strong roots into the read-write space.
//
// If a RedefineClasses() call has been made, then we have to iterate
// over the entire shared read-write space in order to find all the
// objects that need to be forwarded. For example, it is possible for
// an nmethod to be found and marked in GC phase-1 only for the nmethod
// to be freed by the time we reach GC phase-3. The underlying method
// is still marked, but we can't (easily) find it in GC phase-3 so we
// blow up in GC phase-4. With RedefineClasses() we want replaced code
// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
// longer being executed by any thread so we keep minimal attachments
// to the replaced code. However, we can't guarantee when those EMCP
// or obsolete methods will be collected so they may still be out there
// even after we've severed our minimal attachments.

void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    if (JvmtiExport::has_redefined_a_class()) {
      // RedefineClasses() requires a brute force approach
      AdjustSharedObjectClosure blk;
      rw_space()->object_iterate(&blk);
    } else {
      RecursiveAdjustSharedObjectClosure blk;
      Universe::oops_do(&blk);
      StringTable::oops_do(&blk);
      SystemDictionary::always_strong_classes_do(&blk);
      SystemDictionary::placeholders_do(TraversePlaceholdersClosure::placeholders_do);
    }
  }
}


#ifdef ASSERT
class VerifyMarksClearedClosure : public ObjectClosure {
 public:
  void do_object(oop obj) {
    assert(SharedSkipVerify || !obj->mark()->is_marked(),
           "Shared oop still marked?");
  }
};
#endif


void CompactingPermGenGen::post_compact() {
#ifdef ASSERT
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    VerifyMarksClearedClosure blk;
    rw_space()->object_iterate(&blk);
  }
#endif
}


// Do not use in time-critical operations due to the possibility of paging
// in otherwise untouched or previously unread portions of the perm gen,
// for instance, the shared spaces. NOTE: Because CompactingPermGenGen
// derives from OneContigSpaceCardGeneration, which is supposed to have a
// single space, and does not override its object_iterate() method,
// object iteration via that interface does not look at the objects in
// the shared spaces when using CDS. This should be fixed; see CR 6897798.
void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
  if (spec()->enable_shared_spaces()) {
    // Making the rw_space walkable will page in the entire space, and
    // is to be avoided in the case of time-critical operations.
    // However, this is required for Verify and heap dump operations.
    blk->do_space(ro_space());
    blk->do_space(rw_space());
  }
}


void CompactingPermGenGen::print_on(outputStream* st) const {
  OneContigSpaceCardGeneration::print_on(st);
  if (spec()->enable_shared_spaces()) {
    st->print(" ro");
    ro_space()->print_on(st);
    st->print(" rw");
    rw_space()->print_on(st);
  } else {
    st->print_cr("No shared spaces configured.");
  }
}


// References from the perm gen to younger generation objects may
// occur in static fields in Java classes or in constant pool references
// to String objects.

void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
  if (spec()->enable_shared_spaces()) {
    blk->set_generation(this);
    // ro_space has no younger gen refs.
    _rs->younger_refs_in_space_iterate(rw_space(), blk);
    blk->reset_generation();
  }
}


// Shared spaces are addressed in pre_adjust_pointers.
void CompactingPermGenGen::adjust_pointers() {
  the_space()->adjust_pointers();
}


void CompactingPermGenGen::compact() {
  the_space()->compact();
}


size_t CompactingPermGenGen::contiguous_available() const {
  // Don't include shared spaces.
  return OneContigSpaceCardGeneration::contiguous_available()
         - _shared_space_size;
}

size_t CompactingPermGenGen::max_capacity() const {
  // Don't include shared spaces.
  assert(UseSharedSpaces || (_shared_space_size == 0),
         "If not used, the size of shared spaces should be 0");
  return OneContigSpaceCardGeneration::max_capacity()
         - _shared_space_size;
}


// There are no young generation references left: clear this generation's
// main space's card table entries. Do NOT clear the card table entries
// for the read-only space (whose cards are always clear) or the
// read-write space (whose cards carry valuable information).

void CompactingPermGenGen::clear_remembered_set() {
  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
}


// Objects in this generation's main space may have moved, invalidate
// that space's cards. Do NOT invalidate the card table entries for the
// read-only or read-write spaces, as those objects never move.

void CompactingPermGenGen::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


void CompactingPermGenGen::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    ro_space()->verify(allow_dirty);
    rw_space()->verify(allow_dirty);
  }
}


HeapWord* CompactingPermGenGen::unshared_bottom;
HeapWord* CompactingPermGenGen::unshared_end;
HeapWord* CompactingPermGenGen::shared_bottom;
HeapWord* CompactingPermGenGen::shared_end;
HeapWord* CompactingPermGenGen::readonly_bottom;
HeapWord* CompactingPermGenGen::readonly_end;
HeapWord* CompactingPermGenGen::readwrite_bottom;
HeapWord* CompactingPermGenGen::readwrite_end;
HeapWord* CompactingPermGenGen::miscdata_bottom;
HeapWord* CompactingPermGenGen::miscdata_end;
HeapWord* CompactingPermGenGen::misccode_bottom;
HeapWord* CompactingPermGenGen::misccode_end;

// JVM/TI RedefineClasses() support:
bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}
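
// For orientation only: a hypothetical call site on the JVM/TI
// RedefineClasses() path would invoke the remap above before patching
// shared metadata in place; the error handling shown is an illustrative
// assumption, not code from this file:
//
//   // At a safepoint, before modifying anything in the shared ro space:
//   if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) {
//     return JVMTI_ERROR_INTERNAL;  // shared archive cannot be patched
//   }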