/*
 * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// All heaps contain a "permanent generation," containing permanent
// (reflective) objects. This is like a regular generation in some ways,
// but unlike one in others, and so is split apart.

class PermanentGenerationSpec;

// This is the "generation" view of a CompactingPermGen.
// NOTE: the shared spaces used for CDS are here handled in
// a somewhat awkward and potentially buggy fashion, see CR 6801625.
// This infelicity should be fixed, see CR 6897789.
class CompactingPermGenGen: public OneContigSpaceCardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;

 private:
  // Shared spaces
  PermanentGenerationSpec* _spec;
  size_t _shared_space_size;
  VirtualSpace _ro_vs;
  VirtualSpace _rw_vs;
  VirtualSpace _md_vs;
  VirtualSpace _mc_vs;
  BlockOffsetSharedArray* _ro_bts;
  BlockOffsetSharedArray* _rw_bts;
  OffsetTableContigSpace* _ro_space;
  OffsetTableContigSpace* _rw_space;

  // With shared spaces there is a dichotomy in the use of the
  // _virtual_space of the generation. There is a portion of the
  // _virtual_space that is used for the unshared part of the
  // permanent generation and a portion that is reserved for the shared part.
  // The _reserved field in the generation represents both the
  // unshared and shared parts of the generation. The _reserved
  // variable is initialized for only the unshared part but is
  // later extended to include the shared part during initialization
  // if shared spaces are being used.
  // The reserved size for the _virtual_space for CompactingPermGenGen
  // is the size of the space for the permanent generation including
  // the shared spaces. This can be seen by the use of MaxPermSize
  // in the allocation of PermanentGenerationSpec. The space for the
  // shared spaces is committed separately (???).
  // In general at initialization only a part of the
  // space for the unshared part of the permanent generation is
  // committed and more is committed as the permanent generation is
  // grown. In growing the permanent generation the capacity() and
  // max_capacity() of the generation are used. For the permanent
  // generation (implemented with a CompactingPermGenGen) the capacity()
  // is taken from the capacity of the space (_the_space variable used for the
  // unshared part of the generation) and the max_capacity() is based
  // on the size of the _reserved variable (which includes the size of the
  // shared spaces) minus the size of the shared spaces.

  // These values are redundant, but are called out separately to avoid
  // going through heap/space/gen pointers for performance.
  static HeapWord* unshared_bottom;
  static HeapWord* unshared_end;
  static HeapWord* shared_bottom;
  static HeapWord* readonly_bottom;
  static HeapWord* readonly_end;
  static HeapWord* readwrite_bottom;
  static HeapWord* readwrite_end;
  static HeapWord* miscdata_bottom;
  static HeapWord* miscdata_end;
  static HeapWord* misccode_bottom;
  static HeapWord* misccode_end;
  static HeapWord* shared_end;

  // List of klassOops whose vtbl entries are used to patch others.
  static void** _vtbl_list;

  // Performance Counters
  GenerationCounters* _gen_counters;
  CSpaceCounters* _space_counters;

  void initialize_performance_counters();

 public:

  enum {
    vtbl_list_size = 16, // number of entries in the shared space vtable list.
    num_virtuals = 200   // number of virtual methods in Klass (or
                         // subclass) objects, or greater.
  };

  enum {
    ro = 0,       // read-only shared space in the heap
    rw = 1,       // read-write shared space in the heap
    md = 2,       // miscellaneous data for initializing tables, etc.
    mc = 3,       // miscellaneous code - vtable replacement.
    n_regions = 4
  };

  CompactingPermGenGen(ReservedSpace rs, ReservedSpace shared_rs,
                       size_t initial_byte_size, int level, GenRemSet* remset,
                       ContiguousSpace* space,
                       PermanentGenerationSpec* perm_spec);

  const char* name() const {
    return "compacting perm gen";
  }

  const char* short_name() const {
    return "Perm";
  }

  // Return the maximum capacity for the object space. This
  // explicitly does not include the shared spaces.
  size_t max_capacity() const;

  void update_counters();

  void compute_new_size() {
    assert(false, "Should not call this -- handled at PermGen level.");
  }

  bool must_be_youngest() const { return false; }
  bool must_be_oldest() const { return false; }

  OffsetTableContigSpace* ro_space() const { return _ro_space; }
  OffsetTableContigSpace* rw_space() const { return _rw_space; }
  VirtualSpace* md_space()                 { return &_md_vs; }
  VirtualSpace* mc_space()                 { return &_mc_vs; }
  ContiguousSpace* unshared_space() const  { return _the_space; }

  static bool inline is_shared(const oopDesc* p) {
    return (HeapWord*)p >= shared_bottom && (HeapWord*)p < shared_end;
  }
  // RedefineClasses note: this tester is used to check residence of
  // the specified oop in the shared readonly space and not whether
  // the oop is readonly.
  static bool inline is_shared_readonly(const oopDesc* p) {
    return (HeapWord*)p >= readonly_bottom && (HeapWord*)p < readonly_end;
  }
  // RedefineClasses note: this tester is used to check residence of
  // the specified oop in the shared readwrite space and not whether
  // the oop is readwrite.
  static bool inline is_shared_readwrite(const oopDesc* p) {
    return (HeapWord*)p >= readwrite_bottom && (HeapWord*)p < readwrite_end;
  }

  bool is_in_unshared(const void* p) const {
    return OneContigSpaceCardGeneration::is_in(p);
  }

  bool is_in_shared(const void* p) const {
    return p >= shared_bottom && p < shared_end;
  }

  inline bool is_in(const void* p) const {
    return is_in_unshared(p) || is_in_shared(p);
  }

  inline PermanentGenerationSpec* spec() const { return _spec; }
  inline void set_spec(PermanentGenerationSpec* spec) { _spec = spec; }

  void pre_adjust_pointers();
  void adjust_pointers();
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void print_on(outputStream* st) const;
  void younger_refs_iterate(OopsInGenClosure* blk);
  void compact();
  void post_compact();
  size_t contiguous_available() const;

  void clear_remembered_set();
  void invalidate_remembered_set();

  inline bool block_is_obj(const HeapWord* addr) const {
    if (addr < the_space()->top()) return true;
    else if (addr < the_space()->end()) return false;
    else if (addr < ro_space()->top()) return true;
    else if (addr < ro_space()->end()) return false;
    else if (addr < rw_space()->top()) return true;
    else return false;
  }

  inline size_t block_size(const HeapWord* addr) const {
    if (addr < the_space()->top()) {
      return oop(addr)->size();
    }
    else if (addr < the_space()->end()) {
      assert(addr == the_space()->top(), "non-block head arg to block_size");
      return the_space()->end() - the_space()->top();
    }

    else if (addr < ro_space()->top()) {
      return oop(addr)->size();
    }
    else if (addr < ro_space()->end()) {
      assert(addr == ro_space()->top(), "non-block head arg to block_size");
block_size"); duke@435: return ro_space()->end() - ro_space()->top(); duke@435: } duke@435: duke@435: else if (addr < rw_space()->top()) { duke@435: return oop(addr)->size(); duke@435: } duke@435: else { duke@435: assert(addr == rw_space()->top(), "non-block head arg to block_size"); duke@435: return rw_space()->end() - rw_space()->top(); duke@435: } duke@435: } duke@435: duke@435: static void generate_vtable_methods(void** vtbl_list, duke@435: void** vtable, duke@435: char** md_top, char* md_end, duke@435: char** mc_top, char* mc_end); duke@435: duke@435: void verify(bool allow_dirty); duke@435: duke@435: // Serialization duke@435: static void initialize_oops() KERNEL_RETURN; duke@435: static void serialize_oops(SerializeOopClosure* soc); duke@435: void serialize_bts(SerializeOopClosure* soc); duke@435: duke@435: // Initiate dumping of shared file. duke@435: static jint dump_shared(GrowableArray* class_promote_order, TRAPS); duke@435: duke@435: // JVM/TI RedefineClasses() support: duke@435: // Remap the shared readonly space to shared readwrite, private if duke@435: // sharing is enabled. Simply returns true if sharing is not enabled duke@435: // or if the remapping has already been done by a prior call. duke@435: static bool remap_shared_readonly_as_readwrite(); duke@435: };