/*
 * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_compactingPermGenGen.cpp.incl"

// An ObjectClosure helper: Adjust all pointers in an object and clear
// the object's mark in order to prevent visiting it twice. The closure
// is applied to every object in the read-write space, so no recursion
// is needed. This helper is used when the RedefineClasses() API has
// been called.

class AdjustSharedObjectClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->adjust_pointers();   // Adjust this object's references.
      }
    }
  }
};
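
// (AdjustSharedObjectClosure is applied in pre_adjust_pointers() below,
// via rw_space()->object_iterate(), once a class has been redefined.)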


// An OopClosure helper: Recursively adjust all pointers in an object
// and in all objects referenced by it. Clear marks on objects in order
// to prevent visiting any object twice.

class RecursiveAdjustSharedObjectClosure : public OopClosure {
protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
        obj->adjust_pointers();   // Adjust this object's references.

        // Special case: if a class has a read-only constant pool,
        // then the read-write objects referenced by the pool must
        // have their marks reset.

        if (obj->klass() == Universe::instanceKlassKlassObj()) {
          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
          constantPoolOop cp = ik->constants();
          if (cp->is_shared_readonly()) {
            cp->oop_iterate(this);
          }
        }
      }
    }
  }
public:
  virtual void do_oop(oop* p)       { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};


// We need to go through all placeholders in the system dictionary and
// try to resolve them into shared classes. Other threads might be in
// the process of loading a shared class and have strong roots on
// their stack to the class without having added the class to the
// dictionary yet. This means the class will be marked during phase 1
// but will not be unmarked during the application of the
// RecursiveAdjustSharedObjectClosure to the SystemDictionary. Note
// that we must not call find_shared_class with non-read-only symbols
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->klass() == Universe::symbolKlassObj() &&
        obj->is_shared_readonly()) {
      symbolHandle sym((symbolOop) obj);
      oop k = SystemDictionary::find_shared_class(sym);
      if (k != NULL) {
        RecursiveAdjustSharedObjectClosure clo;
        clo.do_oop(&k);
      }
    }
  }
public:
  virtual void do_oop(oop* p)       { TraversePlaceholdersClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
};
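
// (TraversePlaceholdersClosure is applied in pre_adjust_pointers()
// below, via SystemDictionary::placeholders_do().)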


void CompactingPermGenGen::initialize_performance_counters() {

  const char* gen_name = "perm";

  // Generation counters: generation 2, 1 subspace.
  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void CompactingPermGenGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.
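  //
  // Summary of the assignments below: with sharing enabled, the shared
  // ranges are carved top-down from the high end of shared_rs, giving
  // this layout (addresses increase to the right):
  //
  //   unshared_bottom       unshared_end == shared_bottom             shared_end
  //   |  unshared perm space | read-only | read-write | misc data | misc code |
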
  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
    misccode_end = shared_end;
    misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
    miscdata_end = misccode_bottom;
    miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
    readwrite_end = miscdata_bottom;
    readwrite_bottom =
      readwrite_end - heap_word_size(spec()->read_write_size());
    readonly_end = readwrite_bottom;
    readonly_bottom =
      readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
    misccode_end = shared_end;
    misccode_bottom = shared_end;
    miscdata_end = shared_end;
    miscdata_bottom = shared_end;
    readwrite_end = shared_end;
    readwrite_bottom = shared_end;
    readonly_end = shared_end;
    readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.

  ReservedSpace ro_rs = shared_rs.first_part(spec()->read_only_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs = tmp_rs1.first_part(spec()->read_write_size(),
                                           UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs = tmp_rs2.first_part(spec()->misc_data_size(),
                                           UseSharedSpaces);
  ReservedSpace mc_rs = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
           MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces.
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, so don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)

    // Commit the memory behind the shared spaces if dumping (not
    // mapping).
    if (DumpSharedSpaces) {
      _ro_vs.initialize(ro_rs, spec()->read_only_size());
      _rw_vs.initialize(rw_rs, spec()->read_write_size());
      _md_vs.initialize(md_rs, spec()->misc_data_size());
      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
    }

    // Allocate the shared spaces.
    _ro_bts = new BlockOffsetSharedArray(
                  MemRegion(readonly_bottom,
                            heap_word_size(spec()->read_only_size())),
                  heap_word_size(spec()->read_only_size()));
    _ro_space = new OffsetTableContigSpace(_ro_bts,
                  MemRegion(readonly_bottom, readonly_end));
    _rw_bts = new BlockOffsetSharedArray(
                  MemRegion(readwrite_bottom,
                            heap_word_size(spec()->read_write_size())),
                  heap_word_size(spec()->read_write_size()));
    _rw_space = new OffsetTableContigSpace(_rw_bts,
                  MemRegion(readwrite_bottom, readwrite_end));

    // Restore the mangling flag.
    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)

    if (_ro_space == NULL || _rw_space == NULL)
      vm_exit_during_initialization("Could not allocate a shared space");

    // Cover both shared spaces entirely with cards.
    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));

    if (UseSharedSpaces) {

      // Map in the regions in the shared file.
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      size_t image_alignment = mapinfo->alignment();
      CollectedHeap* ch = Universe::heap();
      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
          (!mapinfo->map_space(md, md_rs, NULL))      ||
          (!mapinfo->map_space(mc, mc_rs, NULL))      ||
          // check the alignment constraints
          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
           image_alignment !=
           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
        // Base addresses didn't match; skip sharing, but continue.
        shared_rs.release();
        spec()->disable_sharing();
        // If -Xshare:on was specified, print an error message and exit the
        // VM; otherwise, set UseSharedSpaces to false and continue.
        if (RequireSharedSpaces) {
          vm_exit_during_initialization("Unable to use shared archive.", NULL);
        } else {
          FLAG_SET_DEFAULT(UseSharedSpaces, false);
        }

        // Note: freeing the block offset array objects does not
        // currently free up the underlying storage.
        delete _ro_bts;
        _ro_bts = NULL;
        delete _ro_space;
        _ro_space = NULL;
        delete _rw_bts;
        _rw_bts = NULL;
        delete _rw_space;
        _rw_space = NULL;
        shared_end = (HeapWord*)(rs.base() + rs.size());
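        // Shrink the card table's coverage of the (now unused) shared
        // region to an empty MemRegion.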
        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
      }
    }

    // Reserved region includes shared spaces for oop.is_in_reserved().
    _reserved.set_end(shared_end);

  } else {
    _ro_space = NULL;
    _rw_space = NULL;
  }
}


// Scan the shared read-write space to catch all objects that contain
// references to any younger generation, and forward those pointers.
// Avoid space_iterate, as actually visiting all the objects in the
// space would page in more objects than we need. Instead, use the
// system dictionary as strong roots into the read-write space.
//
// If a RedefineClasses() call has been made, then we have to iterate
// over the entire shared read-write space in order to find all the
// objects that need to be forwarded. For example, it is possible for
// an nmethod to be found and marked in GC phase-1 only for the nmethod
// to be freed by the time we reach GC phase-3. The underlying method
// is still marked, but we can't (easily) find it in GC phase-3 so we
// blow up in GC phase-4. With RedefineClasses() we want replaced code
// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
// longer being executed by any thread so we keep minimal attachments
// to the replaced code. However, we can't guarantee when those EMCP
// or obsolete methods will be collected so they may still be out there
// even after we've severed our minimal attachments.

void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    if (JvmtiExport::has_redefined_a_class()) {
      // RedefineClasses() requires a brute force approach
      AdjustSharedObjectClosure blk;
      rw_space()->object_iterate(&blk);
    } else {
      RecursiveAdjustSharedObjectClosure blk;
      Universe::oops_do(&blk);
      StringTable::oops_do(&blk);
      SystemDictionary::always_strong_classes_do(&blk);
      TraversePlaceholdersClosure tpc;
      SystemDictionary::placeholders_do(&tpc);
    }
  }
}


#ifdef ASSERT
class VerifyMarksClearedClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    assert(SharedSkipVerify || !obj->mark()->is_marked(),
           "Shared oop still marked?");
  }
};
#endif


void CompactingPermGenGen::post_compact() {
#ifdef ASSERT
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    VerifyMarksClearedClosure blk;
    rw_space()->object_iterate(&blk);
  }
#endif
}

void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
  if (spec()->enable_shared_spaces()) {
#ifdef PRODUCT
    // Making the rw_space walkable will page in the entire space, and
    // is to be avoided. However, this is required for Verify options.
    ShouldNotReachHere();
#endif

    blk->do_space(ro_space());
    blk->do_space(rw_space());
  }
}


void CompactingPermGenGen::print_on(outputStream* st) const {
  OneContigSpaceCardGeneration::print_on(st);
  if (spec()->enable_shared_spaces()) {
    st->print(" ro");
    ro_space()->print_on(st);
    st->print(" rw");
    rw_space()->print_on(st);
  } else {
    st->print_cr("No shared spaces configured.");
  }
}


// References from the perm gen to younger generation objects may
// occur in static fields in Java classes or in constant pool references
// to String objects.

void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
  if (spec()->enable_shared_spaces()) {
    blk->set_generation(this);
    // ro_space has no younger gen refs.
    _rs->younger_refs_in_space_iterate(rw_space(), blk);
    blk->reset_generation();
  }
}


// Shared spaces are addressed in pre_adjust_pointers.
void CompactingPermGenGen::adjust_pointers() {
  the_space()->adjust_pointers();
}


void CompactingPermGenGen::compact() {
  the_space()->compact();
}


size_t CompactingPermGenGen::contiguous_available() const {
  // Don't include shared spaces.
  return OneContigSpaceCardGeneration::contiguous_available()
         - _shared_space_size;
}

size_t CompactingPermGenGen::max_capacity() const {
  // Don't include shared spaces.
  assert(UseSharedSpaces || (_shared_space_size == 0),
         "If not used, the size of shared spaces should be 0");
  return OneContigSpaceCardGeneration::max_capacity()
         - _shared_space_size;
}


bool CompactingPermGenGen::grow_by(size_t bytes) {
  // Don't allow _virtual_space to expand into the shared spaces.
  size_t max_bytes = _virtual_space.uncommitted_size() - _shared_space_size;
  if (bytes > max_bytes) {
    bytes = max_bytes;  // Clamp growth to the uncommitted, unshared part.
  }
  return OneContigSpaceCardGeneration::grow_by(bytes);
}


void CompactingPermGenGen::grow_to_reserved() {
  // Don't allow _virtual_space to expand into the shared spaces.
  if (_virtual_space.uncommitted_size() > _shared_space_size) {
    size_t remaining_bytes =
      _virtual_space.uncommitted_size() - _shared_space_size;
    bool success = OneContigSpaceCardGeneration::grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
}


// There are no younger-generation references: clear this generation's
// main space's card table entries. Do NOT touch the entries for the
// read-only space (they are always clear) or for the read-write space
// (they carry valuable information).

void CompactingPermGenGen::clear_remembered_set() {
  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
}


// Objects in this generation's main space may have moved; invalidate
// that space's cards. Do NOT invalidate the card table entries for the
// read-only or read-write spaces, as those objects never move.

void CompactingPermGenGen::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


void CompactingPermGenGen::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    ro_space()->verify(allow_dirty);
    rw_space()->verify(allow_dirty);
  }
}


HeapWord* CompactingPermGenGen::unshared_bottom;
HeapWord* CompactingPermGenGen::unshared_end;
HeapWord* CompactingPermGenGen::shared_bottom;
HeapWord* CompactingPermGenGen::shared_end;
HeapWord* CompactingPermGenGen::readonly_bottom;
HeapWord* CompactingPermGenGen::readonly_end;
HeapWord* CompactingPermGenGen::readwrite_bottom;
HeapWord* CompactingPermGenGen::readwrite_end;
HeapWord* CompactingPermGenGen::miscdata_bottom;
HeapWord* CompactingPermGenGen::miscdata_end;
HeapWord* CompactingPermGenGen::misccode_bottom;
HeapWord* CompactingPermGenGen::misccode_end;

// JVM/TI RedefineClasses() support:
bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // Remap the shared read-only space to shared read-write, private.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}

void** CompactingPermGenGen::_vtbl_list;