src/share/vm/memory/compactingPermGenGen.cpp

changeset 435: a61af66fc99e
child 482: 2c106685d6d0
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/memory/compactingPermGenGen.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,457 @@
     1.4 +/*
     1.5 + * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "incls/_precompiled.incl"
    1.29 +#include "incls/_compactingPermGenGen.cpp.incl"
    1.30 +
    1.31 +
     1.32 +// Recursively adjust all pointers in an object and in all objects
     1.33 +// referenced by it.  Clear marks on objects in order to prevent
     1.34 +// visiting any object twice.
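          +// Only shared read-write objects are visited; read-only shared
          +// objects never move.  A read-only constant pool may still reference
          +// read-write objects, a special case handled inside the closure.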
    1.35 +
    1.36 +class RecursiveAdjustSharedObjectClosure : public OopClosure {
    1.37 +public:
    1.38 +  void do_oop(oop* o) {
    1.39 +    oop obj = *o;
    1.40 +    if (obj->is_shared_readwrite()) {
    1.41 +      if (obj->mark()->is_marked()) {
    1.42 +        obj->init_mark();         // Don't revisit this object.
    1.43 +        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
    1.44 +        obj->adjust_pointers();   // Adjust this object's references.
    1.45 +
    1.46 +        // Special case: if a class has a read-only constant pool,
    1.47 +        // then the read-write objects referenced by the pool must
    1.48 +        // have their marks reset.
    1.49 +
    1.50 +        if (obj->klass() == Universe::instanceKlassKlassObj()) {
    1.51 +          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
    1.52 +          constantPoolOop cp = ik->constants();
    1.53 +          if (cp->is_shared_readonly()) {
    1.54 +            cp->oop_iterate(this);
    1.55 +          }
    1.56 +        }
    1.57 +      }
    1.58 +    }
     1.59 +  }
    1.60 +};
    1.61 +
    1.62 +
    1.63 +// We need to go through all placeholders in the system dictionary and
    1.64 +// try to resolve them into shared classes. Other threads might be in
    1.65 +// the process of loading a shared class and have strong roots on
    1.66 +// their stack to the class without having added the class to the
    1.67 +// dictionary yet. This means the class will be marked during phase 1
    1.68 +// but will not be unmarked during the application of the
    1.69 +// RecursiveAdjustSharedObjectClosure to the SystemDictionary. Note
    1.70 +// that we must not call find_shared_class with non-read-only symbols
    1.71 +// as doing so can cause hash codes to be computed, destroying
    1.72 +// forwarding pointers.
    1.73 +class TraversePlaceholdersClosure : public OopClosure {
    1.74 + public:
    1.75 +  void do_oop(oop* o) {
    1.76 +    oop obj = *o;
    1.77 +    if (obj->klass() == Universe::symbolKlassObj() &&
    1.78 +        obj->is_shared_readonly()) {
    1.79 +      symbolHandle sym((symbolOop) obj);
    1.80 +      oop k = SystemDictionary::find_shared_class(sym);
    1.81 +      if (k != NULL) {
    1.82 +        RecursiveAdjustSharedObjectClosure clo;
    1.83 +        clo.do_oop(&k);
    1.84 +      }
    1.85 +    }
    1.86 +  }
    1.87 +};
    1.88 +
    1.89 +
    1.90 +void CompactingPermGenGen::initialize_performance_counters() {
    1.91 +
    1.92 +  const char* gen_name = "perm";
    1.93 +
    1.94 +  // Generation Counters - generation 2, 1 subspace
    1.95 +  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);
    1.96 +
    1.97 +  _space_counters = new CSpaceCounters(gen_name, 0,
    1.98 +                                       _virtual_space.reserved_size(),
     1.99 +                                       _the_space, _gen_counters);
   1.100 +}
   1.101 +
   1.102 +void CompactingPermGenGen::update_counters() {
   1.103 +  if (UsePerfData) {
   1.104 +    _space_counters->update_all();
   1.105 +    _gen_counters->update_all();
   1.106 +  }
   1.107 +}
   1.108 +
   1.109 +
   1.110 +CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
   1.111 +                                           ReservedSpace shared_rs,
   1.112 +                                           size_t initial_byte_size,
   1.113 +                                           int level, GenRemSet* remset,
   1.114 +                                           ContiguousSpace* space,
   1.115 +                                           PermanentGenerationSpec* spec_) :
   1.116 +  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
   1.117 +                               level, remset, space) {
   1.118 +
   1.119 +  set_spec(spec_);
   1.120 +  if (!UseSharedSpaces && !DumpSharedSpaces) {
   1.121 +    spec()->disable_sharing();
   1.122 +  }
   1.123 +
   1.124 +  // Break virtual space into address ranges for all spaces.
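          +  //
          +  // With sharing enabled, the layout from low to high address is:
          +  //
          +  //   [ unshared | read-only | read-write | misc data | misc code ]
          +  //
          +  // so unshared_end == shared_bottom == readonly_bottom and
          +  // misccode_end == shared_end; the shared bounds are computed
          +  // downward from shared_end below.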
   1.125 +
   1.126 +  if (spec()->enable_shared_spaces()) {
   1.127 +    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
   1.128 +      misccode_end = shared_end;
   1.129 +      misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
   1.130 +      miscdata_end = misccode_bottom;
   1.131 +      miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
   1.132 +      readwrite_end = miscdata_bottom;
   1.133 +      readwrite_bottom =
   1.134 +        readwrite_end - heap_word_size(spec()->read_write_size());
   1.135 +      readonly_end = readwrite_bottom;
   1.136 +      readonly_bottom =
   1.137 +        readonly_end - heap_word_size(spec()->read_only_size());
   1.138 +    shared_bottom = readonly_bottom;
   1.139 +    unshared_end = shared_bottom;
   1.140 +    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
   1.141 +  } else {
   1.142 +    shared_end = (HeapWord*)(rs.base() + rs.size());
   1.143 +      misccode_end = shared_end;
   1.144 +      misccode_bottom = shared_end;
   1.145 +      miscdata_end = shared_end;
   1.146 +      miscdata_bottom = shared_end;
   1.147 +      readwrite_end = shared_end;
   1.148 +      readwrite_bottom = shared_end;
   1.149 +      readonly_end = shared_end;
   1.150 +      readonly_bottom = shared_end;
   1.151 +    shared_bottom = shared_end;
   1.152 +    unshared_end = shared_bottom;
   1.153 +  }
   1.154 +  unshared_bottom = (HeapWord*) rs.base();
   1.155 +
    1.156 +  // Verify that the shared and unshared spaces are adjacent.
   1.157 +  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
    1.158 +  assert(unshared_end > unshared_bottom, "unshared space is empty");
   1.159 +
   1.160 +  // Split reserved memory into pieces.
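          +  // The pieces correspond, in address order, to the read-only,
          +  // read-write, misc data and misc code regions laid out above.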
   1.161 +
   1.162 +  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
   1.163 +                                              UseSharedSpaces);
   1.164 +  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
   1.165 +  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
   1.166 +                                             UseSharedSpaces);
   1.167 +  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
   1.168 +  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
   1.169 +                                             UseSharedSpaces);
   1.170 +  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());
   1.171 +
   1.172 +  _shared_space_size = spec()->read_only_size()
   1.173 +                     + spec()->read_write_size()
   1.174 +                     + spec()->misc_data_size()
   1.175 +                     + spec()->misc_code_size();
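          +  // The shared total is excluded from the growable part of the
          +  // generation; see contiguous_available(), max_capacity() and
          +  // grow_by() below.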
   1.176 +
   1.177 +  // Allocate the unshared (default) space.
   1.178 +  _the_space = new ContigPermSpace(_bts,
   1.179 +               MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
   1.180 +  if (_the_space == NULL)
   1.181 +    vm_exit_during_initialization("Could not allocate an unshared"
   1.182 +                                  " CompactingPermGen Space");
   1.183 +
   1.184 +  // Allocate shared spaces
   1.185 +  if (spec()->enable_shared_spaces()) {
   1.186 +
   1.187 +    // If mapping a shared file, the space is not committed, don't
   1.188 +    // mangle.
   1.189 +    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
   1.190 +    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)
   1.191 +
   1.192 +    // Commit the memory behind the shared spaces if dumping (not
   1.193 +    // mapping).
   1.194 +    if (DumpSharedSpaces) {
   1.195 +      _ro_vs.initialize(ro_rs, spec()->read_only_size());
   1.196 +      _rw_vs.initialize(rw_rs, spec()->read_write_size());
   1.197 +      _md_vs.initialize(md_rs, spec()->misc_data_size());
   1.198 +      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
   1.199 +    }
   1.200 +
   1.201 +    // Allocate the shared spaces.
   1.202 +    _ro_bts = new BlockOffsetSharedArray(
   1.203 +                  MemRegion(readonly_bottom,
   1.204 +                            heap_word_size(spec()->read_only_size())),
   1.205 +                  heap_word_size(spec()->read_only_size()));
   1.206 +    _ro_space = new OffsetTableContigSpace(_ro_bts,
   1.207 +                  MemRegion(readonly_bottom, readonly_end));
   1.208 +    _rw_bts = new BlockOffsetSharedArray(
   1.209 +                  MemRegion(readwrite_bottom,
   1.210 +                            heap_word_size(spec()->read_write_size())),
   1.211 +                  heap_word_size(spec()->read_write_size()));
   1.212 +    _rw_space = new OffsetTableContigSpace(_rw_bts,
   1.213 +                  MemRegion(readwrite_bottom, readwrite_end));
   1.214 +
   1.215 +    // Restore mangling flag.
   1.216 +    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)
   1.217 +
   1.218 +    if (_ro_space == NULL || _rw_space == NULL)
   1.219 +      vm_exit_during_initialization("Could not allocate a shared space");
   1.220 +
   1.221 +    // Cover both shared spaces entirely with cards.
   1.222 +    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));
   1.223 +
   1.224 +    if (UseSharedSpaces) {
   1.225 +
   1.226 +      // Map in the regions in the shared file.
   1.227 +      FileMapInfo* mapinfo = FileMapInfo::current_info();
   1.228 +      size_t image_alignment = mapinfo->alignment();
   1.229 +      CollectedHeap* ch = Universe::heap();
   1.230 +      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
   1.231 +          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
   1.232 +          (!mapinfo->map_space(md, md_rs, NULL))      ||
   1.233 +          (!mapinfo->map_space(mc, mc_rs, NULL))      ||
   1.234 +          // check the alignment constraints
   1.235 +          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
   1.236 +           image_alignment !=
   1.237 +           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
   1.238 +        // Base addresses didn't match; skip sharing, but continue
   1.239 +        shared_rs.release();
   1.240 +        spec()->disable_sharing();
   1.241 +        // If -Xshare:on is specified, print out the error message and exit VM,
   1.242 +        // otherwise, set UseSharedSpaces to false and continue.
   1.243 +        if (RequireSharedSpaces) {
   1.244 +          vm_exit_during_initialization("Unable to use shared archive.", NULL);
   1.245 +        } else {
   1.246 +          FLAG_SET_DEFAULT(UseSharedSpaces, false);
   1.247 +        }
   1.248 +
   1.249 +        // Note: freeing the block offset array objects does not
   1.250 +        // currently free up the underlying storage.
   1.251 +        delete _ro_bts;
   1.252 +        _ro_bts = NULL;
   1.253 +        delete _ro_space;
   1.254 +        _ro_space = NULL;
   1.255 +        delete _rw_bts;
   1.256 +        _rw_bts = NULL;
   1.257 +        delete _rw_space;
   1.258 +        _rw_space = NULL;
   1.259 +        shared_end = (HeapWord*)(rs.base() + rs.size());
   1.260 +        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
   1.261 +      }
   1.262 +    }
   1.263 +
   1.264 +    // Reserved region includes shared spaces for oop.is_in_reserved().
   1.265 +    _reserved.set_end(shared_end);
   1.266 +
   1.267 +  } else {
   1.268 +    _ro_space = NULL;
   1.269 +    _rw_space = NULL;
   1.270 +  }
   1.271 +}
   1.272 +
   1.273 +
   1.274 +// Do a complete scan of the shared read write space to catch all
   1.275 +// objects which contain references to any younger generation.  Forward
   1.276 +// the pointers.  Avoid space_iterate, as actually visiting all the
   1.277 +// objects in the space will page in more objects than we need.
   1.278 +// Instead, use the system dictionary as strong roots into the read
   1.279 +// write space.
   1.280 +
   1.281 +void CompactingPermGenGen::pre_adjust_pointers() {
   1.282 +  if (spec()->enable_shared_spaces()) {
   1.283 +    RecursiveAdjustSharedObjectClosure blk;
   1.284 +    Universe::oops_do(&blk);
   1.285 +    StringTable::oops_do(&blk);
   1.286 +    SystemDictionary::always_strong_classes_do(&blk);
   1.287 +    TraversePlaceholdersClosure tpc;
   1.288 +    SystemDictionary::placeholders_do(&tpc);
   1.289 +  }
   1.290 +}
   1.291 +
   1.292 +
   1.293 +#ifdef ASSERT
   1.294 +class VerifyMarksClearedClosure : public ObjectClosure {
   1.295 +public:
   1.296 +  void do_object(oop obj) {
   1.297 +    assert(SharedSkipVerify || !obj->mark()->is_marked(),
   1.298 +           "Shared oop still marked?");
   1.299 +  }
   1.300 +};
   1.301 +#endif
   1.302 +
   1.303 +
   1.304 +void CompactingPermGenGen::post_compact() {
   1.305 +#ifdef ASSERT
   1.306 +  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
   1.307 +    VerifyMarksClearedClosure blk;
   1.308 +    rw_space()->object_iterate(&blk);
   1.309 +  }
   1.310 +#endif
   1.311 +}
   1.312 +
   1.313 +
   1.314 +void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
   1.315 +  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
   1.316 +  if (spec()->enable_shared_spaces()) {
   1.317 +#ifdef PRODUCT
    1.318 +    // Making the rw_space walkable pages in the entire space and is
    1.319 +    // avoided in product builds; however, the Verify options require it.
   1.320 +    ShouldNotReachHere();
   1.321 +#endif
   1.322 +
   1.323 +    blk->do_space(ro_space());
   1.324 +    blk->do_space(rw_space());
   1.325 +  }
   1.326 +}
   1.327 +
   1.328 +
   1.329 +void CompactingPermGenGen::print_on(outputStream* st) const {
   1.330 +  OneContigSpaceCardGeneration::print_on(st);
   1.331 +  if (spec()->enable_shared_spaces()) {
   1.332 +    st->print("    ro");
   1.333 +    ro_space()->print_on(st);
   1.334 +    st->print("    rw");
   1.335 +    rw_space()->print_on(st);
   1.336 +  } else {
   1.337 +    st->print_cr("No shared spaces configured.");
   1.338 +  }
   1.339 +}
   1.340 +
   1.341 +
   1.342 +// References from the perm gen to the younger generation objects may
   1.343 +// occur in static fields in Java classes or in constant pool references
   1.344 +// to String objects.
   1.345 +
   1.346 +void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
   1.347 +  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
   1.348 +  if (spec()->enable_shared_spaces()) {
   1.349 +    blk->set_generation(this);
   1.350 +    // ro_space has no younger gen refs.
   1.351 +    _rs->younger_refs_in_space_iterate(rw_space(), blk);
   1.352 +    blk->reset_generation();
   1.353 +  }
   1.354 +}
   1.355 +
   1.356 +
   1.357 +// Shared spaces are addressed in pre_adjust_pointers.
   1.358 +void CompactingPermGenGen::adjust_pointers() {
   1.359 +  the_space()->adjust_pointers();
   1.360 +}
   1.361 +
   1.362 +
   1.363 +void CompactingPermGenGen::compact() {
   1.364 +  the_space()->compact();
   1.365 +}
   1.366 +
   1.367 +
   1.368 +size_t CompactingPermGenGen::contiguous_available() const {
   1.369 +  // Don't include shared spaces.
   1.370 +  return OneContigSpaceCardGeneration::contiguous_available()
   1.371 +         - _shared_space_size;
   1.372 +}
   1.373 +
   1.374 +size_t CompactingPermGenGen::max_capacity() const {
   1.375 +  // Don't include shared spaces.
   1.376 +  assert(UseSharedSpaces || (_shared_space_size == 0),
   1.377 +    "If not used, the size of shared spaces should be 0");
   1.378 +  return OneContigSpaceCardGeneration::max_capacity()
   1.379 +          - _shared_space_size;
   1.380 +}
   1.381 +
   1.382 +
   1.383 +
   1.384 +bool CompactingPermGenGen::grow_by(size_t bytes) {
   1.385 +  // Don't allow _virtual_size to expand into shared spaces.
   1.386 +  size_t max_bytes = _virtual_space.uncommitted_size() - _shared_space_size;
    1.387 +  if (bytes > max_bytes) {
    1.388 +    bytes = max_bytes;
   1.389 +  }
   1.390 +  return OneContigSpaceCardGeneration::grow_by(bytes);
   1.391 +}
   1.392 +
   1.393 +
   1.394 +void CompactingPermGenGen::grow_to_reserved() {
   1.395 +  // Don't allow _virtual_size to expand into shared spaces.
   1.396 +  if (_virtual_space.uncommitted_size() > _shared_space_size) {
   1.397 +    size_t remaining_bytes =
   1.398 +      _virtual_space.uncommitted_size() - _shared_space_size;
   1.399 +    bool success = OneContigSpaceCardGeneration::grow_by(remaining_bytes);
   1.400 +    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
   1.401 +  }
   1.402 +}
   1.403 +
   1.404 +
    1.405 +// There are no young generation references; clear this generation's
    1.406 +// main space's card table entries.  Do NOT clear the card table
    1.407 +// entries for the read-only space (its cards are always clear) or the
    1.408 +// read-write space (its cards carry valuable information).
   1.409 +
   1.410 +void CompactingPermGenGen::clear_remembered_set() {
   1.411 +  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
   1.412 +}
   1.413 +
   1.414 +
    1.415 +// Objects in this generation's main space may have moved; invalidate
   1.416 +// that space's cards.  Do NOT invalidate the card table entries for the
   1.417 +// read-only or read-write spaces, as those objects never move.
   1.418 +
   1.419 +void CompactingPermGenGen::invalidate_remembered_set() {
   1.420 +  _rs->invalidate(used_region());
   1.421 +}
   1.422 +
   1.423 +
   1.424 +void CompactingPermGenGen::verify(bool allow_dirty) {
   1.425 +  the_space()->verify(allow_dirty);
   1.426 +  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
   1.427 +    ro_space()->verify(allow_dirty);
   1.428 +    rw_space()->verify(allow_dirty);
   1.429 +  }
   1.430 +}
   1.431 +
   1.432 +
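          +// The space boundaries are class-static so that code elsewhere can
          +// classify an address as shared or unshared without a reference to
          +// this generation.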
   1.433 +HeapWord* CompactingPermGenGen::unshared_bottom;
   1.434 +HeapWord* CompactingPermGenGen::unshared_end;
   1.435 +HeapWord* CompactingPermGenGen::shared_bottom;
   1.436 +HeapWord* CompactingPermGenGen::shared_end;
   1.437 +HeapWord* CompactingPermGenGen::readonly_bottom;
   1.438 +HeapWord* CompactingPermGenGen::readonly_end;
   1.439 +HeapWord* CompactingPermGenGen::readwrite_bottom;
   1.440 +HeapWord* CompactingPermGenGen::readwrite_end;
   1.441 +HeapWord* CompactingPermGenGen::miscdata_bottom;
   1.442 +HeapWord* CompactingPermGenGen::miscdata_end;
   1.443 +HeapWord* CompactingPermGenGen::misccode_bottom;
   1.444 +HeapWord* CompactingPermGenGen::misccode_end;
   1.445 +
   1.446 +// JVM/TI RedefineClasses() support:
   1.447 +bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
   1.448 +  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   1.449 +
   1.450 +  if (UseSharedSpaces) {
   1.451 +    // remap the shared readonly space to shared readwrite, private
   1.452 +    FileMapInfo* mapinfo = FileMapInfo::current_info();
   1.453 +    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
   1.454 +      return false;
   1.455 +    }
   1.456 +  }
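          +  // Either the read-only space was remapped or sharing is not in
          +  // use; in both cases redefinition can proceed.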
   1.457 +  return true;
   1.458 +}
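          +
          +// A sketch of the expected calling pattern (hypothetical caller; the
          +// real call site lives in the JVM/TI RedefineClasses support):
          +//
          +//   if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) {
          +//     return JVMTI_ERROR_INTERNAL;  // shared classes stay read-only
          +//   }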
   1.459 +
   1.460 +void** CompactingPermGenGen::_vtbl_list;
