src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

changeset 4037:da91efe96a93
parent    4016:c9814fadeb38
child     4063:9646b7ff4d14
child     4077:a7509aff1b06
     1.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 31 16:39:35 2012 -0700
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Sep 01 13:25:18 2012 -0400
     1.3 @@ -463,7 +463,8 @@
     1.4    G1CollectorPolicy* g1p = g1h->g1_policy();
     1.5    HeapRegion* hr = heap_region_containing(p);
     1.6    if (hr == NULL) {
     1.7 -     // perm gen (or null)
     1.8 +     // null
     1.9 +     assert(p == NULL, err_msg("Not NULL " PTR_FORMAT, p));
    1.10       return false;
    1.11    } else {
    1.12      return !hr->isHumongous();
    1.13 @@ -1285,6 +1286,8 @@
    1.14  
    1.15    print_heap_before_gc();
    1.16  
    1.17 +  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
    1.18 +
    1.19    HRSPhaseSetter x(HRSPhaseFullGC);
    1.20    verify_region_sets_optional();
    1.21  
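
The value captured here is the metaspace footprint going into the collection; read again after the full GC (and after the ClassLoaderDataGraph::purge() added below) it yields the usual before/after line in the GC log. A minimal sketch of that reporting, assuming a hypothetical helper (the name and output format are illustrative, not from this patch):

    // Illustrative helper: report how much metadata the full GC reclaimed.
    // MetaspaceAux::used_in_bytes() is the same accessor captured above.
    static void print_metaspace_change(size_t metadata_prev_used) {
      gclog_or_tty->print_cr(" [Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K]",
                             metadata_prev_used / K,
                             MetaspaceAux::used_in_bytes() / K);
    }
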
    1.22 @@ -1402,6 +1405,9 @@
    1.23      assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
    1.24      ref_processor_stw()->verify_no_references_recorded();
    1.25  
    1.26 +    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    1.27 +    ClassLoaderDataGraph::purge();
    1.28 +
    1.29      // Note: since we've just done a full GC, concurrent
    1.30      // marking is no longer active. Therefore we need not
    1.31      // re-enable reference discovery for the CM ref processor.
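
purge() is what actually returns the storage: metaspaces owned by class loaders found dead during this full GC are deleted, and their entries are unlinked from the loader_data graph. A self-contained model of that walk (all types below are illustrative stand-ins; the real code lives in classLoaderData.cpp):

    #include <cstdlib>
    struct CLDNode {               // stand-in for ClassLoaderData
      CLDNode* next;               // link in the loader_data graph
      void*    metaspace;          // per-loader metadata storage
      bool     alive;              // result of full-GC marking
    };
    static CLDNode* purge_model(CLDNode* head) {
      CLDNode** link = &head;
      while (CLDNode* cur = *link) {
        if (!cur->alive) {
          *link = cur->next;       // unlink the dead loader
          free(cur->metaspace);    // delete its metaspace
          free(cur);
        } else {
          link = &cur->next;
        }
      }
      return head;
    }
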
    1.32 @@ -1475,8 +1481,7 @@
    1.33      }
    1.34  
    1.35      if (true) { // FIXME
    1.36 -      // Ask the permanent generation to adjust size for full collections
    1.37 -      perm()->compute_new_size();
    1.38 +      MetaspaceGC::compute_new_size();
    1.39      }
    1.40  
    1.41      // Start a new incremental collection set for the next pause
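
MetaspaceGC::compute_new_size() takes over the role the perm gen's compute_new_size() played: resizing the metadata capacity target after a full collection. A simplified model of the free-ratio band such a policy maintains (the real logic is in metaspace.cpp and is driven by the MinMetaspaceFreeRatio / MaxMetaspaceFreeRatio flags; here the ratios are fractions rather than the flags' percentages):

    static size_t new_capacity_model(size_t used, size_t capacity,
                                     double min_free, double max_free) {
      size_t lo = (size_t)(used / (1.0 - min_free)); // grow: keep >= min_free free
      size_t hi = (size_t)(used / (1.0 - max_free)); // shrink: keep <= max_free free
      if (capacity < lo) return lo;
      if (capacity > hi) return hi;
      return capacity;                               // already inside the band
    }
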
    1.42 @@ -1990,8 +1995,6 @@
    1.43    _cg1r = new ConcurrentG1Refine();
    1.44  
    1.45    // Reserve the maximum.
    1.46 -  PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
    1.47 -  // Includes the perm-gen.
    1.48  
    1.49    // When compressed oops are enabled, the preferred heap base
    1.50    // is calculated by subtracting the requested size from the
    1.51 @@ -2005,44 +2008,11 @@
    1.52    // compressed oops mode.
    1.53  
    1.54    // Since max_byte_size is aligned to the size of a heap region (checked
    1.55 -  // above), we also need to align the perm gen size as it might not be.
    1.56 -  const size_t total_reserved = max_byte_size +
    1.57 -                                align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
    1.58 -  Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
    1.59 -
    1.60 -  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
    1.61 -
    1.62 -  ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
    1.63 -                            UseLargePages, addr);
    1.64 -
    1.65 -  if (UseCompressedOops) {
    1.66 -    if (addr != NULL && !heap_rs.is_reserved()) {
    1.67 -      // Failed to reserve at specified address - the requested memory
    1.68 -      // region is taken already, for example, by 'java' launcher.
    1.69 -      // Try again to reserver heap higher.
    1.70 -      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
    1.71 -
    1.72 -      ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
    1.73 -                                 UseLargePages, addr);
    1.74 -
    1.75 -      if (addr != NULL && !heap_rs0.is_reserved()) {
    1.76 -        // Failed to reserve at specified address again - give up.
    1.77 -        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
    1.78 -        assert(addr == NULL, "");
    1.79 -
    1.80 -        ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
    1.81 -                                   UseLargePages, addr);
    1.82 -        heap_rs = heap_rs1;
    1.83 -      } else {
    1.84 -        heap_rs = heap_rs0;
    1.85 -      }
    1.86 -    }
    1.87 -  }
    1.88 -
    1.89 -  if (!heap_rs.is_reserved()) {
    1.90 -    vm_exit_during_initialization("Could not reserve enough space for object heap");
    1.91 -    return JNI_ENOMEM;
    1.92 -  }
    1.93 +  // above).
    1.94 +  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
    1.95 +
    1.96 +  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
    1.97 +                                                 HeapRegion::GrainBytes);
    1.98  
    1.99    // It is important to do this in a way such that concurrent readers can't
   1.100    // temporarily think somethings in the heap.  (I've actually seen this
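
The block deleted above is the compressed-oops retry ladder; Universe::reserve_heap() now centralizes it. A sketch condensed from the removed lines (error handling and the UseCompressedOops guard simplified, so this is the shape, not the shared implementation): try the preferred base for unscaled narrow oops, fall back to a zero-based base, then to an unconstrained heap-based reservation before giving up:

    static ReservedHeapSpace reserve_heap_sketch(size_t total_reserved) {
      Universe::NARROW_OOP_MODE modes[] = { Universe::UnscaledNarrowOop,
                                            Universe::ZeroBasedNarrowOop,
                                            Universe::HeapBasedNarrowOop };
      for (int i = 0; i < 3; i++) {
        char* addr = Universe::preferred_heap_base(total_reserved, modes[i]);
        ReservedHeapSpace rs(total_reserved, HeapRegion::GrainBytes,
                             UseLargePages, addr);
        if (rs.is_reserved()) {
          return rs;               // reserved at (or near) this preferred base
        }
      }
      vm_exit_during_initialization("Could not reserve enough space for object heap");
      return ReservedHeapSpace(0, 0, false, NULL); // not reached
    }
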
   1.101 @@ -2076,9 +2046,6 @@
   1.102    ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
   1.103    _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
   1.104                             g1_rs.size()/HeapWordSize);
   1.105 -  ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
   1.106 -
   1.107 -  _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
   1.108  
   1.109    _g1_storage.initialize(g1_rs, 0);
   1.110    _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
   1.111 @@ -2492,21 +2459,6 @@
   1.112    FullGCCount_lock->notify_all();
   1.113  }
   1.114  
   1.115 -void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   1.116 -  assert_at_safepoint(true /* should_be_vm_thread */);
   1.117 -  GCCauseSetter gcs(this, cause);
   1.118 -  switch (cause) {
   1.119 -    case GCCause::_heap_inspection:
   1.120 -    case GCCause::_heap_dump: {
   1.121 -      HandleMark hm;
   1.122 -      do_full_collection(false);         // don't clear all soft refs
   1.123 -      break;
   1.124 -    }
   1.125 -    default: // XXX FIX ME
   1.126 -      ShouldNotReachHere(); // Unexpected use of this function
   1.127 -  }
   1.128 -}
   1.129 -
   1.130  void G1CollectedHeap::collect(GCCause::Cause cause) {
   1.131    assert_heap_not_locked();
   1.132  
   1.133 @@ -2580,7 +2532,7 @@
   1.134      HeapRegion* hr = heap_region_containing_raw(p);
   1.135      return hr->is_in(p);
   1.136    } else {
   1.137 -    return _perm_gen->as_gen()->is_in(p);
   1.138 +    return false;
   1.139    }
   1.140  }
   1.141  
   1.142 @@ -2591,9 +2543,9 @@
   1.143  
   1.144  class IterateOopClosureRegionClosure: public HeapRegionClosure {
   1.145    MemRegion _mr;
   1.146 -  OopClosure* _cl;
   1.147 +  ExtendedOopClosure* _cl;
   1.148  public:
   1.149 -  IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
   1.150 +  IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
   1.151      : _mr(mr), _cl(cl) {}
   1.152    bool doHeapRegion(HeapRegion* r) {
   1.153      if (!r->continuesHumongous()) {
   1.154 @@ -2603,20 +2555,14 @@
   1.155    }
   1.156  };
   1.157  
   1.158 -void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
   1.159 +void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
   1.160    IterateOopClosureRegionClosure blk(_g1_committed, cl);
   1.161    heap_region_iterate(&blk);
   1.162 -  if (do_perm) {
   1.163 -    perm_gen()->oop_iterate(cl);
   1.164 -  }
   1.165 -}
   1.166 -
   1.167 -void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
   1.168 +}
   1.169 +
   1.170 +void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   1.171    IterateOopClosureRegionClosure blk(mr, cl);
   1.172    heap_region_iterate(&blk);
   1.173 -  if (do_perm) {
   1.174 -    perm_gen()->oop_iterate(cl);
   1.175 -  }
   1.176  }
   1.177  
   1.178  // Iterates an ObjectClosure over all objects within a HeapRegion.
   1.179 @@ -2633,12 +2579,9 @@
   1.180    }
   1.181  };
   1.182  
   1.183 -void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
   1.184 +void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
   1.185    IterateObjectClosureRegionClosure blk(cl);
   1.186    heap_region_iterate(&blk);
   1.187 -  if (do_perm) {
   1.188 -    perm_gen()->object_iterate(cl);
   1.189 -  }
   1.190  }
   1.191  
   1.192  void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
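
With the do_perm flag gone, callers of these iterators pass an ExtendedOopClosure, which must handle both plain and compressed oop locations. A minimal counting closure as a usage sketch (illustrative, not part of this patch):

    class CountLiveOopsClosure : public ExtendedOopClosure {
      size_t _count;
    public:
      CountLiveOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
      virtual void do_oop(narrowOop* p) { if (!oopDesc::is_null(*p)) _count++; }
      size_t count() const { return _count; }
    };
    // CountLiveOopsClosure cl;
    // g1h->oop_iterate(&cl);   // no perm-gen flag to pass anymore
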
   1.193 @@ -2983,8 +2926,6 @@
   1.194  
   1.195  Space* G1CollectedHeap::space_containing(const void* addr) const {
   1.196    Space* res = heap_region_containing(addr);
   1.197 -  if (res == NULL)
   1.198 -    res = perm_gen()->space_containing(addr);
   1.199    return res;
   1.200  }
   1.201  
   1.202 @@ -3139,7 +3080,7 @@
   1.203          guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
   1.204        }
   1.205  
   1.206 -      o->oop_iterate(&isLive);
   1.207 +      o->oop_iterate_no_header(&isLive);
   1.208        if (!_hr->obj_allocated_since_prev_marking(o)) {
   1.209          size_t obj_size = o->size();    // Make sure we don't overflow
   1.210          _live_bytes += (obj_size * HeapWordSize);
   1.211 @@ -3226,6 +3167,38 @@
   1.212    }
   1.213  };
   1.214  
   1.215 +class YoungRefCounterClosure : public OopClosure {
   1.216 +  G1CollectedHeap* _g1h;
   1.217 +  int              _count;
   1.218 + public:
   1.219 +  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
   1.220 +  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
   1.221 +  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   1.222 +
   1.223 +  int count() { return _count; }
    1.224 +  void reset_count() { _count = 0; }
   1.225 +};
   1.226 +
   1.227 +class VerifyKlassClosure: public KlassClosure {
   1.228 +  YoungRefCounterClosure _young_ref_counter_closure;
   1.229 +  OopClosure *_oop_closure;
   1.230 + public:
   1.231 +  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
   1.232 +  void do_klass(Klass* k) {
   1.233 +    k->oops_do(_oop_closure);
   1.234 +
   1.235 +    _young_ref_counter_closure.reset_count();
   1.236 +    k->oops_do(&_young_ref_counter_closure);
   1.237 +    if (_young_ref_counter_closure.count() > 0) {
    1.238 +      guarantee(k->has_modified_oops(), err_msg("Klass %p has young refs but is not dirty.", k));
   1.239 +    }
   1.240 +  }
   1.241 +};
   1.242 +
   1.243 +// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
   1.244 +//       pass it as the perm_blk to SharedHeap::process_strong_roots.
    1.246 +//       When process_strong_roots stops calling perm_blk->younger_refs_iterate
   1.246 +//       we can change this closure to extend the simpler OopClosure.
   1.247  class VerifyRootsClosure: public OopsInGenClosure {
   1.248  private:
   1.249    G1CollectedHeap* _g1h;
   1.250 @@ -3303,39 +3276,31 @@
   1.251  void G1CollectedHeap::verify(bool silent,
   1.252                               VerifyOption vo) {
   1.253    if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
   1.254 -    if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
   1.255 +    if (!silent) { gclog_or_tty->print("Roots "); }
   1.256      VerifyRootsClosure rootsCl(vo);
   1.257  
   1.258      assert(Thread::current()->is_VM_thread(),
   1.259        "Expected to be executed serially by the VM thread at this point");
   1.260  
   1.261      CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
   1.262 +    VerifyKlassClosure klassCl(this, &rootsCl);
   1.263  
   1.264      // We apply the relevant closures to all the oops in the
   1.265      // system dictionary, the string table and the code cache.
   1.266      const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
   1.267  
   1.268 +    // Need cleared claim bits for the strong roots processing
   1.269 +    ClassLoaderDataGraph::clear_claimed_marks();
   1.270 +
   1.271      process_strong_roots(true,      // activate StrongRootsScope
   1.272 -                         true,      // we set "collecting perm gen" to true,
   1.273 -                                    // so we don't reset the dirty cards in the perm gen.
   1.274 +                         false,     // we set "is scavenging" to false,
   1.275 +                                    // so we don't reset the dirty cards.
   1.276                           ScanningOption(so),  // roots scanning options
   1.277                           &rootsCl,
   1.278                           &blobsCl,
   1.279 -                         &rootsCl);
   1.280 -
   1.281 -    // If we're verifying after the marking phase of a Full GC then we can't
   1.282 -    // treat the perm gen as roots into the G1 heap. Some of the objects in
   1.283 -    // the perm gen may be dead and hence not marked. If one of these dead
   1.284 -    // objects is considered to be a root then we may end up with a false
   1.285 -    // "Root location <x> points to dead ob <y>" failure.
   1.286 -    if (vo != VerifyOption_G1UseMarkWord) {
   1.287 -      // Since we used "collecting_perm_gen" == true above, we will not have
   1.288 -      // checked the refs from perm into the G1-collected heap. We check those
   1.289 -      // references explicitly below. Whether the relevant cards are dirty
   1.290 -      // is checked further below in the rem set verification.
   1.291 -      if (!silent) { gclog_or_tty->print("Permgen roots "); }
   1.292 -      perm_gen()->oop_iterate(&rootsCl);
   1.293 -    }
   1.294 +                         &klassCl
   1.295 +                         );
   1.296 +
   1.297      bool failures = rootsCl.failures();
   1.298  
   1.299      if (vo != VerifyOption_G1UseMarkWord) {
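
clear_claimed_marks() matters here because strong-roots processing has each worker claim a ClassLoaderData before walking it; a claim left over from an earlier pass would make this verification silently skip that loader. A standalone model of the claim handshake (C++11 atomics used for the model only; the real claim lives on ClassLoaderData and uses HotSpot's Atomic primitives):

    #include <atomic>
    struct CLDClaimModel {
      std::atomic<int> _claimed;
      CLDClaimModel() : _claimed(0) {}
      bool try_claim() {           // first worker to CAS from 0 wins the CLD
        int expected = 0;
        return _claimed.compare_exchange_strong(expected, 1);
      }
      void clear_claim() {         // done once per phase, as in the line above
        _claimed.store(0);
      }
    };
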
   1.300 @@ -3431,7 +3396,6 @@
   1.301    st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
   1.302              (size_t) survivor_regions * HeapRegion::GrainBytes / K);
   1.303    st->cr();
   1.304 -  perm()->as_gen()->print_on(st);
   1.305  }
   1.306  
   1.307  void G1CollectedHeap::print_extended_on(outputStream* st) const {
   1.308 @@ -3849,7 +3813,6 @@
   1.309          if (g1_policy()->during_initial_mark_pause()) {
   1.310            concurrent_mark()->checkpointRootsInitialPre();
   1.311          }
   1.312 -        perm_gen()->save_marks();
   1.313  
   1.314  #if YOUNG_LIST_VERBOSE
   1.315          gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
   1.316 @@ -4642,6 +4605,13 @@
   1.317    return obj;
   1.318  }
   1.319  
   1.320 +template <class T>
   1.321 +void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
   1.322 +  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
   1.323 +    _scanned_klass->record_modified_oops();
   1.324 +  }
   1.325 +}
   1.326 +
   1.327  template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
   1.328  template <class T>
   1.329  void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
   1.330 @@ -4671,6 +4641,8 @@
   1.331      // When scanning the RS, we only care about objs in CS.
   1.332      if (barrier == G1BarrierRS) {
   1.333        _par_scan_state->update_rs(_from, p, _worker_id);
   1.334 +    } else if (barrier == G1BarrierKlass) {
   1.335 +      do_klass_barrier(p, forwardee);
   1.336      }
   1.337    } else {
   1.338      // The object is not in collection set. If we're a root scanning
   1.339 @@ -4799,6 +4771,32 @@
   1.340    pss->retire_alloc_buffers();
   1.341  }
   1.342  
   1.343 +class G1KlassScanClosure : public KlassClosure {
    1.344 +  G1ParCopyHelper* _closure;
    1.345 +  bool             _process_only_dirty;
    1.346 +  int              _count;
    1.347 + public:
    1.348 +  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
    1.349 +      : _closure(closure), _process_only_dirty(process_only_dirty), _count(0) {}
   1.350 +  void do_klass(Klass* klass) {
    1.351 +    // If the klass has not been dirtied we know that there are
    1.352 +    // no references into the young gen and we can skip it.
    1.353 +    if (!_process_only_dirty || klass->has_modified_oops()) {
   1.354 +      // Clean the klass since we're going to scavenge all the metadata.
   1.355 +      klass->clear_modified_oops();
   1.356 +
   1.357 +      // Tell the closure that this klass is the Klass to scavenge
   1.358 +      // and is the one to dirty if oops are left pointing into the young gen.
   1.359 +      _closure->set_scanned_klass(klass);
   1.360 +
   1.361 +      klass->oops_do(_closure);
   1.362 +
   1.363 +      _closure->set_scanned_klass(NULL);
   1.364 +    }
   1.365 +    _count++;
   1.366 +  }
   1.367 +};
   1.368 +
   1.369  class G1ParTask : public AbstractGangTask {
   1.370  protected:
   1.371    G1CollectedHeap*       _g1h;
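
Together with do_klass_barrier() earlier in this patch, the closure above implements a card-table-like filter for class metadata: copying an object into a young region dirties the scanned Klass (record_modified_oops), so a later young GC can skip clean klasses outright. Note the wiring in the next hunk passes process_only_dirty == gcs_are_young(), so mixed and initial-mark pauses scan every klass. A self-contained model of the round trip (types are illustrative):

    #include <vector>
    struct KlassModel {
      bool modified;               // has_modified_oops()
    };
    // Young GC pass: visit only dirty klasses, cleaning each as it is scanned.
    static int scan_klasses_model(std::vector<KlassModel>& klasses,
                                  bool process_only_dirty) {
      int scanned = 0;
      for (size_t i = 0; i < klasses.size(); i++) {
        KlassModel& k = klasses[i];
        if (!process_only_dirty || k.modified) {
          k.modified = false;      // clear_modified_oops()
          scanned++;               // stands in for klass->oops_do(closure); the
        }                          // copy closure re-dirties k if a field still
      }                            // points into the young gen afterwards
      return scanned;
    }
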
   1.372 @@ -4866,28 +4864,34 @@
   1.373        pss.set_partial_scan_closure(&partial_scan_cl);
   1.374  
   1.375        G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
   1.376 -      G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
   1.377 +      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
   1.378  
   1.379        G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
   1.380 -      G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
   1.381 +      G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
   1.382 +
   1.383 +      bool only_young                 = _g1h->g1_policy()->gcs_are_young();
   1.384 +      G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
   1.385 +      G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
   1.386  
   1.387        OopClosure*                    scan_root_cl = &only_scan_root_cl;
   1.388 -      OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
   1.389 +      G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
   1.390  
   1.391        if (_g1h->g1_policy()->during_initial_mark_pause()) {
   1.392          // We also need to mark copied objects.
   1.393          scan_root_cl = &scan_mark_root_cl;
   1.394 -        scan_perm_cl = &scan_mark_perm_cl;
   1.395 +        scan_klasses_cl = &scan_mark_klasses_cl_s;
   1.396        }
   1.397  
   1.398        G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
   1.399  
   1.400 +      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
   1.401 +
   1.402        pss.start_strong_roots();
   1.403 -      _g1h->g1_process_strong_roots(/* not collecting perm */ false,
   1.404 -                                    SharedHeap::SO_AllClasses,
   1.405 +      _g1h->g1_process_strong_roots(/* is scavenging */ true,
   1.406 +                                    SharedHeap::ScanningOption(so),
   1.407                                      scan_root_cl,
   1.408                                      &push_heap_rs_cl,
   1.409 -                                    scan_perm_cl,
   1.410 +                                    scan_klasses_cl,
   1.411                                      worker_id);
   1.412        pss.end_strong_roots();
   1.413  
   1.414 @@ -4987,30 +4991,29 @@
   1.415  
   1.416  void
   1.417  G1CollectedHeap::
   1.418 -g1_process_strong_roots(bool collecting_perm_gen,
   1.419 +g1_process_strong_roots(bool is_scavenging,
   1.420                          ScanningOption so,
   1.421                          OopClosure* scan_non_heap_roots,
   1.422                          OopsInHeapRegionClosure* scan_rs,
   1.423 -                        OopsInGenClosure* scan_perm,
   1.424 +                        G1KlassScanClosure* scan_klasses,
   1.425                          int worker_i) {
   1.426  
   1.427 -  // First scan the strong roots, including the perm gen.
   1.428 +  // First scan the strong roots
   1.429    double ext_roots_start = os::elapsedTime();
   1.430    double closure_app_time_sec = 0.0;
   1.431  
   1.432    BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
   1.433 -  BufferingOopsInGenClosure buf_scan_perm(scan_perm);
   1.434 -  buf_scan_perm.set_generation(perm_gen());
   1.435  
   1.436    // Walk the code cache w/o buffering, because StarTask cannot handle
   1.437    // unaligned oop locations.
   1.438    G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
   1.439  
   1.440    process_strong_roots(false, // no scoping; this is parallel code
   1.441 -                       collecting_perm_gen, so,
   1.442 +                       is_scavenging, so,
   1.443                         &buf_scan_non_heap_roots,
   1.444                         &eager_scan_code_roots,
   1.445 -                       &buf_scan_perm);
   1.446 +                       scan_klasses
   1.447 +                       );
   1.448  
   1.449    // Now the CM ref_processor roots.
   1.450    if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
   1.451 @@ -5023,10 +5026,9 @@
   1.452  
   1.453    // Finish up any enqueued closure apps (attributed as object copy time).
   1.454    buf_scan_non_heap_roots.done();
   1.455 -  buf_scan_perm.done();
   1.456 -
   1.457 -  double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
   1.458 -                                buf_scan_non_heap_roots.closure_app_seconds();
   1.459 +
   1.460 +  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
   1.461 +
   1.462    g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
   1.463  
   1.464    double ext_root_time_ms =
   1.465 @@ -5053,7 +5055,6 @@
   1.466    if (scan_rs != NULL) {
   1.467      g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
   1.468    }
   1.469 -
   1.470    _process_strong_tasks->all_tasks_completed();
   1.471  }
   1.472  
   1.473 @@ -5113,17 +5114,17 @@
   1.474  class G1CopyingKeepAliveClosure: public OopClosure {
   1.475    G1CollectedHeap*         _g1h;
   1.476    OopClosure*              _copy_non_heap_obj_cl;
   1.477 -  OopsInHeapRegionClosure* _copy_perm_obj_cl;
   1.478 +  OopsInHeapRegionClosure* _copy_metadata_obj_cl;
   1.479    G1ParScanThreadState*    _par_scan_state;
   1.480  
   1.481  public:
   1.482    G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
   1.483                              OopClosure* non_heap_obj_cl,
   1.484 -                            OopsInHeapRegionClosure* perm_obj_cl,
   1.485 +                            OopsInHeapRegionClosure* metadata_obj_cl,
   1.486                              G1ParScanThreadState* pss):
   1.487      _g1h(g1h),
   1.488      _copy_non_heap_obj_cl(non_heap_obj_cl),
   1.489 -    _copy_perm_obj_cl(perm_obj_cl),
   1.490 +    _copy_metadata_obj_cl(metadata_obj_cl),
   1.491      _par_scan_state(pss)
   1.492    {}
   1.493  
   1.494 @@ -5148,22 +5149,20 @@
   1.495        // phase of reference processing) the object and it's followers
   1.496        // will be copied, the reference field set to point to the
   1.497        // new location, and the RSet updated. Otherwise we need to
   1.498 -      // use the the non-heap or perm closures directly to copy
    1.499 +      // use the non-heap or metadata closures directly to copy
   1.500        // the refernt object and update the pointer, while avoiding
   1.501        // updating the RSet.
   1.502  
   1.503        if (_g1h->is_in_g1_reserved(p)) {
   1.504          _par_scan_state->push_on_queue(p);
   1.505        } else {
   1.506 -        // The reference field is not in the G1 heap.
   1.507 -        if (_g1h->perm_gen()->is_in(p)) {
   1.508 -          _copy_perm_obj_cl->do_oop(p);
   1.509 -        } else {
   1.510 +        assert(!ClassLoaderDataGraph::contains((address)p),
   1.511 +               err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
    1.512 +                       PTR_FORMAT, p));
   1.513            _copy_non_heap_obj_cl->do_oop(p);
   1.514          }
   1.515        }
   1.516      }
   1.517 -  }
   1.518  };
   1.519  
   1.520  // Serial drain queue closure. Called as the 'complete_gc'
   1.521 @@ -5258,22 +5257,22 @@
   1.522      pss.set_partial_scan_closure(&partial_scan_cl);
   1.523  
   1.524      G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
   1.525 -    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
   1.526 +    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
   1.527  
   1.528      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
   1.529 -    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
   1.530 +    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
   1.531  
   1.532      OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
   1.533 -    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
   1.534 +    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
   1.535  
   1.536      if (_g1h->g1_policy()->during_initial_mark_pause()) {
   1.537        // We also need to mark copied objects.
   1.538        copy_non_heap_cl = &copy_mark_non_heap_cl;
   1.539 -      copy_perm_cl = &copy_mark_perm_cl;
   1.540 +      copy_metadata_cl = &copy_mark_metadata_cl;
   1.541      }
   1.542  
   1.543      // Keep alive closure.
   1.544 -    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
   1.545 +    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
   1.546  
   1.547      // Complete GC closure
   1.548      G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
   1.549 @@ -5372,18 +5371,18 @@
   1.550  
   1.551  
   1.552      G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
   1.553 -    G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
   1.554 +    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
   1.555  
   1.556      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
   1.557 -    G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
   1.558 +    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
   1.559  
   1.560      OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
   1.561 -    OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
   1.562 +    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
   1.563  
   1.564      if (_g1h->g1_policy()->during_initial_mark_pause()) {
   1.565        // We also need to mark copied objects.
   1.566        copy_non_heap_cl = &copy_mark_non_heap_cl;
   1.567 -      copy_perm_cl = &copy_mark_perm_cl;
   1.568 +      copy_metadata_cl = &copy_mark_metadata_cl;
   1.569      }
   1.570  
   1.571      // Is alive closure
   1.572 @@ -5391,7 +5390,7 @@
   1.573  
   1.574      // Copying keep alive closure. Applied to referent objects that need
   1.575      // to be copied.
   1.576 -    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
   1.577 +    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
   1.578  
   1.579      ReferenceProcessor* rp = _g1h->ref_processor_cm();
   1.580  
   1.581 @@ -5502,22 +5501,22 @@
   1.582    assert(pss.refs()->is_empty(), "pre-condition");
   1.583  
   1.584    G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
   1.585 -  G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
   1.586 +  G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
   1.587  
   1.588    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
   1.589 -  G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
   1.590 +  G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
   1.591  
   1.592    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
   1.593 -  OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
   1.594 +  OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
   1.595  
   1.596    if (_g1h->g1_policy()->during_initial_mark_pause()) {
   1.597      // We also need to mark copied objects.
   1.598      copy_non_heap_cl = &copy_mark_non_heap_cl;
   1.599 -    copy_perm_cl = &copy_mark_perm_cl;
   1.600 +    copy_metadata_cl = &copy_mark_metadata_cl;
   1.601    }
   1.602  
   1.603    // Keep alive closure.
   1.604 -  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
   1.605 +  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
   1.606  
   1.607    // Serial Complete GC closure
   1.608    G1STWDrainQueueClosure drain_queue(this, &pss);
   1.609 @@ -6241,7 +6240,7 @@
   1.610  bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   1.611    HeapRegion* hr = heap_region_containing(p);
   1.612    if (hr == NULL) {
   1.613 -    return is_in_permanent(p);
   1.614 +    return false;
   1.615    } else {
   1.616      return hr->is_in(p);
   1.617    }
