Merge

author     jcoomes
date       Fri, 20 Jun 2008 13:59:55 -0700
changeset  646:337e0e51cd6b
parent     642:e619218327a7
parent     645:05712c37c828
child      652:411c61adc994
child      783:69fefd031e6c

description:
Merge

     1.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jun 20 11:47:53 2008 -0700
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jun 20 13:59:55 2008 -0700
     1.3 @@ -1004,6 +1004,9 @@
     1.4  
     1.5    DEBUG_ONLY(mark_bitmap()->verify_clear();)
     1.6    DEBUG_ONLY(summary_data().verify_clear();)
     1.7 +
     1.8 +  // Have worker threads release resources the next time they run a task.
     1.9 +  gc_task_manager()->release_all_resources();
    1.10  }
    1.11  
    1.12  void PSParallelCompact::post_compact()
    1.13 @@ -1949,12 +1952,6 @@
    1.14    TimeStamp compaction_start;
    1.15    TimeStamp collection_exit;
    1.16  
    1.17 -  // "serial_CM" is needed until the parallel implementation
    1.18 -  // of the move and update is done.
    1.19 -  ParCompactionManager* serial_CM = new ParCompactionManager();
    1.20 -  // Don't initialize more than once.
    1.21 -  // serial_CM->initialize(&summary_data(), mark_bitmap());
    1.22 -
    1.23    ParallelScavengeHeap* heap = gc_heap();
    1.24    GCCause::Cause gc_cause = heap->gc_cause();
    1.25    PSYoungGen* young_gen = heap->young_gen();
    1.26 @@ -1969,6 +1966,10 @@
    1.27    PreGCValues pre_gc_values;
    1.28    pre_compact(&pre_gc_values);
    1.29  
    1.30 +  // Get the compaction manager reserved for the VM thread.
    1.31 +  ParCompactionManager* const vmthread_cm =
    1.32 +    ParCompactionManager::manager_array(gc_task_manager()->workers());
    1.33 +
    1.34    // Place after pre_compact() where the number of invocations is incremented.
    1.35    AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
    1.36  
    1.37 @@ -2008,7 +2009,7 @@
    1.38      bool marked_for_unloading = false;
    1.39  
    1.40      marking_start.update();
    1.41 -    marking_phase(serial_CM, maximum_heap_compaction);
    1.42 +    marking_phase(vmthread_cm, maximum_heap_compaction);
    1.43  
    1.44  #ifndef PRODUCT
    1.45      if (TraceParallelOldGCMarkingPhase) {
    1.46 @@ -2039,7 +2040,7 @@
    1.47  #endif
    1.48  
    1.49      bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
    1.50 -    summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);
    1.51 +    summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
    1.52  
    1.53  #ifdef ASSERT
    1.54      if (VerifyParallelOldWithMarkSweep &&
    1.55 @@ -2067,13 +2068,13 @@
     1.56        // code can use the forwarding pointers to
    1.57        // check the new pointer calculation.  The restore_marks()
    1.58        // has to be done before the real compact.
    1.59 -      serial_CM->set_action(ParCompactionManager::VerifyUpdate);
    1.60 -      compact_perm(serial_CM);
    1.61 -      compact_serial(serial_CM);
    1.62 -      serial_CM->set_action(ParCompactionManager::ResetObjects);
    1.63 -      compact_perm(serial_CM);
    1.64 -      compact_serial(serial_CM);
    1.65 -      serial_CM->set_action(ParCompactionManager::UpdateAndCopy);
    1.66 +      vmthread_cm->set_action(ParCompactionManager::VerifyUpdate);
    1.67 +      compact_perm(vmthread_cm);
    1.68 +      compact_serial(vmthread_cm);
    1.69 +      vmthread_cm->set_action(ParCompactionManager::ResetObjects);
    1.70 +      compact_perm(vmthread_cm);
    1.71 +      compact_serial(vmthread_cm);
    1.72 +      vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy);
    1.73  
    1.74        // For debugging only
    1.75        PSMarkSweep::restore_marks();
    1.76 @@ -2084,16 +2085,14 @@
    1.77      compaction_start.update();
    1.78      // Does the perm gen always have to be done serially because
    1.79      // klasses are used in the update of an object?
    1.80 -    compact_perm(serial_CM);
    1.81 +    compact_perm(vmthread_cm);
    1.82  
    1.83      if (UseParallelOldGCCompacting) {
    1.84        compact();
    1.85      } else {
    1.86 -      compact_serial(serial_CM);
    1.87 +      compact_serial(vmthread_cm);
    1.88      }
    1.89  
    1.90 -    delete serial_CM;
    1.91 -
    1.92      // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
    1.93      // done before resizing.
    1.94      post_compact();
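
The psParallelCompact.cpp change above drops the ParCompactionManager that was newed up and deleted on every collection ("serial_CM") and instead reuses the manager slot reserved for the VM thread, obtained via ParCompactionManager::manager_array(gc_task_manager()->workers()); pre_compact() now also asks the worker threads to release their resources the next time they run a task. The sketch below only illustrates the indexing this relies on, assuming one manager per GC worker plus a trailing slot for the VM thread; the types and names are simplified stand-ins, not the HotSpot declarations.

#include <cassert>
#include <cstddef>
#include <iostream>

// Simplified stand-in for ParCompactionManager; the real class carries
// marking stacks, region stacks, and an action, none of which matter here.
struct CompactionManager {
  std::size_t id;
};

// Assumed layout: one compaction manager per GC worker thread plus one
// trailing slot reserved for the VM thread, so index == number-of-workers
// selects the VM thread's manager.
const std::size_t kWorkers = 4;              // stand-in for gc_task_manager()->workers()
CompactionManager managers[kWorkers + 1];

CompactionManager* manager_at(std::size_t index) {
  assert(index <= kWorkers && "index past the VM-thread slot");
  return &managers[index];
}

int main() {
  for (std::size_t i = 0; i <= kWorkers; ++i) managers[i].id = i;

  // Equivalent of:
  //   ParCompactionManager::manager_array(gc_task_manager()->workers())
  CompactionManager* vmthread_cm = manager_at(kWorkers);
  std::cout << "VM thread uses manager slot " << vmthread_cm->id << "\n";
  return 0;
}

Reusing the reserved slot avoids allocating and destroying a manager on every full GC and lets the VM thread drive the marking, summary, and serial compaction phases with the same kind of manager the workers use.
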
     2.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp	Fri Jun 20 11:47:53 2008 -0700
     2.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp	Fri Jun 20 13:59:55 2008 -0700
     2.3 @@ -196,6 +196,8 @@
     2.4    assert(_whole_heap.contains(new_region),
     2.5             "attempt to cover area not in reserved area");
     2.6    debug_only(verify_guard();)
     2.7 +  // collided is true if the expansion would push into another committed region
     2.8 +  debug_only(bool collided = false;)
     2.9    int const ind = find_covering_region_by_base(new_region.start());
    2.10    MemRegion const old_region = _covered[ind];
    2.11    assert(old_region.start() == new_region.start(), "just checking");
    2.12 @@ -211,12 +213,36 @@
    2.13      }
    2.14      // Align the end up to a page size (starts are already aligned).
    2.15      jbyte* const new_end = byte_after(new_region.last());
    2.16 -    HeapWord* const new_end_aligned =
    2.17 +    HeapWord* new_end_aligned =
    2.18        (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    2.19      assert(new_end_aligned >= (HeapWord*) new_end,
    2.20             "align up, but less");
    2.21 +    int ri = 0;
    2.22 +    for (ri = 0; ri < _cur_covered_regions; ri++) {
    2.23 +      if (ri != ind) {
    2.24 +        if (_committed[ri].contains(new_end_aligned)) {
    2.25 +          assert((new_end_aligned >= _committed[ri].start()) &&
    2.26 +                 (_committed[ri].start() > _committed[ind].start()),
    2.27 +                 "New end of committed region is inconsistent");
    2.28 +          new_end_aligned = _committed[ri].start();
    2.29 +          assert(new_end_aligned > _committed[ind].start(),
    2.30 +            "New end of committed region is before start");
    2.31 +          debug_only(collided = true;)
    2.32 +          // Should only collide with 1 region
    2.33 +          break;
    2.34 +        }
    2.35 +      }
    2.36 +    }
    2.37 +#ifdef ASSERT
    2.38 +    for (++ri; ri < _cur_covered_regions; ri++) {
    2.39 +      assert(!_committed[ri].contains(new_end_aligned),
    2.40 +        "New end of committed region is in a second committed region");
    2.41 +    }
    2.42 +#endif
    2.43      // The guard page is always committed and should not be committed over.
    2.44 -    HeapWord* const new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
    2.45 +    HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
    2.46 +                                              _guard_region.start());
    2.47 +
    2.48      if (new_end_for_commit > cur_committed.end()) {
    2.49        // Must commit new pages.
    2.50        MemRegion const new_committed =
    2.51 @@ -239,9 +265,11 @@
    2.52        if (!uncommit_region.is_empty()) {
    2.53          if (!os::uncommit_memory((char*)uncommit_region.start(),
    2.54                                   uncommit_region.byte_size())) {
    2.55 -          // Do better than this for Merlin
    2.56 -          vm_exit_out_of_memory(uncommit_region.byte_size(),
    2.57 -            "card table contraction");
    2.58 +          assert(false, "Card table contraction failed");
    2.59 +          // The call failed so don't change the end of the
    2.60 +          // committed region.  This is better than taking the
    2.61 +          // VM down.
    2.62 +          new_end_aligned = _committed[ind].end();
    2.63          }
    2.64        }
    2.65      }
    2.66 @@ -257,8 +285,25 @@
    2.67      }
    2.68      assert(index_for(new_region.last()) < (int) _guard_index,
    2.69        "The guard card will be overwritten");
    2.70 -    jbyte* const end = byte_after(new_region.last());
    2.71 +    // This line commented out cleans the newly expanded region and
    2.72 +    // not the aligned up expanded region.
    2.73 +    // jbyte* const end = byte_after(new_region.last());
    2.74 +    jbyte* const end = (jbyte*) new_end_for_commit;
    2.75 +    assert((end >= byte_after(new_region.last())) || collided,
    2.76 +      "Expect to be beyond new region unless impacting another region");
    2.77      // do nothing if we resized downward.
    2.78 +#ifdef ASSERT
    2.79 +    for (int ri = 0; ri < _cur_covered_regions; ri++) {
    2.80 +      if (ri != ind) {
    2.81 +        // The end of the new committed region should not
    2.82 +        // be in any existing region unless it matches
    2.83 +        // the start of the next region.
    2.84 +        assert(!_committed[ri].contains(end) ||
    2.85 +               (_committed[ri].start() == (HeapWord*) end),
    2.86 +               "Overlapping committed regions");
    2.87 +      }
    2.88 +    }
    2.89 +#endif
    2.90      if (entry < end) {
    2.91        memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    2.92      }
