Merge

changeset:   2473:377371490991
author:      johnc
date:        Thu, 20 Jan 2011 13:57:12 -0800
parent:      2468:02b6913287da
parent:      2472:0fa27f37d4d4
child:       2475:98bf1c6bb73a
child:       2476:85330eaa15ee
child:       2485:a7367756024b
child:       2492:a672e43650cc

src/share/vm/gc_implementation/g1/concurrentZFThread.cpp
src/share/vm/gc_implementation/g1/concurrentZFThread.hpp
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
     1.1 --- a/src/cpu/x86/vm/globals_x86.hpp	Wed Jan 19 19:24:34 2011 -0800
     1.2 +++ b/src/cpu/x86/vm/globals_x86.hpp	Thu Jan 20 13:57:12 2011 -0800
     1.3 @@ -62,7 +62,7 @@
     1.4  // due to lack of optimization caused by C++ compiler bugs
     1.5  define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
     1.6  #else
     1.7 -define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
     1.8 +define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5));
     1.9  #endif // AMD64
    1.10  
    1.11  define_pd_global(intx, PreInflateSpin,           10);
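
The StackShadowPages change above relies on a DEBUG_ONLY-style macro that keeps its argument only in debug builds, so the 32-bit default effectively becomes 8 pages in debug builds and stays at 3 in product builds. The following stand-alone sketch models that expansion; the ASSERT guard and the plain long type are stand-ins, not the actual HotSpot macro and flag definitions:

    // Simplified model of DEBUG_ONLY/define_pd_global; compile with -DASSERT
    // to mimic a debug build. These are stand-ins, not the HotSpot macros.
    #include <cstdio>

    #ifdef ASSERT
    #define DEBUG_ONLY(code) code   // debug builds keep the extra term
    #else
    #define DEBUG_ONLY(code)        // product builds drop it entirely
    #endif

    static const long StackShadowPages = 3 DEBUG_ONLY(+5);  // 8 in debug, 3 in product

    int main() {
      std::printf("StackShadowPages = %ld\n", StackShadowPages);
      return 0;
    }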
     2.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jan 19 19:24:34 2011 -0800
     2.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jan 20 13:57:12 2011 -0800
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     2.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -458,6 +458,7 @@
    2.11    _marking_task_overhead(1.0),
    2.12    _cleanup_sleep_factor(0.0),
    2.13    _cleanup_task_overhead(1.0),
    2.14 +  _cleanup_list("Cleanup List"),
    2.15    _region_bm(max_regions, false /* in_resource_area*/),
    2.16    _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
    2.17             CardTableModRefBS::card_shift,
    2.18 @@ -521,12 +522,6 @@
    2.19    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
    2.20    satb_qs.set_buffer_size(G1SATBBufferSize);
    2.21  
    2.22 -  int size = (int) MAX2(ParallelGCThreads, (size_t)1);
    2.23 -  _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
    2.24 -  for (int i = 0 ; i < size; i++) {
    2.25 -    _par_cleanup_thread_state[i] = new ParCleanupThreadState;
    2.26 -  }
    2.27 -
    2.28    _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
    2.29    _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
    2.30  
    2.31 @@ -711,11 +706,6 @@
    2.32  }
    2.33  
    2.34  ConcurrentMark::~ConcurrentMark() {
    2.35 -  int size = (int) MAX2(ParallelGCThreads, (size_t)1);
    2.36 -  for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i];
    2.37 -  FREE_C_HEAP_ARRAY(ParCleanupThreadState*,
    2.38 -                    _par_cleanup_thread_state);
    2.39 -
    2.40    for (int i = 0; i < (int) _max_task_num; ++i) {
    2.41      delete _task_queues->queue(i);
    2.42      delete _tasks[i];
    2.43 @@ -1171,12 +1161,12 @@
    2.44      if (G1TraceMarkStackOverflow)
    2.45        gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    2.46    } else {
    2.47 +    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    2.48      // We're done with marking.
     2.49      // This is the end of the marking cycle; we expect all
    2.50      // threads to have SATB queues with active set to true.
    2.51 -    JavaThread::satb_mark_queue_set().set_active_all_threads(
    2.52 -                                                  false, /* new active value */
    2.53 -                                                  true /* expected_active */);
    2.54 +    satb_mq_set.set_active_all_threads(false, /* new active value */
    2.55 +                                       true /* expected_active */);
    2.56  
    2.57      if (VerifyDuringGC) {
    2.58        HandleMark hm;  // handle scope
    2.59 @@ -1510,21 +1500,20 @@
    2.60    size_t _max_live_bytes;
    2.61    size_t _regions_claimed;
    2.62    size_t _freed_bytes;
    2.63 -  size_t _cleared_h_regions;
    2.64 -  size_t _freed_regions;
    2.65 -  UncleanRegionList* _unclean_region_list;
    2.66 +  FreeRegionList _local_cleanup_list;
    2.67 +  HumongousRegionSet _humongous_proxy_set;
    2.68    double _claimed_region_time;
    2.69    double _max_region_time;
    2.70  
    2.71  public:
    2.72    G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
    2.73 -                             UncleanRegionList* list,
    2.74                               int worker_num);
    2.75    size_t freed_bytes() { return _freed_bytes; }
    2.76 -  size_t cleared_h_regions() { return _cleared_h_regions; }
    2.77 -  size_t freed_regions() { return  _freed_regions; }
    2.78 -  UncleanRegionList* unclean_region_list() {
    2.79 -    return _unclean_region_list;
    2.80 +  FreeRegionList* local_cleanup_list() {
    2.81 +    return &_local_cleanup_list;
    2.82 +  }
    2.83 +  HumongousRegionSet* humongous_proxy_set() {
    2.84 +    return &_humongous_proxy_set;
    2.85    }
    2.86  
    2.87    bool doHeapRegion(HeapRegion *r);
    2.88 @@ -1537,25 +1526,22 @@
    2.89  
    2.90  class G1ParNoteEndTask: public AbstractGangTask {
    2.91    friend class G1NoteEndOfConcMarkClosure;
    2.92 +
    2.93  protected:
    2.94    G1CollectedHeap* _g1h;
    2.95    size_t _max_live_bytes;
    2.96    size_t _freed_bytes;
    2.97 -  ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state;
    2.98 +  FreeRegionList* _cleanup_list;
    2.99 +
   2.100  public:
   2.101    G1ParNoteEndTask(G1CollectedHeap* g1h,
   2.102 -                   ConcurrentMark::ParCleanupThreadState**
   2.103 -                   par_cleanup_thread_state) :
   2.104 +                   FreeRegionList* cleanup_list) :
   2.105      AbstractGangTask("G1 note end"), _g1h(g1h),
   2.106 -    _max_live_bytes(0), _freed_bytes(0),
   2.107 -    _par_cleanup_thread_state(par_cleanup_thread_state)
   2.108 -  {}
   2.109 +    _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
   2.110  
   2.111    void work(int i) {
   2.112      double start = os::elapsedTime();
   2.113 -    G1NoteEndOfConcMarkClosure g1_note_end(_g1h,
   2.114 -                                           &_par_cleanup_thread_state[i]->list,
   2.115 -                                           i);
   2.116 +    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i);
   2.117      if (G1CollectedHeap::use_parallel_gc_threads()) {
   2.118        _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
   2.119                                              HeapRegion::NoteEndClaimValue);
   2.120 @@ -1564,14 +1550,18 @@
   2.121      }
   2.122      assert(g1_note_end.complete(), "Shouldn't have yielded!");
   2.123  
   2.124 -    // Now finish up freeing the current thread's regions.
   2.125 -    _g1h->finish_free_region_work(g1_note_end.freed_bytes(),
   2.126 -                                  g1_note_end.cleared_h_regions(),
   2.127 -                                  0, NULL);
   2.128 +    // Now update the lists
   2.129 +    _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
   2.130 +                                            NULL /* free_list */,
   2.131 +                                            g1_note_end.humongous_proxy_set(),
   2.132 +                                            true /* par */);
   2.133      {
   2.134        MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
   2.135        _max_live_bytes += g1_note_end.max_live_bytes();
   2.136        _freed_bytes += g1_note_end.freed_bytes();
   2.137 +
   2.138 +      _cleanup_list->add_as_tail(g1_note_end.local_cleanup_list());
   2.139 +      assert(g1_note_end.local_cleanup_list()->is_empty(), "post-condition");
   2.140      }
   2.141      double end = os::elapsedTime();
   2.142      if (G1PrintParCleanupStats) {
   2.143 @@ -1612,30 +1602,28 @@
   2.144  
   2.145  G1NoteEndOfConcMarkClosure::
   2.146  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
   2.147 -                           UncleanRegionList* list,
   2.148                             int worker_num)
   2.149    : _g1(g1), _worker_num(worker_num),
   2.150      _max_live_bytes(0), _regions_claimed(0),
   2.151 -    _freed_bytes(0), _cleared_h_regions(0), _freed_regions(0),
   2.152 +    _freed_bytes(0),
   2.153      _claimed_region_time(0.0), _max_region_time(0.0),
   2.154 -    _unclean_region_list(list)
   2.155 -{}
   2.156 -
   2.157 -bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) {
   2.158 +    _local_cleanup_list("Local Cleanup List"),
   2.159 +    _humongous_proxy_set("Local Cleanup Humongous Proxy Set") { }
   2.160 +
   2.161 +bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
   2.162    // We use a claim value of zero here because all regions
   2.163    // were claimed with value 1 in the FinalCount task.
   2.164 -  r->reset_gc_time_stamp();
   2.165 -  if (!r->continuesHumongous()) {
   2.166 +  hr->reset_gc_time_stamp();
   2.167 +  if (!hr->continuesHumongous()) {
   2.168      double start = os::elapsedTime();
   2.169      _regions_claimed++;
   2.170 -    r->note_end_of_marking();
   2.171 -    _max_live_bytes += r->max_live_bytes();
   2.172 -    _g1->free_region_if_totally_empty_work(r,
   2.173 -                                           _freed_bytes,
   2.174 -                                           _cleared_h_regions,
   2.175 -                                           _freed_regions,
   2.176 -                                           _unclean_region_list,
   2.177 -                                           true /*par*/);
   2.178 +    hr->note_end_of_marking();
   2.179 +    _max_live_bytes += hr->max_live_bytes();
   2.180 +    _g1->free_region_if_totally_empty(hr,
   2.181 +                                      &_freed_bytes,
   2.182 +                                      &_local_cleanup_list,
   2.183 +                                      &_humongous_proxy_set,
   2.184 +                                      true /* par */);
   2.185      double region_time = (os::elapsedTime() - start);
   2.186      _claimed_region_time += region_time;
   2.187      if (region_time > _max_region_time) _max_region_time = region_time;
   2.188 @@ -1655,6 +1643,8 @@
   2.189      return;
   2.190    }
   2.191  
   2.192 +  g1h->verify_region_sets_optional();
   2.193 +
   2.194    if (VerifyDuringGC) {
   2.195      HandleMark hm;  // handle scope
   2.196      gclog_or_tty->print(" VerifyDuringGC:(before)");
   2.197 @@ -1719,7 +1709,7 @@
   2.198  
   2.199    // Note end of marking in all heap regions.
   2.200    double note_end_start = os::elapsedTime();
   2.201 -  G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state);
   2.202 +  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
   2.203    if (G1CollectedHeap::use_parallel_gc_threads()) {
   2.204      int n_workers = g1h->workers()->total_workers();
   2.205      g1h->set_par_threads(n_workers);
   2.206 @@ -1731,9 +1721,14 @@
   2.207    } else {
   2.208      g1_par_note_end_task.work(0);
   2.209    }
   2.210 -  g1h->set_unclean_regions_coming(true);
   2.211 +
   2.212 +  if (!cleanup_list_is_empty()) {
   2.213 +    // The cleanup list is not empty, so we'll have to process it
    2.214 +    // concurrently. Notify anyone else who might want free
    2.215 +    // regions that more free regions will be coming soon.
   2.216 +    g1h->set_free_regions_coming();
   2.217 +  }
   2.218    double note_end_end = os::elapsedTime();
   2.219 -  // Tell the mutators that there might be unclean regions coming...
   2.220    if (G1PrintParCleanupStats) {
   2.221      gclog_or_tty->print_cr("  note end of marking: %8.3f ms.",
   2.222                             (note_end_end - note_end_start)*1000.0);
   2.223 @@ -1799,33 +1794,63 @@
   2.224                       /* silent       */ false,
   2.225                       /* prev marking */ true);
   2.226    }
   2.227 +
   2.228 +  g1h->verify_region_sets_optional();
   2.229  }
   2.230  
   2.231  void ConcurrentMark::completeCleanup() {
   2.232 -  // A full collection intervened.
   2.233    if (has_aborted()) return;
   2.234  
   2.235 -  int first = 0;
   2.236 -  int last = (int)MAX2(ParallelGCThreads, (size_t)1);
   2.237 -  for (int t = 0; t < last; t++) {
   2.238 -    UncleanRegionList* list = &_par_cleanup_thread_state[t]->list;
   2.239 -    assert(list->well_formed(), "Inv");
   2.240 -    HeapRegion* hd = list->hd();
   2.241 -    while (hd != NULL) {
   2.242 -      // Now finish up the other stuff.
   2.243 -      hd->rem_set()->clear();
   2.244 -      HeapRegion* next_hd = hd->next_from_unclean_list();
   2.245 -      (void)list->pop();
   2.246 -      assert(list->hd() == next_hd, "how not?");
   2.247 -      _g1h->put_region_on_unclean_list(hd);
   2.248 -      if (!hd->isHumongous()) {
   2.249 -        // Add this to the _free_regions count by 1.
   2.250 -        _g1h->finish_free_region_work(0, 0, 1, NULL);
   2.251 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   2.252 +
   2.253 +  _cleanup_list.verify_optional();
   2.254 +  FreeRegionList local_free_list("Local Cleanup List");
   2.255 +
   2.256 +  if (G1ConcRegionFreeingVerbose) {
   2.257 +    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
   2.258 +                           "cleanup list has "SIZE_FORMAT" entries",
   2.259 +                           _cleanup_list.length());
   2.260 +  }
   2.261 +
    2.262 +  // No one else should be accessing the _cleanup_list at this point,
    2.263 +  // so it's not necessary to take any locks.
   2.264 +  while (!_cleanup_list.is_empty()) {
   2.265 +    HeapRegion* hr = _cleanup_list.remove_head();
   2.266 +    assert(hr != NULL, "the list was not empty");
   2.267 +    hr->rem_set()->clear();
   2.268 +    local_free_list.add_as_tail(hr);
   2.269 +
   2.270 +    // Instead of adding one region at a time to the secondary_free_list,
   2.271 +    // we accumulate them in the local list and move them a few at a
   2.272 +    // time. This also cuts down on the number of notify_all() calls
   2.273 +    // we do during this process. We'll also append the local list when
   2.274 +    // _cleanup_list is empty (which means we just removed the last
   2.275 +    // region from the _cleanup_list).
   2.276 +    if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
   2.277 +        _cleanup_list.is_empty()) {
   2.278 +      if (G1ConcRegionFreeingVerbose) {
   2.279 +        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
   2.280 +                               "appending "SIZE_FORMAT" entries to the "
   2.281 +                               "secondary_free_list, clean list still has "
   2.282 +                               SIZE_FORMAT" entries",
   2.283 +                               local_free_list.length(),
   2.284 +                               _cleanup_list.length());
   2.285        }
   2.286 -      hd = list->hd();
   2.287 -      assert(hd == next_hd, "how not?");
   2.288 +
   2.289 +      {
   2.290 +        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   2.291 +        g1h->secondary_free_list_add_as_tail(&local_free_list);
   2.292 +        SecondaryFreeList_lock->notify_all();
   2.293 +      }
   2.294 +
   2.295 +      if (G1StressConcRegionFreeing) {
   2.296 +        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
   2.297 +          os::sleep(Thread::current(), (jlong) 1, false);
   2.298 +        }
   2.299 +      }
   2.300      }
   2.301    }
   2.302 +  assert(local_free_list.is_empty(), "post-condition");
   2.303  }
   2.304  
   2.305  bool G1CMIsAliveClosure::do_object_b(oop obj) {
   2.306 @@ -2897,9 +2922,9 @@
   2.307    virtual void do_oop(      oop* p) { do_oop_work(p); }
   2.308  
   2.309    template <class T> void do_oop_work(T* p) {
   2.310 -    assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
   2.311 -    assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(),
   2.312 -           "invariant");
   2.313 +    assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
   2.314 +    assert(!_g1h->is_on_free_list(
   2.315 +                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
   2.316  
   2.317      oop obj = oopDesc::load_decode_heap_oop(p);
   2.318      if (_cm->verbose_high())
   2.319 @@ -3119,8 +3144,8 @@
   2.320  void CMTask::push(oop obj) {
   2.321    HeapWord* objAddr = (HeapWord*) obj;
   2.322    assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
   2.323 -  assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(),
   2.324 -         "invariant");
   2.325 +  assert(!_g1h->is_on_free_list(
   2.326 +              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
   2.327    assert(!_g1h->is_obj_ill(obj), "invariant");
   2.328    assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
   2.329  
   2.330 @@ -3365,8 +3390,8 @@
   2.331                                 (void*) obj);
   2.332  
   2.333        assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
   2.334 -      assert(!_g1h->heap_region_containing(obj)->is_on_free_list(),
   2.335 -             "invariant");
   2.336 +      assert(!_g1h->is_on_free_list(
   2.337 +                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
   2.338  
   2.339        scan_object(obj);
   2.340  
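
The completeCleanup() rewrite above drains _cleanup_list into a thread-local list and only appends to the shared secondary_free_list every G1SecondaryFreeListAppendLength regions, which bounds the number of lock acquisitions and notify_all() calls. Below is a minimal sketch of that batching pattern, with standard C++ containers and locks standing in for the HotSpot region lists and SecondaryFreeList_lock:

    #include <condition_variable>
    #include <cstddef>
    #include <list>
    #include <mutex>

    // Stand-ins for the shared secondary free list and its monitor.
    static std::list<int>          secondary_free_list;
    static std::mutex              secondary_free_list_lock;
    static std::condition_variable secondary_free_list_cv;

    // Drain 'cleanup_list' in batches of 'append_length' entries, mirroring
    // the flush-every-N + notify_all() pattern in completeCleanup().
    void complete_cleanup(std::list<int>& cleanup_list, std::size_t append_length) {
      std::list<int> local_free_list;
      while (!cleanup_list.empty()) {
        local_free_list.push_back(cleanup_list.front());  // remove_head + add_as_tail
        cleanup_list.pop_front();

        if (local_free_list.size() % append_length == 0 || cleanup_list.empty()) {
          std::lock_guard<std::mutex> x(secondary_free_list_lock);
          // splice() moves every entry, leaving local_free_list empty again.
          secondary_free_list.splice(secondary_free_list.end(), local_free_list);
          secondary_free_list_cv.notify_all();  // wake threads waiting for regions
        }
      }
      // Post-condition from the patch: everything has been handed over.
    }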
     3.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jan 19 19:24:34 2011 -0800
     3.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Jan 20 13:57:12 2011 -0800
     3.3 @@ -1,5 +1,5 @@
     3.4  /*
     3.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.8   *
     3.9   * This code is free software; you can redistribute it and/or modify it
    3.10 @@ -25,7 +25,7 @@
    3.11  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
    3.12  #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
    3.13  
    3.14 -#include "gc_implementation/g1/heapRegion.hpp"
    3.15 +#include "gc_implementation/g1/heapRegionSets.hpp"
    3.16  #include "utilities/taskqueue.hpp"
    3.17  
    3.18  class G1CollectedHeap;
    3.19 @@ -369,13 +369,7 @@
    3.20    double                _cleanup_sleep_factor;
    3.21    double                _cleanup_task_overhead;
    3.22  
    3.23 -  // Stuff related to age cohort processing.
    3.24 -  struct ParCleanupThreadState {
    3.25 -    char _pre[64];
    3.26 -    UncleanRegionList list;
    3.27 -    char _post[64];
    3.28 -  };
    3.29 -  ParCleanupThreadState** _par_cleanup_thread_state;
    3.30 +  FreeRegionList        _cleanup_list;
    3.31  
    3.32    // CMS marking support structures
    3.33    CMBitMap                _markBitMap1;
    3.34 @@ -484,6 +478,10 @@
    3.35    // prints all gathered CM-related statistics
    3.36    void print_stats();
    3.37  
    3.38 +  bool cleanup_list_is_empty() {
    3.39 +    return _cleanup_list.is_empty();
    3.40 +  }
    3.41 +
    3.42    // accessor methods
    3.43    size_t parallel_marking_threads() { return _parallel_marking_threads; }
    3.44    double sleep_factor()             { return _sleep_factor; }
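
The header now keeps the regions reclaimed at the end of marking in a single FreeRegionList (_cleanup_list) instead of the per-thread ParCleanupThreadState array. Only a small part of the FreeRegionList interface is exercised by this patch; the outline below is illustrative of that subset only (the real class is declared in heapRegionSets.hpp and does considerably more, including the named-list checking behind verify_optional()):

    // Illustrative outline only; not the actual heapRegionSets.hpp declarations.
    #include <cstddef>

    class HeapRegion;

    class FreeRegionListOutline {
      HeapRegion*  _head;
      HeapRegion*  _tail;
      std::size_t  _length;
     public:
      FreeRegionListOutline(const char* name);            // lists are named, e.g. "Cleanup List"
      bool        is_empty() const { return _length == 0; }
      std::size_t length()   const { return _length; }
      void        add_as_tail(HeapRegion* hr);             // append one region
      void        add_as_tail(FreeRegionListOutline* l);   // splice another list, emptying it
      HeapRegion* remove_head();                           // pop the first region
    };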
     4.1 --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Jan 19 19:24:34 2011 -0800
     4.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Thu Jan 20 13:57:12 2011 -0800
     4.3 @@ -1,5 +1,5 @@
     4.4  /*
     4.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     4.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     4.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.8   *
     4.9   * This code is free software; you can redistribute it and/or modify it
    4.10 @@ -95,8 +95,8 @@
    4.11    _vtime_start = os::elapsedVTime();
    4.12    wait_for_universe_init();
    4.13  
    4.14 -  G1CollectedHeap* g1 = G1CollectedHeap::heap();
    4.15 -  G1CollectorPolicy* g1_policy = g1->g1_policy();
    4.16 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
    4.17 +  G1CollectorPolicy* g1_policy = g1h->g1_policy();
    4.18    G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
    4.19    Thread *current_thread = Thread::current();
    4.20  
    4.21 @@ -119,7 +119,7 @@
    4.22        if (!g1_policy->in_young_gc_mode()) {
    4.23          // this ensures the flag is not set if we bail out of the marking
    4.24          // cycle; normally the flag is cleared immediately after cleanup
    4.25 -        g1->set_marking_complete();
    4.26 +        g1h->set_marking_complete();
    4.27  
    4.28          if (g1_policy->adaptive_young_list_length()) {
    4.29            double now = os::elapsedTime();
    4.30 @@ -228,10 +228,20 @@
    4.31          VM_CGC_Operation op(&cl_cl, verbose_str);
    4.32          VMThread::execute(&op);
    4.33        } else {
    4.34 -        G1CollectedHeap::heap()->set_marking_complete();
    4.35 +        g1h->set_marking_complete();
    4.36        }
    4.37  
    4.38 -      if (!cm()->has_aborted()) {
    4.39 +      // Check if cleanup set the free_regions_coming flag. If it
    4.40 +      // hasn't, we can just skip the next step.
    4.41 +      if (g1h->free_regions_coming()) {
    4.42 +        // The following will finish freeing up any regions that we
    4.43 +        // found to be empty during cleanup. We'll do this part
    4.44 +        // without joining the suspendible set. If an evacuation pause
     4.45 +        // takes place, then we would carry on freeing regions in
     4.46 +        // case they are needed by the pause. If a Full GC takes
     4.47 +        // place, it would wait for us to process the regions
    4.48 +        // reclaimed by cleanup.
    4.49 +
    4.50          double cleanup_start_sec = os::elapsedTime();
    4.51          if (PrintGC) {
    4.52            gclog_or_tty->date_stamp(PrintGCDateStamps);
    4.53 @@ -240,23 +250,22 @@
    4.54          }
    4.55  
    4.56          // Now do the remainder of the cleanup operation.
    4.57 -        _sts.join();
    4.58          _cm->completeCleanup();
    4.59 -        if (!cm()->has_aborted()) {
    4.60 -          g1_policy->record_concurrent_mark_cleanup_completed();
    4.61 +        g1_policy->record_concurrent_mark_cleanup_completed();
    4.62  
    4.63 -          double cleanup_end_sec = os::elapsedTime();
    4.64 -          if (PrintGC) {
    4.65 -            gclog_or_tty->date_stamp(PrintGCDateStamps);
    4.66 -            gclog_or_tty->stamp(PrintGCTimeStamps);
    4.67 -            gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
    4.68 -                                   cleanup_end_sec - cleanup_start_sec);
    4.69 -          }
    4.70 +        double cleanup_end_sec = os::elapsedTime();
    4.71 +        if (PrintGC) {
    4.72 +          gclog_or_tty->date_stamp(PrintGCDateStamps);
    4.73 +          gclog_or_tty->stamp(PrintGCTimeStamps);
    4.74 +          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
    4.75 +                                 cleanup_end_sec - cleanup_start_sec);
    4.76          }
    4.77 -        _sts.leave();
    4.78 +
    4.79 +        // We're done: no more free regions coming.
    4.80 +        g1h->reset_free_regions_coming();
    4.81        }
    4.82 -      // We're done: no more unclean regions coming.
    4.83 -      G1CollectedHeap::heap()->set_unclean_regions_coming(false);
    4.84 +      guarantee(cm()->cleanup_list_is_empty(),
    4.85 +                "at this point there should be no regions on the cleanup list");
    4.86  
    4.87        if (cm()->has_aborted()) {
    4.88          if (PrintGC) {
    4.89 @@ -278,7 +287,7 @@
    4.90      // Java thread is waiting for a full GC to happen (e.g., it
    4.91      // called System.gc() with +ExplicitGCInvokesConcurrent).
    4.92      _sts.join();
    4.93 -    g1->increment_full_collections_completed(true /* concurrent */);
    4.94 +    g1h->increment_full_collections_completed(true /* concurrent */);
    4.95      _sts.leave();
    4.96    }
    4.97    assert(_should_terminate, "just checking");
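
The concurrent mark thread now keys the final freeing step off the free_regions_coming flag: cleanup sets it, completeCleanup() runs without joining the suspendible thread set, and reset_free_regions_coming() wakes anyone blocked in wait_while_free_regions_coming() (for example a Full GC or a multi-region humongous allocation). A small sketch of that handshake, with standard C++ primitives standing in for SecondaryFreeList_lock and the HotSpot monitor API:

    #include <condition_variable>
    #include <mutex>

    // Hypothetical stand-in for the flag/monitor handshake used in the patch.
    class FreeRegionsComingFlag {
      std::mutex              _lock;   // stands in for SecondaryFreeList_lock
      std::condition_variable _cv;
      bool                    _free_regions_coming = false;

     public:
      // Cleanup (at the safepoint) announces that regions will be freed concurrently.
      void set_free_regions_coming() {
        std::lock_guard<std::mutex> x(_lock);
        _free_regions_coming = true;
      }

      // The concurrent mark thread calls this once completeCleanup() has handed
      // every region over to the secondary free list.
      void reset_free_regions_coming() {
        {
          std::lock_guard<std::mutex> x(_lock);
          _free_regions_coming = false;
        }
        _cv.notify_all();  // wake threads blocked below
      }

      // A Full GC (or a multi-region humongous allocation) blocks here until
      // cleanup has finished handing regions over.
      void wait_while_free_regions_coming() {
        std::unique_lock<std::mutex> x(_lock);
        _cv.wait(x, [this] { return !_free_regions_coming; });
      }
    };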
     5.1 --- a/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp	Wed Jan 19 19:24:34 2011 -0800
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,194 +0,0 @@
     5.4 -/*
     5.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     5.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.7 - *
     5.8 - * This code is free software; you can redistribute it and/or modify it
     5.9 - * under the terms of the GNU General Public License version 2 only, as
    5.10 - * published by the Free Software Foundation.
    5.11 - *
    5.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    5.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    5.15 - * version 2 for more details (a copy is included in the LICENSE file that
    5.16 - * accompanied this code).
    5.17 - *
    5.18 - * You should have received a copy of the GNU General Public License version
    5.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    5.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    5.21 - *
    5.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    5.23 - * or visit www.oracle.com if you need additional information or have any
    5.24 - * questions.
    5.25 - *
    5.26 - */
    5.27 -
    5.28 -#include "precompiled.hpp"
    5.29 -#include "gc_implementation/g1/concurrentZFThread.hpp"
    5.30 -#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    5.31 -#include "gc_implementation/g1/heapRegion.hpp"
    5.32 -#include "memory/space.inline.hpp"
    5.33 -#include "runtime/mutexLocker.hpp"
    5.34 -#include "utilities/copy.hpp"
    5.35 -
    5.36 -// ======= Concurrent Zero-Fill Thread ========
    5.37 -
    5.38 -// The CM thread is created when the G1 garbage collector is used
    5.39 -
    5.40 -int ConcurrentZFThread::_region_allocs = 0;
    5.41 -int ConcurrentZFThread::_sync_zfs = 0;
    5.42 -int ConcurrentZFThread::_zf_waits = 0;
    5.43 -int ConcurrentZFThread::_regions_filled = 0;
    5.44 -
    5.45 -ConcurrentZFThread::ConcurrentZFThread() :
    5.46 -  ConcurrentGCThread()
    5.47 -{
    5.48 -  create_and_start();
    5.49 -}
    5.50 -
    5.51 -void ConcurrentZFThread::wait_for_ZF_completed(HeapRegion* hr) {
    5.52 -  assert(ZF_mon->owned_by_self(), "Precondition.");
    5.53 -  note_zf_wait();
    5.54 -  while (hr->zero_fill_state() == HeapRegion::ZeroFilling) {
    5.55 -    ZF_mon->wait(Mutex::_no_safepoint_check_flag);
    5.56 -  }
    5.57 -}
    5.58 -
    5.59 -void ConcurrentZFThread::processHeapRegion(HeapRegion* hr) {
    5.60 -  assert(!Universe::heap()->is_gc_active(),
    5.61 -         "This should not happen during GC.");
    5.62 -  assert(hr != NULL, "Precondition");
    5.63 -  // These are unlocked reads, but if this test is successful, then no
    5.64 -  // other thread will attempt this zero filling.  Only a GC thread can
    5.65 -  // modify the ZF state of a region whose state is zero-filling, and this
    5.66 -  // should only happen while the ZF thread is locking out GC.
    5.67 -  if (hr->zero_fill_state() == HeapRegion::ZeroFilling
    5.68 -      && hr->zero_filler() == Thread::current()) {
    5.69 -    assert(hr->top() == hr->bottom(), "better be empty!");
    5.70 -    assert(!hr->isHumongous(), "Only free regions on unclean list.");
    5.71 -    Copy::fill_to_words(hr->bottom(), hr->capacity()/HeapWordSize);
    5.72 -    note_region_filled();
    5.73 -  }
    5.74 -}
    5.75 -
    5.76 -void ConcurrentZFThread::run() {
    5.77 -  initialize_in_thread();
    5.78 -  Thread* thr_self = Thread::current();
    5.79 -  _vtime_start = os::elapsedVTime();
    5.80 -  wait_for_universe_init();
    5.81 -
    5.82 -  G1CollectedHeap* g1 = G1CollectedHeap::heap();
    5.83 -  _sts.join();
    5.84 -  while (!_should_terminate) {
    5.85 -    _sts.leave();
    5.86 -
    5.87 -    {
    5.88 -      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
    5.89 -
    5.90 -      // This local variable will hold a region being zero-filled.  This
    5.91 -      // region will neither be on the unclean or zero-filled lists, and
    5.92 -      // will not be available for allocation; thus, we might have an
    5.93 -      // allocation fail, causing a full GC, because of this, but this is a
    5.94 -      // price we will pay.  (In future, we might want to make the fact
    5.95 -      // that there's a region being zero-filled apparent to the G1 heap,
    5.96 -      // which could then wait for it in this extreme case...)
    5.97 -      HeapRegion* to_fill;
    5.98 -
    5.99 -      while (!g1->should_zf()
   5.100 -             || (to_fill = g1->pop_unclean_region_list_locked()) == NULL)
   5.101 -        ZF_mon->wait(Mutex::_no_safepoint_check_flag);
   5.102 -      while (to_fill->zero_fill_state() == HeapRegion::ZeroFilling)
   5.103 -        ZF_mon->wait(Mutex::_no_safepoint_check_flag);
   5.104 -
   5.105 -      // So now to_fill is non-NULL and is not ZeroFilling.  It might be
   5.106 -      // Allocated or ZeroFilled.  (The latter could happen if this thread
   5.107 -      // starts the zero-filling of a region, but a GC intervenes and
   5.108 -      // pushes new regions needing on the front of the filling on the
   5.109 -      // front of the list.)
   5.110 -
   5.111 -      switch (to_fill->zero_fill_state()) {
   5.112 -      case HeapRegion::Allocated:
   5.113 -        to_fill = NULL;
   5.114 -        break;
   5.115 -
   5.116 -      case HeapRegion::NotZeroFilled:
   5.117 -        to_fill->set_zero_fill_in_progress(thr_self);
   5.118 -
   5.119 -        ZF_mon->unlock();
   5.120 -        _sts.join();
   5.121 -        processHeapRegion(to_fill);
   5.122 -        _sts.leave();
   5.123 -        ZF_mon->lock_without_safepoint_check();
   5.124 -
   5.125 -        if (to_fill->zero_fill_state() == HeapRegion::ZeroFilling
   5.126 -            && to_fill->zero_filler() == thr_self) {
   5.127 -          to_fill->set_zero_fill_complete();
   5.128 -          (void)g1->put_free_region_on_list_locked(to_fill);
   5.129 -        }
   5.130 -        break;
   5.131 -
   5.132 -      case HeapRegion::ZeroFilled:
   5.133 -        (void)g1->put_free_region_on_list_locked(to_fill);
   5.134 -        break;
   5.135 -
   5.136 -      case HeapRegion::ZeroFilling:
   5.137 -        ShouldNotReachHere();
   5.138 -        break;
   5.139 -      }
   5.140 -    }
   5.141 -    _vtime_accum = (os::elapsedVTime() - _vtime_start);
   5.142 -    _sts.join();
   5.143 -  }
   5.144 -  _sts.leave();
   5.145 -
   5.146 -  assert(_should_terminate, "just checking");
   5.147 -  terminate();
   5.148 -}
   5.149 -
   5.150 -bool ConcurrentZFThread::offer_yield() {
   5.151 -  if (_sts.should_yield()) {
   5.152 -    _sts.yield("Concurrent ZF");
   5.153 -    return true;
   5.154 -  } else {
   5.155 -    return false;
   5.156 -  }
   5.157 -}
   5.158 -
   5.159 -void ConcurrentZFThread::stop() {
   5.160 -  // it is ok to take late safepoints here, if needed
   5.161 -  MutexLockerEx mu(Terminator_lock);
   5.162 -  _should_terminate = true;
   5.163 -  while (!_has_terminated) {
   5.164 -    Terminator_lock->wait();
   5.165 -  }
   5.166 -}
   5.167 -
   5.168 -void ConcurrentZFThread::print() const {
   5.169 -  print_on(tty);
   5.170 -}
   5.171 -
   5.172 -void ConcurrentZFThread::print_on(outputStream* st) const {
   5.173 -  st->print("\"G1 Concurrent Zero-Fill Thread\" ");
   5.174 -  Thread::print_on(st);
   5.175 -  st->cr();
   5.176 -}
   5.177 -
   5.178 -
   5.179 -double ConcurrentZFThread::_vtime_accum;
   5.180 -
   5.181 -void ConcurrentZFThread::print_summary_info() {
   5.182 -  gclog_or_tty->print("\nConcurrent Zero-Filling:\n");
   5.183 -  gclog_or_tty->print("  Filled %d regions, used %5.2fs.\n",
   5.184 -                      _regions_filled,
   5.185 -                      vtime_accum());
   5.186 -  gclog_or_tty->print("  Of %d region allocs, %d (%5.2f%%) required sync ZF,\n",
   5.187 -                      _region_allocs, _sync_zfs,
   5.188 -                      (_region_allocs > 0 ?
   5.189 -                       (float)_sync_zfs/(float)_region_allocs*100.0 :
   5.190 -                       0.0));
   5.191 -  gclog_or_tty->print("     and %d (%5.2f%%) required a ZF wait.\n",
   5.192 -                      _zf_waits,
   5.193 -                      (_region_allocs > 0 ?
   5.194 -                       (float)_zf_waits/(float)_region_allocs*100.0 :
   5.195 -                       0.0));
   5.196 -
   5.197 -}
     6.1 --- a/src/share/vm/gc_implementation/g1/concurrentZFThread.hpp	Wed Jan 19 19:24:34 2011 -0800
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,91 +0,0 @@
     6.4 -/*
     6.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     6.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.7 - *
     6.8 - * This code is free software; you can redistribute it and/or modify it
     6.9 - * under the terms of the GNU General Public License version 2 only, as
    6.10 - * published by the Free Software Foundation.
    6.11 - *
    6.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    6.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    6.15 - * version 2 for more details (a copy is included in the LICENSE file that
    6.16 - * accompanied this code).
    6.17 - *
    6.18 - * You should have received a copy of the GNU General Public License version
    6.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    6.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    6.21 - *
    6.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    6.23 - * or visit www.oracle.com if you need additional information or have any
    6.24 - * questions.
    6.25 - *
    6.26 - */
    6.27 -
    6.28 -#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
    6.29 -#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
    6.30 -
    6.31 -#include "gc_implementation/shared/concurrentGCThread.hpp"
    6.32 -
    6.33 -// The Concurrent ZF Thread.  Performs concurrent zero-filling.
    6.34 -
    6.35 -class ConcurrentZFThread: public ConcurrentGCThread {
    6.36 -  friend class VMStructs;
    6.37 -  friend class ZeroFillRegionClosure;
    6.38 -
    6.39 - private:
    6.40 -
    6.41 -  // Zero fill the heap region.
    6.42 -  void processHeapRegion(HeapRegion* r);
    6.43 -
    6.44 -  // Stats
    6.45 -  //   Allocation (protected by heap lock).
    6.46 -  static int _region_allocs;  // Number of regions allocated
    6.47 -  static int _sync_zfs;       //   Synchronous zero-fills +
    6.48 -  static int _zf_waits;      //   Wait for conc zero-fill completion.
    6.49 -
    6.50 -  // Number of regions CFZ thread fills.
    6.51 -  static int _regions_filled;
    6.52 -
    6.53 -  double _vtime_start;  // Initial virtual time.
    6.54 -
    6.55 -  // These are static because the "print_summary_info" method is, and
    6.56 -  // it currently assumes there is only one ZF thread.  We'll change when
    6.57 -  // we need to.
    6.58 -  static double _vtime_accum;  // Initial virtual time.
    6.59 -  static double vtime_accum() { return _vtime_accum; }
    6.60 -
    6.61 -  // Offer yield for GC.  Returns true if yield occurred.
    6.62 -  bool offer_yield();
    6.63 -
    6.64 - public:
    6.65 -  // Constructor
    6.66 -  ConcurrentZFThread();
    6.67 -
    6.68 -  // Main loop.
    6.69 -  virtual void run();
    6.70 -
    6.71 -  // Printing
    6.72 -  void print_on(outputStream* st) const;
    6.73 -  void print() const;
    6.74 -
    6.75 -  // Waits until "r" has been zero-filled.  Requires caller to hold the
    6.76 -  // ZF_mon.
    6.77 -  static void wait_for_ZF_completed(HeapRegion* r);
    6.78 -
    6.79 -  // Get or clear the current unclean region.  Should be done
    6.80 -  // while holding the ZF_needed_mon lock.
    6.81 -
    6.82 -  // shutdown
    6.83 -  void stop();
    6.84 -
    6.85 -  // Stats
    6.86 -  static void note_region_alloc() {_region_allocs++; }
    6.87 -  static void note_sync_zfs() { _sync_zfs++; }
    6.88 -  static void note_zf_wait() { _zf_waits++; }
    6.89 -  static void note_region_filled() { _regions_filled++; }
    6.90 -
    6.91 -  static void print_summary_info();
    6.92 -};
    6.93 -
    6.94 -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTZFTHREAD_HPP
     7.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jan 19 19:24:34 2011 -0800
     7.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jan 20 13:57:12 2011 -0800
     7.3 @@ -28,7 +28,6 @@
     7.4  #include "gc_implementation/g1/concurrentG1Refine.hpp"
     7.5  #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
     7.6  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
     7.7 -#include "gc_implementation/g1/concurrentZFThread.hpp"
     7.8  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
     7.9  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    7.10  #include "gc_implementation/g1/g1MarkSweep.hpp"
    7.11 @@ -425,11 +424,9 @@
    7.12  
    7.13  void G1CollectedHeap::stop_conc_gc_threads() {
    7.14    _cg1r->stop();
    7.15 -  _czft->stop();
    7.16    _cmThread->stop();
    7.17  }
    7.18  
    7.19 -
    7.20  void G1CollectedHeap::check_ct_logs_at_safepoint() {
    7.21    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    7.22    CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
    7.23 @@ -481,49 +478,92 @@
    7.24  
    7.25  // Private methods.
    7.26  
    7.27 -// Finds a HeapRegion that can be used to allocate a given size of block.
    7.28 -
    7.29 -
    7.30 -HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
    7.31 -                                                 bool do_expand,
    7.32 -                                                 bool zero_filled) {
    7.33 -  ConcurrentZFThread::note_region_alloc();
    7.34 -  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
    7.35 +HeapRegion*
    7.36 +G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
    7.37 +  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    7.38 +  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
    7.39 +    if (!_secondary_free_list.is_empty()) {
    7.40 +      if (G1ConcRegionFreeingVerbose) {
    7.41 +        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
    7.42 +                               "secondary_free_list has "SIZE_FORMAT" entries",
    7.43 +                               _secondary_free_list.length());
    7.44 +      }
    7.45 +      // It looks as if there are free regions available on the
    7.46 +      // secondary_free_list. Let's move them to the free_list and try
    7.47 +      // again to allocate from it.
    7.48 +      append_secondary_free_list();
    7.49 +
    7.50 +      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
    7.51 +             "empty we should have moved at least one entry to the free_list");
    7.52 +      HeapRegion* res = _free_list.remove_head();
    7.53 +      if (G1ConcRegionFreeingVerbose) {
    7.54 +        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
    7.55 +                               "allocated "HR_FORMAT" from secondary_free_list",
    7.56 +                               HR_FORMAT_PARAMS(res));
    7.57 +      }
    7.58 +      return res;
    7.59 +    }
    7.60 +
     7.61 +    // Wait here until we get notified either when (a) there are no
     7.62 +    // more free regions coming or (b) some regions have been moved onto
     7.63 +    // the secondary_free_list.
    7.64 +    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
    7.65 +  }
    7.66 +
    7.67 +  if (G1ConcRegionFreeingVerbose) {
    7.68 +    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
    7.69 +                           "could not allocate from secondary_free_list");
    7.70 +  }
    7.71 +  return NULL;
    7.72 +}
    7.73 +
    7.74 +HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
    7.75 +                                             bool do_expand) {
    7.76 +  assert(!isHumongous(word_size) ||
    7.77 +                                  word_size <= (size_t) HeapRegion::GrainWords,
    7.78 +         "the only time we use this to allocate a humongous region is "
    7.79 +         "when we are allocating a single humongous region");
    7.80 +
    7.81 +  HeapRegion* res;
    7.82 +  if (G1StressConcRegionFreeing) {
    7.83 +    if (!_secondary_free_list.is_empty()) {
    7.84 +      if (G1ConcRegionFreeingVerbose) {
    7.85 +        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
    7.86 +                               "forced to look at the secondary_free_list");
    7.87 +      }
    7.88 +      res = new_region_try_secondary_free_list(word_size);
    7.89 +      if (res != NULL) {
    7.90 +        return res;
    7.91 +      }
    7.92 +    }
    7.93 +  }
    7.94 +  res = _free_list.remove_head_or_null();
    7.95 +  if (res == NULL) {
    7.96 +    if (G1ConcRegionFreeingVerbose) {
    7.97 +      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
    7.98 +                             "res == NULL, trying the secondary_free_list");
    7.99 +    }
   7.100 +    res = new_region_try_secondary_free_list(word_size);
   7.101 +  }
   7.102    if (res == NULL && do_expand) {
   7.103      expand(word_size * HeapWordSize);
   7.104 -    res = alloc_free_region_from_lists(zero_filled);
   7.105 -    assert(res == NULL ||
   7.106 -           (!res->isHumongous() &&
   7.107 -            (!zero_filled ||
   7.108 -             res->zero_fill_state() == HeapRegion::Allocated)),
   7.109 -           "Alloc Regions must be zero filled (and non-H)");
   7.110 +    res = _free_list.remove_head_or_null();
   7.111    }
   7.112    if (res != NULL) {
   7.113 -    if (res->is_empty()) {
   7.114 -      _free_regions--;
   7.115 -    }
   7.116 -    assert(!res->isHumongous() &&
   7.117 -           (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
   7.118 -           err_msg("Non-young alloc Regions must be zero filled (and non-H):"
   7.119 -                   " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
   7.120 -                   res->isHumongous(), zero_filled, res->zero_fill_state()));
   7.121 -    assert(!res->is_on_unclean_list(),
   7.122 -           "Alloc Regions must not be on the unclean list");
   7.123      if (G1PrintHeapRegions) {
   7.124 -      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
   7.125 -                             "top "PTR_FORMAT,
   7.126 -                             res->hrs_index(), res->bottom(), res->end(), res->top());
   7.127 +      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
   7.128 +                             "top "PTR_FORMAT, res->hrs_index(),
   7.129 +                             res->bottom(), res->end(), res->top());
   7.130      }
   7.131    }
   7.132    return res;
   7.133  }
   7.134  
   7.135 -HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
   7.136 -                                                         size_t word_size,
   7.137 -                                                         bool zero_filled) {
   7.138 +HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
   7.139 +                                                 size_t word_size) {
   7.140    HeapRegion* alloc_region = NULL;
   7.141    if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
   7.142 -    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
   7.143 +    alloc_region = new_region_work(word_size, true /* do_expand */);
   7.144      if (purpose == GCAllocForSurvived && alloc_region != NULL) {
   7.145        alloc_region->set_survivor();
   7.146      }
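
new_region_work() now encodes a simple fallback order: take the head of the primary free list, otherwise try the secondary free list that concurrent cleanup repopulates (possibly waiting for it), and only then expand the heap. A compressed, self-contained sketch of that ordering with hypothetical container-based types (it omits the G1StressConcRegionFreeing path and the waiting logic):

    #include <cstddef>
    #include <deque>

    struct HeapRegion { };

    struct G1HeapSketch {
      std::deque<HeapRegion*> free_list;            // primary free list
      std::deque<HeapRegion*> secondary_free_list;  // filled by concurrent cleanup

      HeapRegion* remove_head_or_null(std::deque<HeapRegion*>& l) {
        if (l.empty()) return nullptr;
        HeapRegion* hr = l.front();
        l.pop_front();
        return hr;
      }

      void expand(std::size_t /*bytes*/) {
        // Grow the reserved heap and refill free_list; elided in this sketch.
      }

      HeapRegion* new_region_work(std::size_t word_size, bool do_expand) {
        HeapRegion* res = remove_head_or_null(free_list);
        if (res == nullptr) {
          // The real code may wait here for cleanup to hand regions over.
          res = remove_head_or_null(secondary_free_list);
        }
        if (res == nullptr && do_expand) {
          expand(word_size * sizeof(void*));  // word_size * HeapWordSize in HotSpot
          res = remove_head_or_null(free_list);
        }
        return res;
      }
    };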
   7.147 @@ -534,82 +574,188 @@
   7.148    return alloc_region;
   7.149  }
   7.150  
   7.151 +int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
   7.152 +                                                       size_t word_size) {
   7.153 +  int first = -1;
   7.154 +  if (num_regions == 1) {
   7.155 +    // Only one region to allocate, no need to go through the slower
    7.156 +    // path. The caller will attempt the expansion if this fails, so
   7.157 +    // let's not try to expand here too.
   7.158 +    HeapRegion* hr = new_region_work(word_size, false /* do_expand */);
   7.159 +    if (hr != NULL) {
   7.160 +      first = hr->hrs_index();
   7.161 +    } else {
   7.162 +      first = -1;
   7.163 +    }
   7.164 +  } else {
   7.165 +    // We can't allocate humongous regions while cleanupComplete() is
   7.166 +    // running, since some of the regions we find to be empty might not
   7.167 +    // yet be added to the free list and it is not straightforward to
   7.168 +    // know which list they are on so that we can remove them. Note
   7.169 +    // that we only need to do this if we need to allocate more than
   7.170 +    // one region to satisfy the current humongous allocation
   7.171 +    // request. If we are only allocating one region we use the common
   7.172 +    // region allocation code (see above).
   7.173 +    wait_while_free_regions_coming();
   7.174 +    append_secondary_free_list_if_not_empty();
   7.175 +
   7.176 +    if (free_regions() >= num_regions) {
   7.177 +      first = _hrs->find_contiguous(num_regions);
   7.178 +      if (first != -1) {
   7.179 +        for (int i = first; i < first + (int) num_regions; ++i) {
   7.180 +          HeapRegion* hr = _hrs->at(i);
   7.181 +          assert(hr->is_empty(), "sanity");
   7.182 +          assert(is_on_free_list(hr), "sanity");
   7.183 +          hr->set_pending_removal(true);
   7.184 +        }
   7.185 +        _free_list.remove_all_pending(num_regions);
   7.186 +      }
   7.187 +    }
   7.188 +  }
   7.189 +  return first;
   7.190 +}
   7.191 +
   7.192  // If could fit into free regions w/o expansion, try.
   7.193  // Otherwise, if can expand, do so.
   7.194  // Otherwise, if using ex regions might help, try with ex given back.
   7.195  HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   7.196 -  assert_heap_locked_or_at_safepoint();
   7.197 -  assert(regions_accounted_for(), "Region leakage!");
   7.198 -
   7.199 -  // We can't allocate humongous regions while cleanupComplete is
   7.200 -  // running, since some of the regions we find to be empty might not
   7.201 -  // yet be added to the unclean list. If we're already at a
   7.202 -  // safepoint, this call is unnecessary, not to mention wrong.
   7.203 -  if (!SafepointSynchronize::is_at_safepoint()) {
   7.204 -    wait_for_cleanup_complete();
   7.205 -  }
   7.206 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   7.207 +
   7.208 +  verify_region_sets_optional();
   7.209  
   7.210    size_t num_regions =
   7.211           round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   7.212 -
   7.213 -  // Special case if < one region???
   7.214 -
   7.215 -  // Remember the ft size.
   7.216    size_t x_size = expansion_regions();
   7.217 -
   7.218 -  HeapWord* res = NULL;
   7.219 -  bool eliminated_allocated_from_lists = false;
   7.220 -
   7.221 -  // Can the allocation potentially fit in the free regions?
   7.222 -  if (free_regions() >= num_regions) {
   7.223 -    res = _hrs->obj_allocate(word_size);
   7.224 -  }
   7.225 -  if (res == NULL) {
   7.226 -    // Try expansion.
   7.227 -    size_t fs = _hrs->free_suffix();
   7.228 +  size_t fs = _hrs->free_suffix();
   7.229 +  int first = humongous_obj_allocate_find_first(num_regions, word_size);
   7.230 +  if (first == -1) {
   7.231 +    // The only thing we can do now is attempt expansion.
   7.232      if (fs + x_size >= num_regions) {
   7.233        expand((num_regions - fs) * HeapRegion::GrainBytes);
   7.234 -      res = _hrs->obj_allocate(word_size);
   7.235 -      assert(res != NULL, "This should have worked.");
   7.236 -    } else {
   7.237 -      // Expansion won't help.  Are there enough free regions if we get rid
   7.238 -      // of reservations?
   7.239 -      size_t avail = free_regions();
   7.240 -      if (avail >= num_regions) {
   7.241 -        res = _hrs->obj_allocate(word_size);
   7.242 -        if (res != NULL) {
   7.243 -          remove_allocated_regions_from_lists();
   7.244 -          eliminated_allocated_from_lists = true;
   7.245 -        }
   7.246 +      first = humongous_obj_allocate_find_first(num_regions, word_size);
   7.247 +      assert(first != -1, "this should have worked");
   7.248 +    }
   7.249 +  }
   7.250 +
   7.251 +  if (first != -1) {
   7.252 +    // Index of last region in the series + 1.
   7.253 +    int last = first + (int) num_regions;
   7.254 +
   7.255 +    // We need to initialize the region(s) we just discovered. This is
   7.256 +    // a bit tricky given that it can happen concurrently with
   7.257 +    // refinement threads refining cards on these regions and
   7.258 +    // potentially wanting to refine the BOT as they are scanning
   7.259 +    // those cards (this can happen shortly after a cleanup; see CR
   7.260 +    // 6991377). So we have to set up the region(s) carefully and in
   7.261 +    // a specific order.
   7.262 +
   7.263 +    // The word size sum of all the regions we will allocate.
   7.264 +    size_t word_size_sum = num_regions * HeapRegion::GrainWords;
   7.265 +    assert(word_size <= word_size_sum, "sanity");
   7.266 +
   7.267 +    // This will be the "starts humongous" region.
   7.268 +    HeapRegion* first_hr = _hrs->at(first);
   7.269 +    // The header of the new object will be placed at the bottom of
   7.270 +    // the first region.
   7.271 +    HeapWord* new_obj = first_hr->bottom();
   7.272 +    // This will be the new end of the first region in the series that
    7.273 +    // should also match the end of the last region in the series.
   7.274 +    HeapWord* new_end = new_obj + word_size_sum;
   7.275 +    // This will be the new top of the first region that will reflect
   7.276 +    // this allocation.
   7.277 +    HeapWord* new_top = new_obj + word_size;
   7.278 +
   7.279 +    // First, we need to zero the header of the space that we will be
   7.280 +    // allocating. When we update top further down, some refinement
   7.281 +    // threads might try to scan the region. By zeroing the header we
   7.282 +    // ensure that any thread that will try to scan the region will
   7.283 +    // come across the zero klass word and bail out.
   7.284 +    //
   7.285 +    // NOTE: It would not have been correct to have used
   7.286 +    // CollectedHeap::fill_with_object() and make the space look like
   7.287 +    // an int array. The thread that is doing the allocation will
   7.288 +    // later update the object header to a potentially different array
   7.289 +    // type and, for a very short period of time, the klass and length
   7.290 +    // fields will be inconsistent. This could cause a refinement
   7.291 +    // thread to calculate the object size incorrectly.
   7.292 +    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   7.293 +
   7.294 +    // We will set up the first region as "starts humongous". This
   7.295 +    // will also update the BOT covering all the regions to reflect
   7.296 +    // that there is a single object that starts at the bottom of the
   7.297 +    // first region.
   7.298 +    first_hr->set_startsHumongous(new_top, new_end);
   7.299 +
   7.300 +    // Then, if there are any, we will set up the "continues
   7.301 +    // humongous" regions.
   7.302 +    HeapRegion* hr = NULL;
   7.303 +    for (int i = first + 1; i < last; ++i) {
   7.304 +      hr = _hrs->at(i);
   7.305 +      hr->set_continuesHumongous(first_hr);
   7.306 +    }
   7.307 +    // If we have "continues humongous" regions (hr != NULL), then the
   7.308 +    // end of the last one should match new_end.
   7.309 +    assert(hr == NULL || hr->end() == new_end, "sanity");
   7.310 +
   7.311 +    // Up to this point no concurrent thread would have been able to
   7.312 +    // do any scanning on any region in this series. All the top
   7.313 +    // fields still point to bottom, so the intersection between
   7.314 +    // [bottom,top] and [card_start,card_end] will be empty. Before we
   7.315 +    // update the top fields, we'll do a storestore to make sure that
   7.316 +    // no thread sees the update to top before the zeroing of the
   7.317 +    // object header and the BOT initialization.
   7.318 +    OrderAccess::storestore();
   7.319 +
   7.320 +    // Now that the BOT and the object header have been initialized,
   7.321 +    // we can update top of the "starts humongous" region.
   7.322 +    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
   7.323 +           "new_top should be in this region");
   7.324 +    first_hr->set_top(new_top);
   7.325 +
   7.326 +    // Now, we will update the top fields of the "continues humongous"
   7.327 +    // regions. The reason we need to do this is that, otherwise,
   7.328 +    // these regions would look empty and this will confuse parts of
   7.329 +    // G1. For example, the code that looks for a consecutive number
   7.330 +    // of empty regions will consider them empty and try to
   7.331 +    // re-allocate them. We can extend is_empty() to also include
   7.332 +    // !continuesHumongous(), but it is easier to just update the top
   7.333 +    // fields here. The way we set top for all regions (i.e., top ==
   7.334 +    // end for all regions but the last one, top == new_top for the
   7.335 +    // last one) is actually used when we will free up the humongous
   7.336 +    // region in free_humongous_region().
   7.337 +    hr = NULL;
   7.338 +    for (int i = first + 1; i < last; ++i) {
   7.339 +      hr = _hrs->at(i);
   7.340 +      if ((i + 1) == last) {
   7.341 +        // last continues humongous region
   7.342 +        assert(hr->bottom() < new_top && new_top <= hr->end(),
   7.343 +               "new_top should fall on this region");
   7.344 +        hr->set_top(new_top);
   7.345 +      } else {
   7.346 +        // not last one
   7.347 +        assert(new_top > hr->end(), "new_top should be above this region");
   7.348 +        hr->set_top(hr->end());
   7.349        }
   7.350      }
   7.351 -  }
   7.352 -  if (res != NULL) {
   7.353 -    // Increment by the number of regions allocated.
   7.354 -    // FIXME: Assumes regions all of size GrainBytes.
   7.355 -#ifndef PRODUCT
   7.356 -    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
   7.357 -                                           HeapRegion::GrainWords));
   7.358 -#endif
   7.359 -    if (!eliminated_allocated_from_lists)
   7.360 -      remove_allocated_regions_from_lists();
   7.361 -    _summary_bytes_used += word_size * HeapWordSize;
   7.362 -    _free_regions -= num_regions;
   7.363 -    _num_humongous_regions += (int) num_regions;
   7.364 -  }
   7.365 -  assert(regions_accounted_for(), "Region Leakage");
   7.366 -  return res;
   7.367 +    // If we have continues humongous regions (hr != NULL), then the
   7.368 +    // end of the last one should match new_end and its top should
   7.369 +    // match new_top.
   7.370 +    assert(hr == NULL ||
   7.371 +           (hr->end() == new_end && hr->top() == new_top), "sanity");
   7.372 +
   7.373 +    assert(first_hr->used() == word_size * HeapWordSize, "invariant");
   7.374 +    _summary_bytes_used += first_hr->used();
   7.375 +    _humongous_set.add(first_hr);
   7.376 +
   7.377 +    return new_obj;
   7.378 +  }
   7.379 +
   7.380 +  verify_region_sets_optional();
   7.381 +  return NULL;
   7.382  }
   7.383  
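The ordering argument above (fully initialize the object header and the BOT, issue a storestore barrier, and only then publish the new top values) is the standard release/publish idiom. Below is a minimal standalone sketch of the same idea, using std::atomic release/acquire in place of HotSpot's OrderAccess; the names are illustrative and not part of the G1 code.

    #include <atomic>
    #include <cassert>
    #include <cstdio>
    #include <thread>

    // 'payload' stands in for the object header / BOT entries of the new
    // humongous object; 'top' stands in for the region's top pointer.
    static int payload[8];
    static std::atomic<int*> top{nullptr};

    int main() {
      std::thread writer([] {
        payload[0] = 42;                                  // initialize first
        // Release store == the storestore barrier before publishing top.
        top.store(payload + 1, std::memory_order_release);
      });

      std::thread reader([] {
        // A scanner only walks [bottom, top); reading top with acquire
        // ordering means it can never observe top advanced past memory
        // whose initialization it cannot also see.
        if (int* t = top.load(std::memory_order_acquire)) {
          assert(payload[0] == 42);
          (void) t;
        }
      });

      writer.join();
      reader.join();
      std::printf("top published safely\n");
      return 0;
    }
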
   7.384  void
   7.385  G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
   7.386 -  // The cleanup operation might update _summary_bytes_used
   7.387 -  // concurrently with this method. So, right now, if we don't wait
   7.388 -  // for it to complete, updates to _summary_bytes_used might get
   7.389 -  // lost. This will be resolved in the near future when the operation
   7.390 -  // of the free region list is revamped as part of CR 6977804.
   7.391 -  wait_for_cleanup_complete();
   7.392 -
   7.393    // Other threads might still be trying to allocate using CASes out
   7.394    // of the region we are retiring, as they can do so without holding
    7.395    // the Heap_lock. So we first have to make sure that no one else can
   7.396 @@ -654,7 +800,7 @@
   7.397                                                         bool at_safepoint,
   7.398                                                         bool do_dirtying,
   7.399                                                         bool can_expand) {
   7.400 -  assert_heap_locked_or_at_safepoint();
   7.401 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   7.402    assert(_cur_alloc_region == NULL,
   7.403           "replace_cur_alloc_region_and_allocate() should only be called "
   7.404           "after retiring the previous current alloc region");
   7.405 @@ -665,25 +811,12 @@
   7.406           "we are not allowed to expand the young gen");
   7.407  
   7.408    if (can_expand || !g1_policy()->is_young_list_full()) {
   7.409 -    if (!at_safepoint) {
   7.410 -      // The cleanup operation might update _summary_bytes_used
   7.411 -      // concurrently with this method. So, right now, if we don't
   7.412 -      // wait for it to complete, updates to _summary_bytes_used might
   7.413 -      // get lost. This will be resolved in the near future when the
   7.414 -      // operation of the free region list is revamped as part of
   7.415 -      // CR 6977804. If we're already at a safepoint, this call is
   7.416 -      // unnecessary, not to mention wrong.
   7.417 -      wait_for_cleanup_complete();
   7.418 -    }
   7.419 -
   7.420 -    HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
   7.421 -                                                      false /* zero_filled */);
   7.422 +    HeapRegion* new_cur_alloc_region = new_alloc_region(word_size);
   7.423      if (new_cur_alloc_region != NULL) {
   7.424        assert(new_cur_alloc_region->is_empty(),
   7.425               "the newly-allocated region should be empty, "
   7.426               "as right now we only allocate new regions out of the free list");
   7.427        g1_policy()->update_region_num(true /* next_is_young */);
   7.428 -      _summary_bytes_used -= new_cur_alloc_region->used();
   7.429        set_region_short_lived_locked(new_cur_alloc_region);
   7.430  
   7.431        assert(!new_cur_alloc_region->isHumongous(),
   7.432 @@ -733,7 +866,7 @@
   7.433  
   7.434    assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
   7.435           "alloc region, it should still be NULL");
   7.436 -  assert_heap_locked_or_at_safepoint();
   7.437 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   7.438    return NULL;
   7.439  }
   7.440  
   7.441 @@ -745,6 +878,10 @@
   7.442    assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
   7.443           "used for humongous allocations");
   7.444  
   7.445 +  // We should only reach here when we were unable to allocate
    7.447 +  // otherwise. So, we should have no active current alloc region.
   7.447 +  assert(_cur_alloc_region == NULL, "current alloc region should be NULL");
   7.448 +
   7.449    // We will loop while succeeded is false, which means that we tried
   7.450    // to do a collection, but the VM op did not succeed. So, when we
   7.451    // exit the loop, either one of the allocation attempts was
   7.452 @@ -756,30 +893,6 @@
   7.453      // Every time we go round the loop we should be holding the Heap_lock.
   7.454      assert_heap_locked();
   7.455  
   7.456 -    {
   7.457 -      // We may have concurrent cleanup working at the time. Wait for
   7.458 -      // it to complete. In the future we would probably want to make
   7.459 -      // the concurrent cleanup truly concurrent by decoupling it from
   7.460 -      // the allocation. This will happen in the near future as part
   7.461 -      // of CR 6977804 which will revamp the operation of the free
   7.462 -      // region list. The fact that wait_for_cleanup_complete() will
   7.463 -      // do a wait() means that we'll give up the Heap_lock. So, it's
   7.464 -      // possible that when we exit wait_for_cleanup_complete() we
   7.465 -      // might be able to allocate successfully (since somebody else
   7.466 -      // might have done a collection meanwhile). So, we'll attempt to
   7.467 -      // allocate again, just in case. When we make cleanup truly
   7.468 -      // concurrent with allocation, we should remove this allocation
   7.469 -      // attempt as it's redundant (we only reach here after an
   7.470 -      // allocation attempt has been unsuccessful).
   7.471 -      wait_for_cleanup_complete();
   7.472 -
   7.473 -      HeapWord* result = attempt_allocation_locked(word_size);
   7.474 -      if (result != NULL) {
   7.475 -        assert_heap_not_locked();
   7.476 -        return result;
   7.477 -      }
   7.478 -    }
   7.479 -
   7.480      if (GC_locker::is_active_and_needs_gc()) {
   7.481        // We are locked out of GC because of the GC locker. We can
   7.482        // allocate a new region only if we can expand the young gen.
   7.483 @@ -894,7 +1007,7 @@
   7.484    // allocation paths that attempt to allocate a humongous object
   7.485    // should eventually reach here. Currently, the only paths are from
   7.486    // mem_allocate() and attempt_allocation_at_safepoint().
   7.487 -  assert_heap_locked_or_at_safepoint();
   7.488 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   7.489    assert(isHumongous(word_size), "attempt_allocation_humongous() "
   7.490           "should only be used for humongous allocations");
   7.491    assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   7.492 @@ -971,13 +1084,13 @@
   7.493      }
   7.494    }
   7.495  
   7.496 -  assert_heap_locked_or_at_safepoint();
   7.497 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   7.498    return NULL;
   7.499  }
   7.500  
   7.501  HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   7.502                                             bool expect_null_cur_alloc_region) {
   7.503 -  assert_at_safepoint();
   7.504 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.505    assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
   7.506           err_msg("the current alloc region was unexpectedly found "
   7.507                   "to be non-NULL, cur alloc region: "PTR_FORMAT" "
   7.508 @@ -1131,22 +1244,18 @@
   7.509  }
   7.510  
   7.511  void G1CollectedHeap::abandon_cur_alloc_region() {
   7.512 -  if (_cur_alloc_region != NULL) {
   7.513 -    // We're finished with the _cur_alloc_region.
   7.514 -    if (_cur_alloc_region->is_empty()) {
   7.515 -      _free_regions++;
   7.516 -      free_region(_cur_alloc_region);
   7.517 -    } else {
   7.518 -      // As we're builing (at least the young portion) of the collection
   7.519 -      // set incrementally we'll add the current allocation region to
   7.520 -      // the collection set here.
   7.521 -      if (_cur_alloc_region->is_young()) {
   7.522 -        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
   7.523 -      }
   7.524 -      _summary_bytes_used += _cur_alloc_region->used();
   7.525 -    }
   7.526 -    _cur_alloc_region = NULL;
   7.527 -  }
   7.528 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.529 +
   7.530 +  HeapRegion* cur_alloc_region = _cur_alloc_region;
   7.531 +  if (cur_alloc_region != NULL) {
   7.532 +    assert(!cur_alloc_region->is_empty(),
   7.533 +           "the current alloc region can never be empty");
   7.534 +    assert(cur_alloc_region->is_young(),
   7.535 +           "the current alloc region should be young");
   7.536 +
   7.537 +    retire_cur_alloc_region_common(cur_alloc_region);
   7.538 +  }
   7.539 +  assert(_cur_alloc_region == NULL, "post-condition");
   7.540  }
   7.541  
   7.542  void G1CollectedHeap::abandon_gc_alloc_regions() {
   7.543 @@ -1227,6 +1336,8 @@
   7.544  bool G1CollectedHeap::do_collection(bool explicit_gc,
   7.545                                      bool clear_all_soft_refs,
   7.546                                      size_t word_size) {
   7.547 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.548 +
   7.549    if (GC_locker::check_active_before_gc()) {
   7.550      return false;
   7.551    }
   7.552 @@ -1238,8 +1349,7 @@
   7.553      Universe::print_heap_before_gc();
   7.554    }
   7.555  
   7.556 -  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   7.557 -  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
   7.558 +  verify_region_sets_optional();
   7.559  
   7.560    const bool do_clear_all_soft_refs = clear_all_soft_refs ||
   7.561                             collector_policy()->should_clear_all_soft_refs();
   7.562 @@ -1262,6 +1372,9 @@
   7.563      double start = os::elapsedTime();
   7.564      g1_policy()->record_full_collection_start();
   7.565  
   7.566 +    wait_while_free_regions_coming();
   7.567 +    append_secondary_free_list_if_not_empty();
   7.568 +
   7.569      gc_prologue(true);
   7.570      increment_total_collections(true /* full gc */);
   7.571  
   7.572 @@ -1274,7 +1387,6 @@
   7.573        gclog_or_tty->print(" VerifyBeforeGC:");
   7.574        Universe::verify(true);
   7.575      }
   7.576 -    assert(regions_accounted_for(), "Region leakage!");
   7.577  
   7.578      COMPILER2_PRESENT(DerivedPointerTable::clear());
   7.579  
   7.580 @@ -1298,7 +1410,6 @@
   7.581      assert(_cur_alloc_region == NULL, "Invariant.");
   7.582      g1_rem_set()->cleanupHRRS();
   7.583      tear_down_region_lists();
   7.584 -    set_used_regions_to_need_zero_fill();
   7.585  
   7.586      // We may have added regions to the current incremental collection
   7.587      // set between the last GC or pause and now. We need to clear the
   7.588 @@ -1333,9 +1444,7 @@
   7.589        HandleMark hm;  // Discard invalid handles created during gc
   7.590        G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
   7.591      }
   7.592 -    // Because freeing humongous regions may have added some unclean
   7.593 -    // regions, it is necessary to tear down again before rebuilding.
   7.594 -    tear_down_region_lists();
   7.595 +    assert(free_regions() == 0, "we should not have added any free regions");
   7.596      rebuild_region_lists();
   7.597  
   7.598      _summary_bytes_used = recalculate_used();
   7.599 @@ -1417,7 +1526,6 @@
   7.600      JavaThread::dirty_card_queue_set().abandon_logs();
   7.601      assert(!G1DeferredRSUpdate
   7.602             || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
   7.603 -    assert(regions_accounted_for(), "Region leakage!");
   7.604    }
   7.605  
   7.606    if (g1_policy()->in_young_gc_mode()) {
   7.607 @@ -1431,6 +1539,8 @@
   7.608    // Update the number of full collections that have been completed.
   7.609    increment_full_collections_completed(false /* concurrent */);
   7.610  
   7.611 +  verify_region_sets_optional();
   7.612 +
   7.613    if (PrintHeapAtGC) {
   7.614      Universe::print_heap_after_gc();
   7.615    }
   7.616 @@ -1571,10 +1681,7 @@
   7.617  HeapWord*
   7.618  G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
   7.619                                             bool* succeeded) {
   7.620 -  assert(SafepointSynchronize::is_at_safepoint(),
   7.621 -         "satisfy_failed_allocation() should only be called at a safepoint");
   7.622 -  assert(Thread::current()->is_VM_thread(),
   7.623 -         "satisfy_failed_allocation() should only be called by the VM thread");
   7.624 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.625  
   7.626    *succeeded = true;
   7.627    // Let's attempt the allocation first.
   7.628 @@ -1646,53 +1753,22 @@
   7.629  // allocated block, or else "NULL".
   7.630  
   7.631  HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   7.632 -  assert(SafepointSynchronize::is_at_safepoint(),
   7.633 -         "expand_and_allocate() should only be called at a safepoint");
   7.634 -  assert(Thread::current()->is_VM_thread(),
   7.635 -         "expand_and_allocate() should only be called by the VM thread");
   7.636 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.637 +
   7.638 +  verify_region_sets_optional();
   7.639  
   7.640    size_t expand_bytes = word_size * HeapWordSize;
   7.641    if (expand_bytes < MinHeapDeltaBytes) {
   7.642      expand_bytes = MinHeapDeltaBytes;
   7.643    }
   7.644    expand(expand_bytes);
   7.645 -  assert(regions_accounted_for(), "Region leakage!");
   7.646 +
   7.647 +  verify_region_sets_optional();
   7.648  
   7.649    return attempt_allocation_at_safepoint(word_size,
   7.650                                       false /* expect_null_cur_alloc_region */);
   7.651  }
   7.652  
   7.653 -size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
   7.654 -  size_t pre_used = 0;
   7.655 -  size_t cleared_h_regions = 0;
   7.656 -  size_t freed_regions = 0;
   7.657 -  UncleanRegionList local_list;
   7.658 -  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
   7.659 -                                    freed_regions, &local_list);
   7.660 -
   7.661 -  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
   7.662 -                          &local_list);
   7.663 -  return pre_used;
   7.664 -}
   7.665 -
   7.666 -void
   7.667 -G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
   7.668 -                                                   size_t& pre_used,
   7.669 -                                                   size_t& cleared_h,
   7.670 -                                                   size_t& freed_regions,
   7.671 -                                                   UncleanRegionList* list,
   7.672 -                                                   bool par) {
   7.673 -  assert(!hr->continuesHumongous(), "should have filtered these out");
   7.674 -  size_t res = 0;
   7.675 -  if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
   7.676 -      !hr->is_young()) {
   7.677 -    if (G1PolicyVerbose > 0)
   7.678 -      gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
   7.679 -                                                                               " during cleanup", hr, hr->used());
   7.680 -    free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
   7.681 -  }
   7.682 -}
   7.683 -
   7.684  // FIXME: both this and shrink could probably be more efficient by
   7.685  // doing one "VirtualSpace::expand_by" call rather than several.
   7.686  void G1CollectedHeap::expand(size_t expand_bytes) {
   7.687 @@ -1725,19 +1801,7 @@
   7.688  
   7.689        // Add it to the HeapRegionSeq.
   7.690        _hrs->insert(hr);
   7.691 -      // Set the zero-fill state, according to whether it's already
   7.692 -      // zeroed.
   7.693 -      {
   7.694 -        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   7.695 -        if (is_zeroed) {
   7.696 -          hr->set_zero_fill_complete();
   7.697 -          put_free_region_on_list_locked(hr);
   7.698 -        } else {
   7.699 -          hr->set_zero_fill_needed();
   7.700 -          put_region_on_unclean_list_locked(hr);
   7.701 -        }
   7.702 -      }
   7.703 -      _free_regions++;
   7.704 +      _free_list.add_as_tail(hr);
   7.705        // And we used up an expansion region to create it.
   7.706        _expansion_regions--;
   7.707        // Tell the cardtable about it.
   7.708 @@ -1746,6 +1810,7 @@
   7.709        _bot_shared->resize(_g1_committed.word_size());
   7.710      }
   7.711    }
   7.712 +
   7.713    if (Verbose && PrintGC) {
   7.714      size_t new_mem_size = _g1_storage.committed_size();
   7.715      gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
   7.716 @@ -1770,7 +1835,6 @@
   7.717    assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
   7.718  
   7.719    _g1_committed.set_end(mr.start());
   7.720 -  _free_regions -= num_regions_deleted;
   7.721    _expansion_regions += num_regions_deleted;
   7.722  
   7.723    // Tell the cardtable about it.
   7.724 @@ -1790,10 +1854,17 @@
   7.725  }
   7.726  
   7.727  void G1CollectedHeap::shrink(size_t shrink_bytes) {
   7.728 +  verify_region_sets_optional();
   7.729 +
   7.730    release_gc_alloc_regions(true /* totally */);
   7.731 +  // Instead of tearing down / rebuilding the free lists here, we
   7.732 +  // could instead use the remove_all_pending() method on free_list to
   7.733 +  // remove only the ones that we need to remove.
   7.734    tear_down_region_lists();  // We will rebuild them in a moment.
   7.735    shrink_helper(shrink_bytes);
   7.736    rebuild_region_lists();
   7.737 +
   7.738 +  verify_region_sets_optional();
   7.739  }
   7.740  
   7.741  // Public methods.
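The comment in shrink() above notes an alternative to tearing down and rebuilding the free lists: removing only the regions that fall inside the range being uncommitted. A rough standalone sketch of that alternative, with a plain std::list standing in for the free list and raw start addresses standing in for regions (none of these names are the actual G1 API):

    #include <cstdint>
    #include <cstdio>
    #include <list>

    // Drop from 'free_list' every "region" whose start address lies at or
    // above the new committed end, i.e. the regions a shrink uncommits.
    static void remove_regions_above(std::list<std::uintptr_t>& free_list,
                                     std::uintptr_t new_committed_end) {
      free_list.remove_if([new_committed_end](std::uintptr_t region_start) {
        return region_start >= new_committed_end;
      });
    }

    int main() {
      std::list<std::uintptr_t> free_list = {0x1000, 0x2000, 0x3000, 0x4000};
      remove_regions_above(free_list, 0x3000);  // shrink the heap to 0x3000
      for (std::uintptr_t r : free_list) {
        std::printf("still free: 0x%lx\n", (unsigned long) r);
      }
      return 0;
    }
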
   7.742 @@ -1812,18 +1883,17 @@
   7.743    _ref_processor(NULL),
   7.744    _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   7.745    _bot_shared(NULL),
   7.746 -  _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
   7.747    _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
   7.748    _evac_failure_scan_stack(NULL) ,
   7.749    _mark_in_progress(false),
   7.750 -  _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
   7.751 +  _cg1r(NULL), _summary_bytes_used(0),
   7.752    _cur_alloc_region(NULL),
   7.753    _refine_cte_cl(NULL),
   7.754 -  _free_region_list(NULL), _free_region_list_size(0),
   7.755 -  _free_regions(0),
   7.756    _full_collection(false),
   7.757 -  _unclean_region_list(),
   7.758 -  _unclean_regions_coming(false),
   7.759 +  _free_list("Master Free List"),
   7.760 +  _secondary_free_list("Secondary Free List"),
   7.761 +  _humongous_set("Master Humongous Set"),
   7.762 +  _free_regions_coming(false),
   7.763    _young_list(new YoungList(this)),
   7.764    _gc_time_stamp(0),
   7.765    _surviving_young_words(NULL),
   7.766 @@ -1944,8 +2014,6 @@
   7.767  
   7.768    _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
   7.769  
   7.770 -  _num_humongous_regions = 0;
   7.771 -
   7.772    // Create the gen rem set (and barrier set) for the entire reserved region.
   7.773    _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   7.774    set_barrier_set(rem_set()->bs());
   7.775 @@ -1990,6 +2058,8 @@
   7.776    guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
   7.777              "too many cards per region");
   7.778  
   7.779 +  HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
   7.780 +
   7.781    _bot_shared = new G1BlockOffsetSharedArray(_reserved,
   7.782                                               heap_word_size(init_byte_size));
   7.783  
   7.784 @@ -2014,11 +2084,6 @@
   7.785    _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
   7.786    _cmThread = _cm->cmThread();
   7.787  
   7.788 -  // ...and the concurrent zero-fill thread, if necessary.
   7.789 -  if (G1ConcZeroFill) {
   7.790 -    _czft = new ConcurrentZFThread();
   7.791 -  }
   7.792 -
   7.793    // Initialize the from_card cache structure of HeapRegionRemSet.
   7.794    HeapRegionRemSet::init_heap(max_regions());
   7.795  
   7.796 @@ -2192,7 +2257,7 @@
   7.797  #endif // PRODUCT
   7.798  
   7.799  size_t G1CollectedHeap::unsafe_max_alloc() {
   7.800 -  if (_free_regions > 0) return HeapRegion::GrainBytes;
   7.801 +  if (free_regions() > 0) return HeapRegion::GrainBytes;
   7.802    // otherwise, is there space in the current allocation region?
   7.803  
   7.804    // We need to store the current allocation region in a local variable
   7.805 @@ -2272,8 +2337,7 @@
   7.806  }
   7.807  
   7.808  void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   7.809 -  assert(Thread::current()->is_VM_thread(), "Precondition#1");
   7.810 -  assert(Heap_lock->is_locked(), "Precondition#2");
   7.811 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.812    GCCauseSetter gcs(this, cause);
   7.813    switch (cause) {
   7.814      case GCCause::_heap_inspection:
   7.815 @@ -2296,12 +2360,6 @@
   7.816    {
   7.817      MutexLocker ml(Heap_lock);
   7.818  
   7.819 -    // Don't want to do a GC until cleanup is completed. This
   7.820 -    // limitation will be removed in the near future when the
   7.821 -    // operation of the free region list is revamped as part of
   7.822 -    // CR 6977804.
   7.823 -    wait_for_cleanup_complete();
   7.824 -
   7.825      // Read the GC count while holding the Heap_lock
   7.826      gc_count_before = SharedHeap::heap()->total_collections();
   7.827      full_gc_count_before = SharedHeap::heap()->total_full_collections();
   7.828 @@ -2680,10 +2738,6 @@
   7.829    }
   7.830  }
   7.831  
   7.832 -bool G1CollectedHeap::allocs_are_zero_filled() {
   7.833 -  return false;
   7.834 -}
   7.835 -
   7.836  size_t G1CollectedHeap::large_typearray_limit() {
   7.837    // FIXME
   7.838    return HeapRegion::GrainBytes/HeapWordSize;
   7.839 @@ -2698,7 +2752,6 @@
   7.840    return 0;
   7.841  }
   7.842  
   7.843 -
   7.844  void G1CollectedHeap::prepare_for_verify() {
   7.845    if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
   7.846      ensure_parsability(false);
   7.847 @@ -2909,7 +2962,9 @@
   7.848                           &rootsCl);
   7.849      bool failures = rootsCl.failures();
   7.850      rem_set()->invalidate(perm_gen()->used_region(), false);
   7.851 -    if (!silent) { gclog_or_tty->print("heapRegions "); }
   7.852 +    if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
   7.853 +    verify_region_sets();
   7.854 +    if (!silent) { gclog_or_tty->print("HeapRegions "); }
   7.855      if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
   7.856        assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
   7.857               "sanity check");
   7.858 @@ -2937,7 +2992,7 @@
   7.859          failures = true;
   7.860        }
   7.861      }
   7.862 -    if (!silent) gclog_or_tty->print("remset ");
   7.863 +    if (!silent) gclog_or_tty->print("RemSet ");
   7.864      rem_set()->verify();
   7.865  
   7.866      if (failures) {
   7.867 @@ -3008,15 +3063,10 @@
   7.868    if (G1CollectedHeap::use_parallel_gc_threads()) {
   7.869      workers()->print_worker_threads_on(st);
   7.870    }
   7.871 -
   7.872    _cmThread->print_on(st);
   7.873    st->cr();
   7.874 -
   7.875    _cm->print_worker_threads_on(st);
   7.876 -
   7.877    _cg1r->print_worker_threads_on(st);
   7.878 -
   7.879 -  _czft->print_on(st);
   7.880    st->cr();
   7.881  }
   7.882  
   7.883 @@ -3026,7 +3076,6 @@
   7.884    }
   7.885    tc->do_thread(_cmThread);
   7.886    _cg1r->threads_do(tc);
   7.887 -  tc->do_thread(_czft);
   7.888  }
   7.889  
   7.890  void G1CollectedHeap::print_tracing_info() const {
   7.891 @@ -3042,15 +3091,10 @@
   7.892    if (G1SummarizeConcMark) {
   7.893      concurrent_mark()->print_summary_info();
   7.894    }
   7.895 -  if (G1SummarizeZFStats) {
   7.896 -    ConcurrentZFThread::print_summary_info();
   7.897 -  }
   7.898    g1_policy()->print_yg_surv_rate_info();
   7.899 -
   7.900    SpecializationStats::print();
   7.901  }
   7.902  
   7.903 -
   7.904  int G1CollectedHeap::addr_to_arena_id(void* addr) const {
   7.905    HeapRegion* hr = heap_region_containing(addr);
   7.906    if (hr == NULL) {
   7.907 @@ -3249,6 +3293,9 @@
   7.908  
   7.909  bool
   7.910  G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   7.911 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.912 +  guarantee(!is_gc_active(), "collection is not reentrant");
   7.913 +
   7.914    if (GC_locker::check_active_before_gc()) {
   7.915      return false;
   7.916    }
   7.917 @@ -3260,6 +3307,8 @@
   7.918      Universe::print_heap_before_gc();
   7.919    }
   7.920  
   7.921 +  verify_region_sets_optional();
   7.922 +
   7.923    {
   7.924      // This call will decide whether this pause is an initial-mark
   7.925      // pause. If it is, during_initial_mark_pause() will return true
   7.926 @@ -3290,10 +3339,16 @@
   7.927  
   7.928      TraceMemoryManagerStats tms(false /* fullGC */);
   7.929  
   7.930 -    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   7.931 -    assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
   7.932 -    guarantee(!is_gc_active(), "collection is not reentrant");
   7.933 -    assert(regions_accounted_for(), "Region leakage!");
    7.934 +    // If there are any free regions available on the secondary_free_list,
   7.935 +    // make sure we append them to the free_list. However, we don't
   7.936 +    // have to wait for the rest of the cleanup operation to
   7.937 +    // finish. If it's still going on that's OK. If we run out of
   7.938 +    // regions, the region allocation code will check the
   7.939 +    // secondary_free_list and potentially wait if more free regions
   7.940 +    // are coming (see new_region_try_secondary_free_list()).
   7.941 +    if (!G1StressConcRegionFreeing) {
   7.942 +      append_secondary_free_list_if_not_empty();
   7.943 +    }
   7.944  
   7.945      increment_gc_time_stamp();
   7.946  
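The comment above describes draining whatever the concurrent cleanup has already put on the secondary free list, without waiting for the rest of the cleanup to finish. A toy standalone illustration of that drain, using std::list and std::mutex rather than the HotSpot region sets (the names are made up for the example):

    #include <cstdio>
    #include <list>
    #include <mutex>

    // The cleanup thread appends freed region ids to 'secondary'; a pause
    // moves whatever has arrived so far onto 'primary' without waiting.
    struct FreeLists {
      std::mutex secondary_lock;
      std::list<int> secondary;   // filled concurrently by "cleanup"
      std::list<int> primary;     // consumed by the allocator / pauses

      void append_secondary_if_not_empty() {
        std::lock_guard<std::mutex> g(secondary_lock);
        if (!secondary.empty()) {
          // splice() relinks the nodes in O(1); nothing is copied.
          primary.splice(primary.end(), secondary);
        }
      }
    };

    int main() {
      FreeLists fl;
      fl.secondary = {7, 8, 9};            // regions freed by cleanup so far
      fl.append_secondary_if_not_empty();  // the pause grabs them, no wait
      std::printf("primary now holds %zu regions\n", fl.primary.size());
      return 0;
    }
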
   7.947 @@ -3373,8 +3428,6 @@
   7.948        // progress, this will be zero.
   7.949        _cm->set_oops_do_bound();
   7.950  
   7.951 -      assert(regions_accounted_for(), "Region leakage.");
   7.952 -
   7.953        if (mark_in_progress())
   7.954          concurrent_mark()->newCSet();
   7.955  
   7.956 @@ -3470,8 +3523,6 @@
   7.957        g1_policy()->record_pause_time_ms(pause_time_ms);
   7.958        g1_policy()->record_collection_pause_end();
   7.959  
   7.960 -      assert(regions_accounted_for(), "Region leakage.");
   7.961 -
   7.962        MemoryService::track_memory_usage();
   7.963  
   7.964        if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
   7.965 @@ -3502,8 +3553,6 @@
   7.966        gc_epilogue(false);
   7.967      }
   7.968  
   7.969 -    assert(verify_region_lists(), "Bad region lists.");
   7.970 -
   7.971      if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
   7.972        gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
   7.973        print_tracing_info();
   7.974 @@ -3511,6 +3560,8 @@
   7.975      }
   7.976    }
   7.977  
   7.978 +  verify_region_sets_optional();
   7.979 +
   7.980    TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
   7.981    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
   7.982  
   7.983 @@ -3617,7 +3668,7 @@
   7.984  
   7.985  void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
   7.986    assert(Thread::current()->is_VM_thread() ||
   7.987 -         par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
   7.988 +         FreeList_lock->owned_by_self(), "Precondition");
   7.989    assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
   7.990           "Precondition.");
   7.991    hr->set_is_gc_alloc_region(true);
   7.992 @@ -3639,7 +3690,7 @@
   7.993  #endif // G1_DEBUG
   7.994  
   7.995  void G1CollectedHeap::forget_alloc_region_list() {
   7.996 -  assert(Thread::current()->is_VM_thread(), "Precondition");
   7.997 +  assert_at_safepoint(true /* should_be_vm_thread */);
   7.998    while (_gc_alloc_region_list != NULL) {
   7.999      HeapRegion* r = _gc_alloc_region_list;
  7.1000      assert(r->is_gc_alloc_region(), "Invariant.");
  7.1001 @@ -3659,9 +3710,6 @@
  7.1002          _young_list->add_survivor_region(r);
  7.1003        }
  7.1004      }
  7.1005 -    if (r->is_empty()) {
  7.1006 -      ++_free_regions;
  7.1007 -    }
  7.1008    }
  7.1009  #ifdef G1_DEBUG
  7.1010    FindGCAllocRegion fa;
  7.1011 @@ -3714,7 +3762,7 @@
  7.1012  
  7.1013      if (alloc_region == NULL) {
  7.1014        // we will get a new GC alloc region
  7.1015 -      alloc_region = newAllocRegionWithExpansion(ap, 0);
  7.1016 +      alloc_region = new_gc_alloc_region(ap, 0);
  7.1017      } else {
  7.1018        // the region was retained from the last collection
  7.1019        ++_gc_alloc_region_counts[ap];
  7.1020 @@ -3769,11 +3817,9 @@
  7.1021        set_gc_alloc_region(ap, NULL);
  7.1022  
  7.1023        if (r->is_empty()) {
  7.1024 -        // we didn't actually allocate anything in it; let's just put
  7.1025 -        // it on the free list
  7.1026 -        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1027 -        r->set_zero_fill_complete();
  7.1028 -        put_free_region_on_list_locked(r);
  7.1029 +        // We didn't actually allocate anything in it; let's just put
  7.1030 +        // it back on the free list.
  7.1031 +        _free_list.add_as_tail(r);
  7.1032        } else if (_retain_gc_alloc_region[ap] && !totally) {
  7.1033          // retain it so that we can use it at the beginning of the next GC
  7.1034          _retained_gc_alloc_regions[ap] = r;
  7.1035 @@ -4128,8 +4174,6 @@
  7.1036  
  7.1037    HeapWord* block = alloc_region->par_allocate(word_size);
  7.1038    if (block == NULL) {
  7.1039 -    MutexLockerEx x(par_alloc_during_gc_lock(),
  7.1040 -                    Mutex::_no_safepoint_check_flag);
  7.1041      block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  7.1042    }
  7.1043    return block;
  7.1044 @@ -4158,6 +4202,12 @@
  7.1045           err_msg("we should not be seeing humongous allocation requests "
  7.1046                   "during GC, word_size = "SIZE_FORMAT, word_size));
  7.1047  
  7.1048 +  // We need to make sure we serialize calls to this method. Given
  7.1049 +  // that the FreeList_lock guards accesses to the free_list anyway,
   7.1050 +  // and that we may need to remove a region from it, we'll use it
  7.1051 +  // to protect the whole call.
  7.1052 +  MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  7.1053 +
  7.1054    HeapWord* block = NULL;
  7.1055    // In the parallel case, a previous thread to obtain the lock may have
  7.1056    // already assigned a new gc_alloc_region.
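
The FreeList_lock protection added around this slow path is the usual "check again under the lock" shape: whichever worker takes the lock first installs a new GC alloc region, and later workers re-check and simply reuse it. A minimal standalone sketch of that pattern (not the G1 types, just the shape of the logic):

    #include <cstdio>
    #include <mutex>

    // Many workers can fail the lock-free fast path at once; only the first
    // one through the lock installs a new "region", the rest re-check it.
    struct Allocator {
      std::mutex slow_path_lock;
      int* region = nullptr;    // current allocation region, nullptr == full

      int* allocate_slow() {
        std::lock_guard<std::mutex> g(slow_path_lock);
        if (region == nullptr) {          // re-check under the lock: another
          region = new int[128]{};        // thread may have beaten us to it
          std::printf("installed a new region\n");
        }
        return region;                    // caller retries its allocation
      }
    };

    int main() {
      Allocator a;
      a.allocate_slow();
      a.allocate_slow();                  // second call reuses the region
      delete[] a.region;
      return 0;
    }
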
  7.1057 @@ -4203,7 +4253,7 @@
  7.1058    }
  7.1059  
  7.1060    // Now allocate a new region for allocation.
  7.1061 -  alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
  7.1062 +  alloc_region = new_gc_alloc_region(purpose, word_size);
  7.1063  
  7.1064    // let the caller handle alloc failure
  7.1065    if (alloc_region != NULL) {
  7.1066 @@ -4211,9 +4261,6 @@
  7.1067      assert(check_gc_alloc_regions(), "alloc regions messed up");
  7.1068      assert(alloc_region->saved_mark_at_top(),
  7.1069             "Mark should have been saved already.");
  7.1070 -    // We used to assert that the region was zero-filled here, but no
  7.1071 -    // longer.
  7.1072 -
  7.1073      // This must be done last: once it's installed, other regions may
  7.1074      // allocate in it (without holding the lock.)
  7.1075      set_gc_alloc_region(purpose, alloc_region);
  7.1076 @@ -4878,91 +4925,91 @@
  7.1077    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  7.1078  }
  7.1079  
  7.1080 -void G1CollectedHeap::free_region(HeapRegion* hr) {
  7.1081 -  size_t pre_used = 0;
  7.1082 -  size_t cleared_h_regions = 0;
  7.1083 -  size_t freed_regions = 0;
  7.1084 -  UncleanRegionList local_list;
  7.1085 -
  7.1086 -  HeapWord* start = hr->bottom();
  7.1087 -  HeapWord* end   = hr->prev_top_at_mark_start();
  7.1088 -  size_t used_bytes = hr->used();
  7.1089 -  size_t live_bytes = hr->max_live_bytes();
  7.1090 -  if (used_bytes > 0) {
  7.1091 -    guarantee( live_bytes <= used_bytes, "invariant" );
  7.1092 -  } else {
  7.1093 -    guarantee( live_bytes == 0, "invariant" );
  7.1094 -  }
  7.1095 -
  7.1096 -  size_t garbage_bytes = used_bytes - live_bytes;
  7.1097 -  if (garbage_bytes > 0)
  7.1098 -    g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
  7.1099 -
  7.1100 -  free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
  7.1101 -                   &local_list);
  7.1102 -  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  7.1103 -                          &local_list);
  7.1104 +void G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr,
  7.1105 +                                     size_t* pre_used,
  7.1106 +                                     FreeRegionList* free_list,
  7.1107 +                                     HumongousRegionSet* humongous_proxy_set,
  7.1108 +                                     bool par) {
  7.1109 +  if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
  7.1110 +    if (hr->isHumongous()) {
  7.1111 +      assert(hr->startsHumongous(), "we should only see starts humongous");
  7.1112 +      free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
  7.1113 +    } else {
  7.1114 +      free_region(hr, pre_used, free_list, par);
  7.1115 +    }
  7.1116 +  }
  7.1117  }
  7.1118  
  7.1119 -void
  7.1120 -G1CollectedHeap::free_region_work(HeapRegion* hr,
  7.1121 -                                  size_t& pre_used,
  7.1122 -                                  size_t& cleared_h_regions,
  7.1123 -                                  size_t& freed_regions,
  7.1124 -                                  UncleanRegionList* list,
  7.1125 +void G1CollectedHeap::free_region(HeapRegion* hr,
  7.1126 +                                  size_t* pre_used,
  7.1127 +                                  FreeRegionList* free_list,
  7.1128                                    bool par) {
  7.1129 -  pre_used += hr->used();
  7.1130 -  if (hr->isHumongous()) {
  7.1131 -    assert(hr->startsHumongous(),
  7.1132 -           "Only the start of a humongous region should be freed.");
  7.1133 -    int ind = _hrs->find(hr);
  7.1134 -    assert(ind != -1, "Should have an index.");
  7.1135 -    // Clear the start region.
  7.1136 -    hr->hr_clear(par, true /*clear_space*/);
  7.1137 -    list->insert_before_head(hr);
  7.1138 -    cleared_h_regions++;
  7.1139 -    freed_regions++;
  7.1140 -    // Clear any continued regions.
  7.1141 -    ind++;
  7.1142 -    while ((size_t)ind < n_regions()) {
  7.1143 -      HeapRegion* hrc = _hrs->at(ind);
  7.1144 -      if (!hrc->continuesHumongous()) break;
  7.1145 -      // Otherwise, does continue the H region.
  7.1146 -      assert(hrc->humongous_start_region() == hr, "Huh?");
  7.1147 -      hrc->hr_clear(par, true /*clear_space*/);
  7.1148 -      cleared_h_regions++;
  7.1149 -      freed_regions++;
  7.1150 -      list->insert_before_head(hrc);
  7.1151 -      ind++;
  7.1152 +  assert(!hr->isHumongous(), "this is only for non-humongous regions");
  7.1153 +  assert(!hr->is_empty(), "the region should not be empty");
  7.1154 +  assert(free_list != NULL, "pre-condition");
  7.1155 +
  7.1156 +  *pre_used += hr->used();
  7.1157 +  hr->hr_clear(par, true /* clear_space */);
  7.1158 +  free_list->add_as_tail(hr);
  7.1159 +}
  7.1160 +
  7.1161 +void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  7.1162 +                                     size_t* pre_used,
  7.1163 +                                     FreeRegionList* free_list,
  7.1164 +                                     HumongousRegionSet* humongous_proxy_set,
  7.1165 +                                     bool par) {
  7.1166 +  assert(hr->startsHumongous(), "this is only for starts humongous regions");
  7.1167 +  assert(free_list != NULL, "pre-condition");
  7.1168 +  assert(humongous_proxy_set != NULL, "pre-condition");
  7.1169 +
  7.1170 +  size_t hr_used = hr->used();
  7.1171 +  size_t hr_capacity = hr->capacity();
  7.1172 +  size_t hr_pre_used = 0;
  7.1173 +  _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
  7.1174 +  hr->set_notHumongous();
  7.1175 +  free_region(hr, &hr_pre_used, free_list, par);
  7.1176 +
  7.1177 +  int i = hr->hrs_index() + 1;
  7.1178 +  size_t num = 1;
  7.1179 +  while ((size_t) i < n_regions()) {
  7.1180 +    HeapRegion* curr_hr = _hrs->at(i);
  7.1181 +    if (!curr_hr->continuesHumongous()) {
  7.1182 +      break;
  7.1183      }
  7.1184 -  } else {
  7.1185 -    hr->hr_clear(par, true /*clear_space*/);
  7.1186 -    list->insert_before_head(hr);
  7.1187 -    freed_regions++;
  7.1188 -    // If we're using clear2, this should not be enabled.
  7.1189 -    // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
  7.1190 -  }
  7.1191 +    curr_hr->set_notHumongous();
  7.1192 +    free_region(curr_hr, &hr_pre_used, free_list, par);
  7.1193 +    num += 1;
  7.1194 +    i += 1;
  7.1195 +  }
  7.1196 +  assert(hr_pre_used == hr_used,
  7.1197 +         err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
  7.1198 +                 "should be the same", hr_pre_used, hr_used));
  7.1199 +  *pre_used += hr_pre_used;
  7.1200  }
  7.1201  
  7.1202 -void G1CollectedHeap::finish_free_region_work(size_t pre_used,
  7.1203 -                                              size_t cleared_h_regions,
  7.1204 -                                              size_t freed_regions,
  7.1205 -                                              UncleanRegionList* list) {
  7.1206 -  if (list != NULL && list->sz() > 0) {
  7.1207 -    prepend_region_list_on_unclean_list(list);
  7.1208 -  }
  7.1209 -  // Acquire a lock, if we're parallel, to update possibly-shared
  7.1210 -  // variables.
  7.1211 -  Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
  7.1212 -  {
  7.1213 +void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
  7.1214 +                                       FreeRegionList* free_list,
  7.1215 +                                       HumongousRegionSet* humongous_proxy_set,
  7.1216 +                                       bool par) {
  7.1217 +  if (pre_used > 0) {
  7.1218 +    Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
  7.1219      MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  7.1220 +    assert(_summary_bytes_used >= pre_used,
  7.1221 +           err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
  7.1222 +                   "should be >= pre_used: "SIZE_FORMAT,
  7.1223 +                   _summary_bytes_used, pre_used));
  7.1224      _summary_bytes_used -= pre_used;
  7.1225 -    _num_humongous_regions -= (int) cleared_h_regions;
  7.1226 -    _free_regions += freed_regions;
  7.1227 +  }
  7.1228 +  if (free_list != NULL && !free_list->is_empty()) {
  7.1229 +    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  7.1230 +    _free_list.add_as_tail(free_list);
  7.1231 +  }
  7.1232 +  if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
  7.1233 +    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  7.1234 +    _humongous_set.update_from_proxy(humongous_proxy_set);
  7.1235    }
  7.1236  }
  7.1237  
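update_sets_after_freeing_regions() above captures the accounting pattern used throughout this change: each worker accumulates pre_used bytes and a private free list, then merges into the shared state once, taking each lock only if there is actually something to merge. A standalone sketch of the same shape using standard-library types (the names are illustrative, not the G1 API):

    #include <cstddef>
    #include <cstdio>
    #include <list>
    #include <mutex>

    // Shared state, each piece guarded by its own lock, mirroring
    // _summary_bytes_used and the master free list.
    static std::mutex bytes_lock;
    static std::size_t summary_bytes_used = 10000;

    static std::mutex free_list_lock;
    static std::list<int> master_free_list;

    // A worker frees regions into purely local state, then merges once.
    static void merge_local_results(std::size_t local_pre_used,
                                    std::list<int>& local_free_list) {
      if (local_pre_used > 0) {
        std::lock_guard<std::mutex> g(bytes_lock);
        summary_bytes_used -= local_pre_used;
      }
      if (!local_free_list.empty()) {
        std::lock_guard<std::mutex> g(free_list_lock);
        master_free_list.splice(master_free_list.end(), local_free_list);
      }
    }

    int main() {
      std::list<int> local = {3, 4};        // regions this worker freed
      merge_local_results(2048, local);     // one locked merge at the end
      std::printf("used=%zu bytes, free regions=%zu\n",
                  summary_bytes_used, master_free_list.size());
      return 0;
    }
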
  7.1238 -
  7.1239  void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  7.1240    while (list != NULL) {
  7.1241      guarantee( list->is_young(), "invariant" );
  7.1242 @@ -5085,6 +5132,9 @@
  7.1243  }
  7.1244  
  7.1245  void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  7.1246 +  size_t pre_used = 0;
  7.1247 +  FreeRegionList local_free_list("Local List for CSet Freeing");
  7.1248 +
  7.1249    double young_time_ms     = 0.0;
  7.1250    double non_young_time_ms = 0.0;
  7.1251  
  7.1252 @@ -5103,6 +5153,8 @@
  7.1253    size_t rs_lengths = 0;
  7.1254  
  7.1255    while (cur != NULL) {
  7.1256 +    assert(!is_on_free_list(cur), "sanity");
  7.1257 +
  7.1258      if (non_young) {
  7.1259        if (cur->is_young()) {
  7.1260          double end_sec = os::elapsedTime();
  7.1261 @@ -5113,14 +5165,12 @@
  7.1262          non_young = false;
  7.1263        }
  7.1264      } else {
  7.1265 -      if (!cur->is_on_free_list()) {
  7.1266 -        double end_sec = os::elapsedTime();
  7.1267 -        double elapsed_ms = (end_sec - start_sec) * 1000.0;
  7.1268 -        young_time_ms += elapsed_ms;
  7.1269 -
  7.1270 -        start_sec = os::elapsedTime();
  7.1271 -        non_young = true;
  7.1272 -      }
  7.1273 +      double end_sec = os::elapsedTime();
  7.1274 +      double elapsed_ms = (end_sec - start_sec) * 1000.0;
  7.1275 +      young_time_ms += elapsed_ms;
  7.1276 +
  7.1277 +      start_sec = os::elapsedTime();
  7.1278 +      non_young = true;
  7.1279      }
  7.1280  
  7.1281      rs_lengths += cur->rem_set()->occupied();
  7.1282 @@ -5153,9 +5203,8 @@
  7.1283  
  7.1284      if (!cur->evacuation_failed()) {
  7.1285        // And the region is empty.
  7.1286 -      assert(!cur->is_empty(),
  7.1287 -             "Should not have empty regions in a CS.");
  7.1288 -      free_region(cur);
  7.1289 +      assert(!cur->is_empty(), "Should not have empty regions in a CS.");
  7.1290 +      free_region(cur, &pre_used, &local_free_list, false /* par */);
  7.1291      } else {
  7.1292        cur->uninstall_surv_rate_group();
  7.1293        if (cur->is_young())
  7.1294 @@ -5176,6 +5225,9 @@
  7.1295    else
  7.1296      young_time_ms += elapsed_ms;
  7.1297  
  7.1298 +  update_sets_after_freeing_regions(pre_used, &local_free_list,
  7.1299 +                                    NULL /* humongous_proxy_set */,
  7.1300 +                                    false /* par */);
  7.1301    policy->record_young_free_cset_time_ms(young_time_ms);
  7.1302    policy->record_non_young_free_cset_time_ms(non_young_time_ms);
  7.1303  }
  7.1304 @@ -5201,291 +5253,53 @@
  7.1305    }
  7.1306  }
  7.1307  
  7.1308 -HeapRegion*
  7.1309 -G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
  7.1310 -  assert(ZF_mon->owned_by_self(), "Precondition");
  7.1311 -  HeapRegion* res = pop_unclean_region_list_locked();
  7.1312 -  if (res != NULL) {
  7.1313 -    assert(!res->continuesHumongous() &&
  7.1314 -           res->zero_fill_state() != HeapRegion::Allocated,
  7.1315 -           "Only free regions on unclean list.");
  7.1316 -    if (zero_filled) {
  7.1317 -      res->ensure_zero_filled_locked();
  7.1318 -      res->set_zero_fill_allocated();
  7.1319 +void G1CollectedHeap::set_free_regions_coming() {
  7.1320 +  if (G1ConcRegionFreeingVerbose) {
  7.1321 +    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  7.1322 +                           "setting free regions coming");
  7.1323 +  }
  7.1324 +
  7.1325 +  assert(!free_regions_coming(), "pre-condition");
  7.1326 +  _free_regions_coming = true;
  7.1327 +}
  7.1328 +
  7.1329 +void G1CollectedHeap::reset_free_regions_coming() {
  7.1330 +  {
  7.1331 +    assert(free_regions_coming(), "pre-condition");
  7.1332 +    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  7.1333 +    _free_regions_coming = false;
  7.1334 +    SecondaryFreeList_lock->notify_all();
  7.1335 +  }
  7.1336 +
  7.1337 +  if (G1ConcRegionFreeingVerbose) {
  7.1338 +    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  7.1339 +                           "reset free regions coming");
  7.1340 +  }
  7.1341 +}
  7.1342 +
  7.1343 +void G1CollectedHeap::wait_while_free_regions_coming() {
  7.1344 +  // Most of the time we won't have to wait, so let's do a quick test
  7.1345 +  // first before we take the lock.
  7.1346 +  if (!free_regions_coming()) {
  7.1347 +    return;
  7.1348 +  }
  7.1349 +
  7.1350 +  if (G1ConcRegionFreeingVerbose) {
  7.1351 +    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  7.1352 +                           "waiting for free regions");
  7.1353 +  }
  7.1354 +
  7.1355 +  {
  7.1356 +    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  7.1357 +    while (free_regions_coming()) {
  7.1358 +      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  7.1359      }
  7.1360    }
  7.1361 -  return res;
  7.1362 -}
  7.1363 -
  7.1364 -HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
  7.1365 -  MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1366 -  return alloc_region_from_unclean_list_locked(zero_filled);
  7.1367 -}
  7.1368 -
  7.1369 -void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
  7.1370 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1371 -  put_region_on_unclean_list_locked(r);
  7.1372 -  if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  7.1373 -}
  7.1374 -
  7.1375 -void G1CollectedHeap::set_unclean_regions_coming(bool b) {
  7.1376 -  MutexLockerEx x(Cleanup_mon);
  7.1377 -  set_unclean_regions_coming_locked(b);
  7.1378 -}
  7.1379 -
  7.1380 -void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
  7.1381 -  assert(Cleanup_mon->owned_by_self(), "Precondition");
  7.1382 -  _unclean_regions_coming = b;
  7.1383 -  // Wake up mutator threads that might be waiting for completeCleanup to
  7.1384 -  // finish.
  7.1385 -  if (!b) Cleanup_mon->notify_all();
  7.1386 -}
  7.1387 -
  7.1388 -void G1CollectedHeap::wait_for_cleanup_complete() {
  7.1389 -  assert_not_at_safepoint();
  7.1390 -  MutexLockerEx x(Cleanup_mon);
  7.1391 -  wait_for_cleanup_complete_locked();
  7.1392 -}
  7.1393 -
  7.1394 -void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  7.1395 -  assert(Cleanup_mon->owned_by_self(), "precondition");
  7.1396 -  while (_unclean_regions_coming) {
  7.1397 -    Cleanup_mon->wait();
  7.1398 -  }
  7.1399 -}
  7.1400 -
  7.1401 -void
  7.1402 -G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
  7.1403 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1404 -#ifdef ASSERT
  7.1405 -  if (r->is_gc_alloc_region()) {
  7.1406 -    ResourceMark rm;
  7.1407 -    stringStream region_str;
  7.1408 -    print_on(&region_str);
  7.1409 -    assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
  7.1410 -                                             region_str.as_string()));
  7.1411 -  }
  7.1412 -#endif
  7.1413 -  _unclean_region_list.insert_before_head(r);
  7.1414 -}
  7.1415 -
  7.1416 -void
  7.1417 -G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
  7.1418 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1419 -  prepend_region_list_on_unclean_list_locked(list);
  7.1420 -  if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  7.1421 -}
  7.1422 -
  7.1423 -void
  7.1424 -G1CollectedHeap::
  7.1425 -prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
  7.1426 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1427 -  _unclean_region_list.prepend_list(list);
  7.1428 -}
  7.1429 -
  7.1430 -HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
  7.1431 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1432 -  HeapRegion* res = _unclean_region_list.pop();
  7.1433 -  if (res != NULL) {
  7.1434 -    // Inform ZF thread that there's a new unclean head.
  7.1435 -    if (_unclean_region_list.hd() != NULL && should_zf())
  7.1436 -      ZF_mon->notify_all();
  7.1437 -  }
  7.1438 -  return res;
  7.1439 -}
  7.1440 -
  7.1441 -HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
  7.1442 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1443 -  return _unclean_region_list.hd();
  7.1444 -}
  7.1445 -
  7.1446 -
  7.1447 -bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
  7.1448 -  assert(ZF_mon->owned_by_self(), "Precondition");
  7.1449 -  HeapRegion* r = peek_unclean_region_list_locked();
  7.1450 -  if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
  7.1451 -    // Result of below must be equal to "r", since we hold the lock.
  7.1452 -    (void)pop_unclean_region_list_locked();
  7.1453 -    put_free_region_on_list_locked(r);
  7.1454 -    return true;
  7.1455 -  } else {
  7.1456 -    return false;
  7.1457 -  }
  7.1458 -}
  7.1459 -
  7.1460 -bool G1CollectedHeap::move_cleaned_region_to_free_list() {
  7.1461 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1462 -  return move_cleaned_region_to_free_list_locked();
  7.1463 -}
  7.1464 -
  7.1465 -
  7.1466 -void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  7.1467 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1468 -  assert(_free_region_list_size == free_region_list_length(), "Inv");
  7.1469 -  assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
  7.1470 -        "Regions on free list must be zero filled");
  7.1471 -  assert(!r->isHumongous(), "Must not be humongous.");
  7.1472 -  assert(r->is_empty(), "Better be empty");
  7.1473 -  assert(!r->is_on_free_list(),
  7.1474 -         "Better not already be on free list");
  7.1475 -  assert(!r->is_on_unclean_list(),
  7.1476 -         "Better not already be on unclean list");
  7.1477 -  r->set_on_free_list(true);
  7.1478 -  r->set_next_on_free_list(_free_region_list);
  7.1479 -  _free_region_list = r;
  7.1480 -  _free_region_list_size++;
  7.1481 -  assert(_free_region_list_size == free_region_list_length(), "Inv");
  7.1482 -}
  7.1483 -
  7.1484 -void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  7.1485 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1486 -  put_free_region_on_list_locked(r);
  7.1487 -}
  7.1488 -
  7.1489 -HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  7.1490 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1491 -  assert(_free_region_list_size == free_region_list_length(), "Inv");
  7.1492 -  HeapRegion* res = _free_region_list;
  7.1493 -  if (res != NULL) {
  7.1494 -    _free_region_list = res->next_from_free_list();
  7.1495 -    _free_region_list_size--;
  7.1496 -    res->set_on_free_list(false);
  7.1497 -    res->set_next_on_free_list(NULL);
  7.1498 -    assert(_free_region_list_size == free_region_list_length(), "Inv");
  7.1499 -  }
  7.1500 -  return res;
  7.1501 -}
  7.1502 -
  7.1503 -
  7.1504 -HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  7.1505 -  // By self, or on behalf of self.
  7.1506 -  assert(Heap_lock->is_locked(), "Precondition");
  7.1507 -  HeapRegion* res = NULL;
  7.1508 -  bool first = true;
  7.1509 -  while (res == NULL) {
  7.1510 -    if (zero_filled || !first) {
  7.1511 -      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1512 -      res = pop_free_region_list_locked();
  7.1513 -      if (res != NULL) {
  7.1514 -        assert(!res->zero_fill_is_allocated(),
  7.1515 -               "No allocated regions on free list.");
  7.1516 -        res->set_zero_fill_allocated();
  7.1517 -      } else if (!first) {
  7.1518 -        break;  // We tried both, time to return NULL.
  7.1519 -      }
  7.1520 -    }
  7.1521 -
  7.1522 -    if (res == NULL) {
  7.1523 -      res = alloc_region_from_unclean_list(zero_filled);
  7.1524 -    }
  7.1525 -    assert(res == NULL ||
  7.1526 -           !zero_filled ||
  7.1527 -           res->zero_fill_is_allocated(),
  7.1528 -           "We must have allocated the region we're returning");
  7.1529 -    first = false;
  7.1530 -  }
  7.1531 -  return res;
  7.1532 -}
  7.1533 -
  7.1534 -void G1CollectedHeap::remove_allocated_regions_from_lists() {
  7.1535 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1536 -  {
  7.1537 -    HeapRegion* prev = NULL;
  7.1538 -    HeapRegion* cur = _unclean_region_list.hd();
  7.1539 -    while (cur != NULL) {
  7.1540 -      HeapRegion* next = cur->next_from_unclean_list();
  7.1541 -      if (cur->zero_fill_is_allocated()) {
  7.1542 -        // Remove from the list.
  7.1543 -        if (prev == NULL) {
  7.1544 -          (void)_unclean_region_list.pop();
  7.1545 -        } else {
  7.1546 -          _unclean_region_list.delete_after(prev);
  7.1547 -        }
  7.1548 -        cur->set_on_unclean_list(false);
  7.1549 -        cur->set_next_on_unclean_list(NULL);
  7.1550 -      } else {
  7.1551 -        prev = cur;
  7.1552 -      }
  7.1553 -      cur = next;
  7.1554 -    }
  7.1555 -    assert(_unclean_region_list.sz() == unclean_region_list_length(),
  7.1556 -           "Inv");
  7.1557 -  }
  7.1558 -
  7.1559 -  {
  7.1560 -    HeapRegion* prev = NULL;
  7.1561 -    HeapRegion* cur = _free_region_list;
  7.1562 -    while (cur != NULL) {
  7.1563 -      HeapRegion* next = cur->next_from_free_list();
  7.1564 -      if (cur->zero_fill_is_allocated()) {
  7.1565 -        // Remove from the list.
  7.1566 -        if (prev == NULL) {
  7.1567 -          _free_region_list = cur->next_from_free_list();
  7.1568 -        } else {
  7.1569 -          prev->set_next_on_free_list(cur->next_from_free_list());
  7.1570 -        }
  7.1571 -        cur->set_on_free_list(false);
  7.1572 -        cur->set_next_on_free_list(NULL);
  7.1573 -        _free_region_list_size--;
  7.1574 -      } else {
  7.1575 -        prev = cur;
  7.1576 -      }
  7.1577 -      cur = next;
  7.1578 -    }
  7.1579 -    assert(_free_region_list_size == free_region_list_length(), "Inv");
  7.1580 -  }
  7.1581 -}
  7.1582 -
  7.1583 -bool G1CollectedHeap::verify_region_lists() {
  7.1584 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1585 -  return verify_region_lists_locked();
  7.1586 -}
  7.1587 -
  7.1588 -bool G1CollectedHeap::verify_region_lists_locked() {
  7.1589 -  HeapRegion* unclean = _unclean_region_list.hd();
  7.1590 -  while (unclean != NULL) {
  7.1591 -    guarantee(unclean->is_on_unclean_list(), "Well, it is!");
  7.1592 -    guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
  7.1593 -    guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
  7.1594 -              "Everything else is possible.");
  7.1595 -    unclean = unclean->next_from_unclean_list();
  7.1596 -  }
  7.1597 -  guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
  7.1598 -
  7.1599 -  HeapRegion* free_r = _free_region_list;
  7.1600 -  while (free_r != NULL) {
  7.1601 -    assert(free_r->is_on_free_list(), "Well, it is!");
  7.1602 -    assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
  7.1603 -    switch (free_r->zero_fill_state()) {
  7.1604 -    case HeapRegion::NotZeroFilled:
  7.1605 -    case HeapRegion::ZeroFilling:
  7.1606 -      guarantee(false, "Should not be on free list.");
  7.1607 -      break;
  7.1608 -    default:
  7.1609 -      // Everything else is possible.
  7.1610 -      break;
  7.1611 -    }
  7.1612 -    free_r = free_r->next_from_free_list();
  7.1613 -  }
  7.1614 -  guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  7.1615 -  // If we didn't do an assertion...
  7.1616 -  return true;
  7.1617 -}
  7.1618 -
  7.1619 -size_t G1CollectedHeap::free_region_list_length() {
  7.1620 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1621 -  size_t len = 0;
  7.1622 -  HeapRegion* cur = _free_region_list;
  7.1623 -  while (cur != NULL) {
  7.1624 -    len++;
  7.1625 -    cur = cur->next_from_free_list();
  7.1626 -  }
  7.1627 -  return len;
  7.1628 -}
  7.1629 -
  7.1630 -size_t G1CollectedHeap::unclean_region_list_length() {
  7.1631 -  assert(ZF_mon->owned_by_self(), "precondition.");
  7.1632 -  return _unclean_region_list.length();
  7.1633 +
  7.1634 +  if (G1ConcRegionFreeingVerbose) {
  7.1635 +    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  7.1636 +                           "done waiting for free regions");
  7.1637 +  }
  7.1638  }
  7.1639  
  7.1640  size_t G1CollectedHeap::n_regions() {
  7.1641 @@ -5498,55 +5312,6 @@
  7.1642      HeapRegion::GrainBytes;
  7.1643  }
  7.1644  
  7.1645 -size_t G1CollectedHeap::free_regions() {
  7.1646 -  /* Possibly-expensive assert.
  7.1647 -  assert(_free_regions == count_free_regions(),
  7.1648 -         "_free_regions is off.");
  7.1649 -  */
  7.1650 -  return _free_regions;
  7.1651 -}
  7.1652 -
  7.1653 -bool G1CollectedHeap::should_zf() {
  7.1654 -  return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
  7.1655 -}
  7.1656 -
  7.1657 -class RegionCounter: public HeapRegionClosure {
  7.1658 -  size_t _n;
  7.1659 -public:
  7.1660 -  RegionCounter() : _n(0) {}
  7.1661 -  bool doHeapRegion(HeapRegion* r) {
  7.1662 -    if (r->is_empty()) {
  7.1663 -      assert(!r->isHumongous(), "H regions should not be empty.");
  7.1664 -      _n++;
  7.1665 -    }
  7.1666 -    return false;
  7.1667 -  }
  7.1668 -  int res() { return (int) _n; }
  7.1669 -};
  7.1670 -
  7.1671 -size_t G1CollectedHeap::count_free_regions() {
  7.1672 -  RegionCounter rc;
  7.1673 -  heap_region_iterate(&rc);
  7.1674 -  size_t n = rc.res();
  7.1675 -  if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
  7.1676 -    n--;
  7.1677 -  return n;
  7.1678 -}
  7.1679 -
  7.1680 -size_t G1CollectedHeap::count_free_regions_list() {
  7.1681 -  size_t n = 0;
  7.1682 -  size_t o = 0;
  7.1683 -  ZF_mon->lock_without_safepoint_check();
  7.1684 -  HeapRegion* cur = _free_region_list;
  7.1685 -  while (cur != NULL) {
  7.1686 -    cur = cur->next_from_free_list();
  7.1687 -    n++;
  7.1688 -  }
  7.1689 -  size_t m = unclean_region_list_length();
  7.1690 -  ZF_mon->unlock();
  7.1691 -  return n + m;
  7.1692 -}
  7.1693 -
  7.1694  void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  7.1695    assert(heap_lock_held_for_gc(),
  7.1696                "the heap lock should already be held by or for this thread");
  7.1697 @@ -5618,28 +5383,19 @@
  7.1698    }
  7.1699  }
  7.1700  
  7.1701 -
  7.1702  // Done at the start of full GC.
  7.1703  void G1CollectedHeap::tear_down_region_lists() {
  7.1704 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1705 -  while (pop_unclean_region_list_locked() != NULL) ;
  7.1706 -  assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
  7.1707 -         "Postconditions of loop.");
  7.1708 -  while (pop_free_region_list_locked() != NULL) ;
  7.1709 -  assert(_free_region_list == NULL, "Postcondition of loop.");
  7.1710 -  if (_free_region_list_size != 0) {
  7.1711 -    gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
  7.1712 -    print_on(gclog_or_tty, true /* extended */);
  7.1713 -  }
  7.1714 -  assert(_free_region_list_size == 0, "Postconditions of loop.");
  7.1715 +  _free_list.remove_all();
  7.1716  }
  7.1717  
  7.1718 -
  7.1719  class RegionResetter: public HeapRegionClosure {
  7.1720 -  G1CollectedHeap* _g1;
  7.1721 -  int _n;
  7.1722 +  G1CollectedHeap* _g1h;
  7.1723 +  FreeRegionList _local_free_list;
  7.1724 +
  7.1725  public:
  7.1726 -  RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  7.1727 +  RegionResetter() : _g1h(G1CollectedHeap::heap()),
  7.1728 +                     _local_free_list("Local Free List for RegionResetter") { }
  7.1729 +
  7.1730    bool doHeapRegion(HeapRegion* r) {
  7.1731      if (r->continuesHumongous()) return false;
  7.1732      if (r->top() > r->bottom()) {
  7.1733 @@ -5647,152 +5403,32 @@
  7.1734          Copy::fill_to_words(r->top(),
  7.1735                            pointer_delta(r->end(), r->top()));
  7.1736        }
  7.1737 -      r->set_zero_fill_allocated();
  7.1738      } else {
  7.1739        assert(r->is_empty(), "tautology");
  7.1740 -      _n++;
  7.1741 -      switch (r->zero_fill_state()) {
  7.1742 -        case HeapRegion::NotZeroFilled:
  7.1743 -        case HeapRegion::ZeroFilling:
  7.1744 -          _g1->put_region_on_unclean_list_locked(r);
  7.1745 -          break;
  7.1746 -        case HeapRegion::Allocated:
  7.1747 -          r->set_zero_fill_complete();
  7.1748 -          // no break; go on to put on free list.
  7.1749 -        case HeapRegion::ZeroFilled:
  7.1750 -          _g1->put_free_region_on_list_locked(r);
  7.1751 -          break;
  7.1752 -      }
  7.1753 +      _local_free_list.add_as_tail(r);
  7.1754      }
  7.1755      return false;
  7.1756    }
  7.1757  
  7.1758 -  int getFreeRegionCount() {return _n;}
  7.1759 +  void update_free_lists() {
  7.1760 +    _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
  7.1761 +                                            false /* par */);
  7.1762 +  }
  7.1763  };
  7.1764  
  7.1765  // Done at the end of full GC.
  7.1766  void G1CollectedHeap::rebuild_region_lists() {
  7.1767 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1768    // This needs to go at the end of the full GC.
  7.1769    RegionResetter rs;
  7.1770    heap_region_iterate(&rs);
  7.1771 -  _free_regions = rs.getFreeRegionCount();
  7.1772 -  // Tell the ZF thread it may have work to do.
  7.1773 -  if (should_zf()) ZF_mon->notify_all();
  7.1774 -}
  7.1775 -
  7.1776 -class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  7.1777 -  G1CollectedHeap* _g1;
  7.1778 -  int _n;
  7.1779 -public:
  7.1780 -  UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  7.1781 -  bool doHeapRegion(HeapRegion* r) {
  7.1782 -    if (r->continuesHumongous()) return false;
  7.1783 -    if (r->top() > r->bottom()) {
  7.1784 -      // There are assertions in "set_zero_fill_needed()" below that
  7.1785 -      // require top() == bottom(), so this is technically illegal.
  7.1786 -      // We'll skirt the law here, by making that true temporarily.
  7.1787 -      DEBUG_ONLY(HeapWord* save_top = r->top();
  7.1788 -                 r->set_top(r->bottom()));
  7.1789 -      r->set_zero_fill_needed();
  7.1790 -      DEBUG_ONLY(r->set_top(save_top));
  7.1791 -    }
  7.1792 -    return false;
  7.1793 -  }
  7.1794 -};
  7.1795 -
  7.1796 -// Done at the start of full GC.
  7.1797 -void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  7.1798 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  7.1799 -  // This needs to go at the end of the full GC.
  7.1800 -  UsedRegionsNeedZeroFillSetter rs;
  7.1801 -  heap_region_iterate(&rs);
  7.1802 +  rs.update_free_lists();
  7.1803  }
  7.1804  
  7.1805  void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  7.1806    _refine_cte_cl->set_concurrent(concurrent);
  7.1807  }
  7.1808  
  7.1809 -#ifndef PRODUCT
  7.1810 -
  7.1811 -class PrintHeapRegionClosure: public HeapRegionClosure {
  7.1812 -public:
  7.1813 -  bool doHeapRegion(HeapRegion *r) {
  7.1814 -    gclog_or_tty->print("Region: "PTR_FORMAT":", r);
  7.1815 -    if (r != NULL) {
  7.1816 -      if (r->is_on_free_list())
  7.1817 -        gclog_or_tty->print("Free ");
  7.1818 -      if (r->is_young())
  7.1819 -        gclog_or_tty->print("Young ");
  7.1820 -      if (r->isHumongous())
  7.1821 -        gclog_or_tty->print("Is Humongous ");
  7.1822 -      r->print();
  7.1823 -    }
  7.1824 -    return false;
  7.1825 -  }
  7.1826 -};
  7.1827 -
  7.1828 -class SortHeapRegionClosure : public HeapRegionClosure {
  7.1829 -  size_t young_regions,free_regions, unclean_regions;
  7.1830 -  size_t hum_regions, count;
  7.1831 -  size_t unaccounted, cur_unclean, cur_alloc;
  7.1832 -  size_t total_free;
  7.1833 -  HeapRegion* cur;
  7.1834 -public:
  7.1835 -  SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0),
  7.1836 -    free_regions(0), unclean_regions(0),
  7.1837 -    hum_regions(0),
  7.1838 -    count(0), unaccounted(0),
  7.1839 -    cur_alloc(0), total_free(0)
  7.1840 -  {}
  7.1841 -  bool doHeapRegion(HeapRegion *r) {
  7.1842 -    count++;
  7.1843 -    if (r->is_on_free_list()) free_regions++;
  7.1844 -    else if (r->is_on_unclean_list()) unclean_regions++;
  7.1845 -    else if (r->isHumongous())  hum_regions++;
  7.1846 -    else if (r->is_young()) young_regions++;
  7.1847 -    else if (r == cur) cur_alloc++;
  7.1848 -    else unaccounted++;
  7.1849 -    return false;
  7.1850 -  }
  7.1851 -  void print() {
  7.1852 -    total_free = free_regions + unclean_regions;
  7.1853 -    gclog_or_tty->print("%d regions\n", count);
  7.1854 -    gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
  7.1855 -                        total_free, free_regions, unclean_regions);
  7.1856 -    gclog_or_tty->print("%d humongous %d young\n",
  7.1857 -                        hum_regions, young_regions);
  7.1858 -    gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
  7.1859 -    gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  7.1860 -  }
  7.1861 -};
  7.1862 -
  7.1863 -void G1CollectedHeap::print_region_counts() {
  7.1864 -  SortHeapRegionClosure sc(_cur_alloc_region);
  7.1865 -  PrintHeapRegionClosure cl;
  7.1866 -  heap_region_iterate(&cl);
  7.1867 -  heap_region_iterate(&sc);
  7.1868 -  sc.print();
  7.1869 -  print_region_accounting_info();
  7.1870 -};
  7.1871 -
  7.1872 -bool G1CollectedHeap::regions_accounted_for() {
  7.1873 -  // TODO: regions accounting for young/survivor/tenured
  7.1874 -  return true;
  7.1875 -}
  7.1876 -
  7.1877 -bool G1CollectedHeap::print_region_accounting_info() {
  7.1878 -  gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
  7.1879 -                         free_regions(),
  7.1880 -                         count_free_regions(), count_free_regions_list(),
  7.1881 -                         _free_region_list_size, _unclean_region_list.sz());
  7.1882 -  gclog_or_tty->print_cr("cur_alloc: %d.",
  7.1883 -                         (_cur_alloc_region == NULL ? 0 : 1));
  7.1884 -  gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
  7.1885 -
  7.1886 -  // TODO: check regions accounting for young/survivor/tenured
  7.1887 -  return true;
  7.1888 -}
  7.1889 +#ifdef ASSERT
  7.1890  
  7.1891  bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  7.1892    HeapRegion* hr = heap_region_containing(p);
  7.1893 @@ -5802,8 +5438,84 @@
  7.1894      return hr->is_in(p);
  7.1895    }
  7.1896  }
  7.1897 -#endif // !PRODUCT
  7.1898 -
  7.1899 -void G1CollectedHeap::g1_unimplemented() {
  7.1900 -  // Unimplemented();
  7.1901 +#endif // ASSERT
  7.1902 +
  7.1903 +class VerifyRegionListsClosure : public HeapRegionClosure {
  7.1904 +private:
  7.1905 +  HumongousRegionSet* _humongous_set;
  7.1906 +  FreeRegionList*     _free_list;
  7.1907 +  size_t              _region_count;
  7.1908 +
  7.1909 +public:
  7.1910 +  VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
  7.1911 +                           FreeRegionList* free_list) :
  7.1912 +    _humongous_set(humongous_set), _free_list(free_list),
  7.1913 +    _region_count(0) { }
  7.1914 +
  7.1915 +  size_t region_count()      { return _region_count;      }
  7.1916 +
  7.1917 +  bool doHeapRegion(HeapRegion* hr) {
  7.1918 +    _region_count += 1;
  7.1919 +
  7.1920 +    if (hr->continuesHumongous()) {
  7.1921 +      return false;
  7.1922 +    }
  7.1923 +
  7.1924 +    if (hr->is_young()) {
  7.1925 +      // TODO
  7.1926 +    } else if (hr->startsHumongous()) {
  7.1927 +      _humongous_set->verify_next_region(hr);
  7.1928 +    } else if (hr->is_empty()) {
  7.1929 +      _free_list->verify_next_region(hr);
  7.1930 +    }
  7.1931 +    return false;
  7.1932 +  }
  7.1933 +};
  7.1934 +
  7.1935 +void G1CollectedHeap::verify_region_sets() {
  7.1936 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  7.1937 +
  7.1938 +  // First, check the explicit lists.
  7.1939 +  _free_list.verify();
  7.1940 +  {
  7.1941 +    // Given that a concurrent operation might be adding regions to
  7.1942 +    // the secondary free list we have to take the lock before
  7.1943 +    // verifying it.
  7.1944 +    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  7.1945 +    _secondary_free_list.verify();
  7.1946 +  }
  7.1947 +  _humongous_set.verify();
  7.1948 +
  7.1949 +  // If a concurrent region freeing operation is in progress it will
  7.1950 +  // be difficult to correctly attribute any free regions we come
  7.1951 +  // across to the correct free list given that they might belong to
  7.1952 +  // one of several (free_list, secondary_free_list, any local lists,
  7.1953 +  // etc.). So, if that's the case we will skip the rest of the
  7.1954 +  // verification operation. Alternatively, waiting for the concurrent
  7.1955 +  // operation to complete will have a non-trivial effect on the GC's
  7.1956 +  // operation (no concurrent operation will last longer than the
  7.1957 +  // interval between two calls to verification) and it might hide
  7.1958 +  // any issues that we would like to catch during testing.
  7.1959 +  if (free_regions_coming()) {
  7.1960 +    return;
  7.1961 +  }
  7.1962 +
  7.1963 +  {
  7.1964 +    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  7.1965 +    // Make sure we append the secondary_free_list on the free_list so
  7.1966 +    // that all free regions we will come across can be safely
  7.1967 +    // attributed to the free_list.
  7.1968 +    append_secondary_free_list();
  7.1969 +  }
  7.1970 +
  7.1971 +  // Finally, make sure that the region accounting in the lists is
  7.1972 +  // consistent with what we see in the heap.
  7.1973 +  _humongous_set.verify_start();
  7.1974 +  _free_list.verify_start();
  7.1975 +
  7.1976 +  VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
  7.1977 +  heap_region_iterate(&cl);
  7.1978 +
  7.1979 +  _humongous_set.verify_end();
  7.1980 +  _free_list.verify_end();
  7.1981  }
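
A minimal sketch of how the verification hook above might get planted around
a pause; the function below is hypothetical and not part of this changeset.
Only verify_region_sets_optional() is real: it is declared in
g1CollectedHeap.hpp further down and expands to verify_region_sets() in
non-product builds, or when HEAP_REGION_SET_FORCE_VERIFY is defined to 1.

    static void evacuation_pause_with_verification_sketch(G1CollectedHeap* g1h) {
      g1h->verify_region_sets_optional();  // catch stale region accounting early
      // ... evacuate, free emptied regions, merge the local free lists into
      //     the master free list and the humongous set ...
      g1h->verify_region_sets_optional();  // re-check the accounting afterwards
    }
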
     8.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jan 19 19:24:34 2011 -0800
     8.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jan 20 13:57:12 2011 -0800
     8.3 @@ -27,7 +27,7 @@
     8.4  
     8.5  #include "gc_implementation/g1/concurrentMark.hpp"
     8.6  #include "gc_implementation/g1/g1RemSet.hpp"
     8.7 -#include "gc_implementation/g1/heapRegion.hpp"
     8.8 +#include "gc_implementation/g1/heapRegionSets.hpp"
     8.9  #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
    8.10  #include "memory/barrierSet.hpp"
    8.11  #include "memory/memRegion.hpp"
    8.12 @@ -66,8 +66,7 @@
    8.13  enum G1GCThreadGroups {
    8.14    G1CRGroup = 0,
    8.15    G1ZFGroup = 1,
    8.16 -  G1CMGroup = 2,
    8.17 -  G1CLGroup = 3
    8.18 +  G1CMGroup = 2
    8.19  };
    8.20  
    8.21  enum GCAllocPurpose {
    8.22 @@ -155,6 +154,7 @@
    8.23    friend class RefineCardTableEntryClosure;
    8.24    friend class G1PrepareCompactClosure;
    8.25    friend class RegionSorter;
    8.26 +  friend class RegionResetter;
    8.27    friend class CountRCClosure;
    8.28    friend class EvacPopObjClosure;
    8.29    friend class G1ParCleanupCTTask;
    8.30 @@ -178,17 +178,20 @@
    8.31    // The maximum part of _g1_storage that has ever been committed.
    8.32    MemRegion _g1_max_committed;
    8.33  
    8.34 -  // The number of regions that are completely free.
    8.35 -  size_t _free_regions;
    8.36 +  // The master free list. It will satisfy all new region allocations.
    8.37 +  MasterFreeRegionList      _free_list;
    8.38 +
    8.39 +  // The secondary free list which contains regions that have been
    8.40 +  // freed up during the cleanup process. This will be appended to the
    8.41 +  // master free list when appropriate.
    8.42 +  SecondaryFreeRegionList   _secondary_free_list;
    8.43 +
    8.44 +  // It keeps track of the humongous regions.
    8.45 +  MasterHumongousRegionSet  _humongous_set;
    8.46  
    8.47    // The number of regions we could create by expansion.
    8.48    size_t _expansion_regions;
    8.49  
    8.50 -  // Return the number of free regions in the heap (by direct counting.)
    8.51 -  size_t count_free_regions();
    8.52 -  // Return the number of free regions on the free and unclean lists.
    8.53 -  size_t count_free_regions_list();
    8.54 -
    8.55    // The block offset table for the G1 heap.
    8.56    G1BlockOffsetSharedArray* _bot_shared;
    8.57  
    8.58 @@ -196,9 +199,6 @@
    8.59    // lists, before and after full GC.
    8.60    void tear_down_region_lists();
    8.61    void rebuild_region_lists();
    8.62 -  // This sets all non-empty regions to need zero-fill (which they will if
    8.63 -  // they are empty after full collection.)
    8.64 -  void set_used_regions_to_need_zero_fill();
    8.65  
    8.66    // The sequence of all heap regions in the heap.
    8.67    HeapRegionSeq* _hrs;
    8.68 @@ -231,7 +231,7 @@
    8.69    // Determines PLAB size for a particular allocation purpose.
    8.70    static size_t desired_plab_sz(GCAllocPurpose purpose);
    8.71  
    8.72 -  // When called by par thread, require par_alloc_during_gc_lock() to be held.
    8.73 +  // When called by par thread, requires the FreeList_lock to be held.
    8.74    void push_gc_alloc_region(HeapRegion* hr);
    8.75  
    8.76    // This should only be called single-threaded.  Undeclares all GC alloc
    8.77 @@ -294,10 +294,11 @@
    8.78    // line number, file, etc.
    8.79  
    8.80  #define heap_locking_asserts_err_msg(__extra_message)                         \
    8.81 -  err_msg("%s : Heap_lock %slocked, %sat a safepoint",                        \
    8.82 +  err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
    8.83            (__extra_message),                                                  \
    8.84 -          (!Heap_lock->owned_by_self()) ? "NOT " : "",                        \
    8.85 -          (!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
    8.86 +          BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
    8.87 +          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
    8.88 +          BOOL_TO_STR(Thread::current()->is_VM_thread()))
    8.89  
    8.90  #define assert_heap_locked()                                                  \
    8.91    do {                                                                        \
    8.92 @@ -305,10 +306,11 @@
    8.93             heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
    8.94    } while (0)
    8.95  
    8.96 -#define assert_heap_locked_or_at_safepoint()                                  \
    8.97 +#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread)             \
    8.98    do {                                                                        \
    8.99      assert(Heap_lock->owned_by_self() ||                                      \
   8.100 -                                     SafepointSynchronize::is_at_safepoint(), \
   8.101 +           (SafepointSynchronize::is_at_safepoint() &&                        \
   8.102 +             ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
   8.103             heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
   8.104                                          "should be at a safepoint"));         \
   8.105    } while (0)
   8.106 @@ -335,9 +337,10 @@
   8.107                                     "should not be at a safepoint"));          \
   8.108    } while (0)
   8.109  
   8.110 -#define assert_at_safepoint()                                                 \
   8.111 +#define assert_at_safepoint(__should_be_vm_thread)                            \
   8.112    do {                                                                        \
   8.113 -    assert(SafepointSynchronize::is_at_safepoint(),                           \
   8.114 +    assert(SafepointSynchronize::is_at_safepoint() &&                         \
   8.115 +              ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
   8.116             heap_locking_asserts_err_msg("should be at a safepoint"));         \
   8.117    } while (0)
   8.118  
   8.119 @@ -362,31 +365,41 @@
   8.120    // The current policy object for the collector.
   8.121    G1CollectorPolicy* _g1_policy;
   8.122  
   8.123 -  // Parallel allocation lock to protect the current allocation region.
   8.124 -  Mutex  _par_alloc_during_gc_lock;
   8.125 -  Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; }
   8.126 +  // This is the second level of trying to allocate a new region. If
   8.127 +  // new_region_work didn't find a region in the free_list, this call
   8.128 +  // will check whether there's anything available in the
   8.129 +  // secondary_free_list and/or wait for more regions to appear in that
   8.130 +  // list, if _free_regions_coming is set.
   8.131 +  HeapRegion* new_region_try_secondary_free_list(size_t word_size);
   8.132  
   8.133 -  // If possible/desirable, allocate a new HeapRegion for normal object
   8.134 -  // allocation sufficient for an allocation of the given "word_size".
   8.135 -  // If "do_expand" is true, will attempt to expand the heap if necessary
   8.136 -  // to to satisfy the request.  If "zero_filled" is true, requires a
   8.137 -  // zero-filled region.
   8.138 -  // (Returning NULL will trigger a GC.)
   8.139 -  virtual HeapRegion* newAllocRegion_work(size_t word_size,
   8.140 -                                          bool do_expand,
   8.141 -                                          bool zero_filled);
   8.142 +  // It will try to allocate a single non-humongous HeapRegion
   8.143 +  // sufficient for an allocation of the given word_size.  If
   8.144 +  // do_expand is true, it will attempt to expand the heap if
   8.145 +  // necessary to satisfy the allocation request. Note that word_size
   8.146 +  // is only used to make sure that we expand sufficiently but, given
   8.147 +  // that the allocation request is assumed not to be humongous,
   8.148 +  // having word_size is not strictly necessary (expanding by a single
   8.149 +  // region will always be sufficient). But let's keep that parameter
   8.150 +  // in case we need it in the future.
   8.151 +  HeapRegion* new_region_work(size_t word_size, bool do_expand);
   8.152  
   8.153 -  virtual HeapRegion* newAllocRegion(size_t word_size,
   8.154 -                                     bool zero_filled = true) {
   8.155 -    return newAllocRegion_work(word_size, false, zero_filled);
   8.156 +  // It will try to allocate a new region to be used for allocation by
   8.157 +  // mutator threads. It will not try to expand the heap if not region
   8.158 +  // is available.
   8.159 +  HeapRegion* new_alloc_region(size_t word_size) {
   8.160 +    return new_region_work(word_size, false /* do_expand */);
   8.161    }
   8.162 -  virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
   8.163 -                                                  size_t word_size,
   8.164 -                                                  bool zero_filled = true);
   8.165 +
   8.166 +  // It will try to allocate a new region to be used for allocation by
   8.167 +  // a GC thread. It will try to expand the heap if no region is
   8.168 +  // available.
   8.169 +  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
   8.170 +
   8.171 +  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
   8.172  
   8.173    // Attempt to allocate an object of the given (very large) "word_size".
   8.174    // Returns "NULL" on failure.
   8.175 -  virtual HeapWord* humongous_obj_allocate(size_t word_size);
   8.176 +  HeapWord* humongous_obj_allocate(size_t word_size);
   8.177  
   8.178    // The following two methods, allocate_new_tlab() and
   8.179    // mem_allocate(), are the two main entry points from the runtime
   8.180 @@ -760,20 +773,29 @@
   8.181    // Invoke "save_marks" on all heap regions.
   8.182    void save_marks();
   8.183  
   8.184 -  // Free a heap region.
   8.185 -  void free_region(HeapRegion* hr);
   8.186 -  // A component of "free_region", exposed for 'batching'.
   8.187 -  // All the params after "hr" are out params: the used bytes of the freed
   8.188 -  // region(s), the number of H regions cleared, the number of regions
   8.189 -  // freed, and pointers to the head and tail of a list of freed contig
   8.190 -  // regions, linked throught the "next_on_unclean_list" field.
   8.191 -  void free_region_work(HeapRegion* hr,
   8.192 -                        size_t& pre_used,
   8.193 -                        size_t& cleared_h,
   8.194 -                        size_t& freed_regions,
   8.195 -                        UncleanRegionList* list,
   8.196 -                        bool par = false);
   8.197 +  // It frees a non-humongous region by initializing its contents and
   8.198 +  // adding it to the free list that's passed as a parameter (this is
   8.199 +  // usually a local list which will be appended to the master free
   8.200 +  // list later). The used bytes of freed regions are accumulated in
   8.201 +  // pre_used. If par is true, the region's RSet will not be freed
   8.202 +  // up. The assumption is that this will be done later.
   8.203 +  void free_region(HeapRegion* hr,
   8.204 +                   size_t* pre_used,
   8.205 +                   FreeRegionList* free_list,
   8.206 +                   bool par);
   8.207  
   8.208 +  // It frees a humongous region by collapsing it into individual
   8.209 +  // regions and calling free_region() for each of them. The freed
   8.210 +  // regions will be added to the free list that's passed as a parameter
   8.211 +  // (this is usually a local list which will be appended to the
   8.212 +  // master free list later). The used bytes of freed regions are
   8.213 +  // accumulated in pre_used. If par is true, the region's RSet will
   8.214 +  // not be freed up. The assumption is that this will be done later.
   8.215 +  void free_humongous_region(HeapRegion* hr,
   8.216 +                             size_t* pre_used,
   8.217 +                             FreeRegionList* free_list,
   8.218 +                             HumongousRegionSet* humongous_proxy_set,
   8.219 +                             bool par);
   8.220  
   8.221    // The concurrent marker (and the thread it runs in.)
   8.222    ConcurrentMark* _cm;
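
The two comments above describe a batching pattern: a worker frees regions
into a thread-local FreeRegionList (plus a proxy HumongousRegionSet for
humongous ones) and merges everything into the master sets once. A minimal
sketch of that pattern follows; the function name and the elided loop are
hypothetical, while the calls use the signatures declared in this patch
(free_region() is kept in a comment since iterating the candidate regions
needs more context than a sketch can carry).

    static void free_dead_regions_sketch(G1CollectedHeap* g1h) {
      size_t pre_used = 0;
      FreeRegionList local_free_list("Sketch Local Free List");
      // For each completely dead, non-humongous region hr this worker claimed:
      //   g1h->free_region(hr, &pre_used, &local_free_list, true /* par */);
      // A single merge per worker keeps contention on the master lists low.
      g1h->update_sets_after_freeing_regions(pre_used, &local_free_list,
                                             NULL /* humongous_proxy_set */,
                                             true /* par */);
    }

The RegionResetter and G1PrepareCompactClosure changes elsewhere in this
patch follow the same shape: accumulate in doHeapRegion(), then call
update_sets_after_freeing_regions() exactly once.
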
   8.223 @@ -783,9 +805,6 @@
   8.224    // The concurrent refiner.
   8.225    ConcurrentG1Refine* _cg1r;
   8.226  
   8.227 -  // The concurrent zero-fill thread.
   8.228 -  ConcurrentZFThread* _czft;
   8.229 -
   8.230    // The parallel task queues
   8.231    RefToScanQueueSet *_task_queues;
   8.232  
   8.233 @@ -877,9 +896,7 @@
   8.234  
   8.235    SubTasksDone* _process_strong_tasks;
   8.236  
   8.237 -  // List of regions which require zero filling.
   8.238 -  UncleanRegionList _unclean_region_list;
   8.239 -  bool _unclean_regions_coming;
   8.240 +  volatile bool _free_regions_coming;
   8.241  
   8.242  public:
   8.243  
   8.244 @@ -1002,71 +1019,64 @@
   8.245    size_t max_regions();
   8.246  
   8.247    // The number of regions that are completely free.
   8.248 -  size_t free_regions();
   8.249 +  size_t free_regions() {
   8.250 +    return _free_list.length();
   8.251 +  }
   8.252  
   8.253    // The number of regions that are not completely free.
   8.254    size_t used_regions() { return n_regions() - free_regions(); }
   8.255  
   8.256 -  // True iff the ZF thread should run.
   8.257 -  bool should_zf();
   8.258 -
   8.259    // The number of regions available for "regular" expansion.
   8.260    size_t expansion_regions() { return _expansion_regions; }
   8.261  
   8.262 -#ifndef PRODUCT
   8.263 -  bool regions_accounted_for();
   8.264 -  bool print_region_accounting_info();
   8.265 -  void print_region_counts();
   8.266 -#endif
   8.267 +  // verify_region_sets() performs verification over the region
   8.268 +  // lists. It will be compiled in the product code to be used when
   8.269 +  // necessary (i.e., during heap verification).
   8.270 +  void verify_region_sets();
   8.271  
   8.272 -  HeapRegion* alloc_region_from_unclean_list(bool zero_filled);
   8.273 -  HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled);
   8.274 +  // verify_region_sets_optional() is planted in the code for
   8.275 +  // list verification in non-product builds (and it can be enabled in
   8.276 +  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
   8.277 +#if HEAP_REGION_SET_FORCE_VERIFY
   8.278 +  void verify_region_sets_optional() {
   8.279 +    verify_region_sets();
   8.280 +  }
   8.281 +#else // HEAP_REGION_SET_FORCE_VERIFY
   8.282 +  void verify_region_sets_optional() { }
   8.283 +#endif // HEAP_REGION_SET_FORCE_VERIFY
   8.284  
   8.285 -  void put_region_on_unclean_list(HeapRegion* r);
   8.286 -  void put_region_on_unclean_list_locked(HeapRegion* r);
   8.287 +#ifdef ASSERT
   8.288 +  bool is_on_free_list(HeapRegion* hr) {
   8.289 +    return hr->containing_set() == &_free_list;
   8.290 +  }
   8.291  
   8.292 -  void prepend_region_list_on_unclean_list(UncleanRegionList* list);
   8.293 -  void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list);
   8.294 +  bool is_on_humongous_set(HeapRegion* hr) {
   8.295 +    return hr->containing_set() == &_humongous_set;
   8.296 +  }
   8.297 +#endif // ASSERT
   8.298  
   8.299 -  void set_unclean_regions_coming(bool b);
   8.300 -  void set_unclean_regions_coming_locked(bool b);
   8.301 -  // Wait for cleanup to be complete.
   8.302 -  void wait_for_cleanup_complete();
   8.303 -  // Like above, but assumes that the calling thread owns the Heap_lock.
   8.304 -  void wait_for_cleanup_complete_locked();
   8.305 +  // Wrapper for the region list operations that can be called from
   8.306 +  // methods outside this class.
   8.307  
   8.308 -  // Return the head of the unclean list.
   8.309 -  HeapRegion* peek_unclean_region_list_locked();
   8.310 -  // Remove and return the head of the unclean list.
   8.311 -  HeapRegion* pop_unclean_region_list_locked();
   8.312 +  void secondary_free_list_add_as_tail(FreeRegionList* list) {
   8.313 +    _secondary_free_list.add_as_tail(list);
   8.314 +  }
   8.315  
   8.316 -  // List of regions which are zero filled and ready for allocation.
   8.317 -  HeapRegion* _free_region_list;
   8.318 -  // Number of elements on the free list.
   8.319 -  size_t _free_region_list_size;
   8.320 +  void append_secondary_free_list() {
   8.321 +    _free_list.add_as_tail(&_secondary_free_list);
   8.322 +  }
   8.323  
   8.324 -  // If the head of the unclean list is ZeroFilled, move it to the free
   8.325 -  // list.
   8.326 -  bool move_cleaned_region_to_free_list_locked();
   8.327 -  bool move_cleaned_region_to_free_list();
   8.328 +  void append_secondary_free_list_if_not_empty() {
   8.329 +    if (!_secondary_free_list.is_empty()) {
   8.330 +      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   8.331 +      append_secondary_free_list();
   8.332 +    }
   8.333 +  }
   8.334  
   8.335 -  void put_free_region_on_list_locked(HeapRegion* r);
   8.336 -  void put_free_region_on_list(HeapRegion* r);
   8.337 -
   8.338 -  // Remove and return the head element of the free list.
   8.339 -  HeapRegion* pop_free_region_list_locked();
   8.340 -
   8.341 -  // If "zero_filled" is true, we first try the free list, then we try the
   8.342 -  // unclean list, zero-filling the result.  If "zero_filled" is false, we
   8.343 -  // first try the unclean list, then the zero-filled list.
   8.344 -  HeapRegion* alloc_free_region_from_lists(bool zero_filled);
   8.345 -
   8.346 -  // Verify the integrity of the region lists.
   8.347 -  void remove_allocated_regions_from_lists();
   8.348 -  bool verify_region_lists();
   8.349 -  bool verify_region_lists_locked();
   8.350 -  size_t unclean_region_list_length();
   8.351 -  size_t free_region_list_length();
   8.352 +  void set_free_regions_coming();
   8.353 +  void reset_free_regions_coming();
   8.354 +  bool free_regions_coming() { return _free_regions_coming; }
   8.355 +  void wait_while_free_regions_coming();
   8.356  
   8.357    // Perform a collection of the heap; intended for use in implementing
   8.358    // "System.gc".  This probably implies as full a collection as the
   8.359 @@ -1085,23 +1095,24 @@
   8.360    // True iff a evacuation has failed in the most-recent collection.
   8.361    bool evacuation_failed() { return _evacuation_failed; }
   8.362  
   8.363 -  // Free a region if it is totally full of garbage.  Returns the number of
   8.364 -  // bytes freed (0 ==> didn't free it).
   8.365 -  size_t free_region_if_totally_empty(HeapRegion *hr);
   8.366 -  void free_region_if_totally_empty_work(HeapRegion *hr,
   8.367 -                                         size_t& pre_used,
   8.368 -                                         size_t& cleared_h_regions,
   8.369 -                                         size_t& freed_regions,
   8.370 -                                         UncleanRegionList* list,
   8.371 -                                         bool par = false);
   8.372 +  // It will free a region if it has allocated objects in it that are
   8.373 +  // all dead. It calls either free_region() or
   8.374 +  // free_humongous_region() depending on the type of the region that
   8.375 +  // is passed to it.
   8.376 +  void free_region_if_totally_empty(HeapRegion* hr,
   8.377 +                                    size_t* pre_used,
   8.378 +                                    FreeRegionList* free_list,
   8.379 +                                    HumongousRegionSet* humongous_proxy_set,
   8.380 +                                    bool par);
   8.381  
   8.382 -  // If we've done free region work that yields the given changes, update
   8.383 -  // the relevant global variables.
   8.384 -  void finish_free_region_work(size_t pre_used,
   8.385 -                               size_t cleared_h_regions,
   8.386 -                               size_t freed_regions,
   8.387 -                               UncleanRegionList* list);
   8.388 -
   8.389 +  // It appends the free list to the master free list and updates the
   8.390 +  // master humongous list according to the contents of the proxy
   8.391 +  // list. It also adjusts the total used bytes according to pre_used
   8.392 +  // (if par is true, it will do so by taking the ParGCRareEvent_lock).
   8.393 +  void update_sets_after_freeing_regions(size_t pre_used,
   8.394 +                                         FreeRegionList* free_list,
   8.395 +                                         HumongousRegionSet* humongous_proxy_set,
   8.396 +                                         bool par);
   8.397  
   8.398    // Returns "TRUE" iff "p" points into the allocated area of the heap.
   8.399    virtual bool is_in(const void* p) const;
   8.400 @@ -1314,8 +1325,6 @@
   8.401      return true;
   8.402    }
   8.403  
   8.404 -  virtual bool allocs_are_zero_filled();
   8.405 -
   8.406    // The boundary between a "large" and "small" array of primitives, in
   8.407    // words.
   8.408    virtual size_t large_typearray_limit();
   8.409 @@ -1546,13 +1555,6 @@
   8.410  
   8.411  protected:
   8.412    size_t _max_heap_capacity;
   8.413 -
   8.414 -public:
   8.415 -  // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
   8.416 -  // MemoryService).  In productization, we can make this assert false
   8.417 -  // to catch such places (as well as searching for calls to this...)
   8.418 -  static void g1_unimplemented();
   8.419 -
   8.420  };
   8.421  
   8.422  #define use_local_bitmaps         1
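
The declarations above describe a producer/consumer handshake: concurrent
cleanup publishes freed regions on the secondary free list in batches while
_free_regions_coming is set, and the allocation path
(new_region_try_secondary_free_list()) can pick regions up from there or wait
for more to appear. A hedged sketch of the publishing side; the function and
the single-batch call are assumptions, while the lock, the flag setters and
the list append are the entry points declared in this header.

    static void publish_cleanup_regions_sketch(G1CollectedHeap* g1h,
                                               FreeRegionList* batch) {
      g1h->set_free_regions_coming();
      {
        // Readers (verification, allocation) take the same lock; see
        // verify_region_sets() in g1CollectedHeap.cpp above.
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add_as_tail(batch);
      }
      g1h->reset_free_regions_coming();  // wait_while_free_regions_coming() may return
    }
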
     9.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jan 19 19:24:34 2011 -0800
     9.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Jan 20 13:57:12 2011 -0800
     9.3 @@ -28,7 +28,7 @@
     9.4  #include "gc_implementation/g1/concurrentMark.hpp"
     9.5  #include "gc_implementation/g1/g1CollectedHeap.hpp"
     9.6  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
     9.7 -#include "gc_implementation/g1/heapRegionSeq.hpp"
     9.8 +#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
     9.9  #include "utilities/taskqueue.hpp"
    9.10  
    9.11  // Inline functions for G1CollectedHeap
    9.12 @@ -135,7 +135,7 @@
    9.13  
    9.14  inline void
    9.15  G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
    9.16 -  assert_heap_locked_or_at_safepoint();
    9.17 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
    9.18    assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
    9.19           "pre-condition of the call");
    9.20    assert(cur_alloc_region->is_young(),
    10.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jan 19 19:24:34 2011 -0800
    10.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Jan 20 13:57:12 2011 -0800
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    10.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -2875,8 +2875,6 @@
   10.11    // Adjust for expansion and slop.
   10.12    max_live_bytes = max_live_bytes + expansion_bytes;
   10.13  
   10.14 -  assert(_g1->regions_accounted_for(), "Region leakage!");
   10.15 -
   10.16    HeapRegion* hr;
   10.17    if (in_young_gc_mode()) {
   10.18      double young_start_time_sec = os::elapsedTime();
    11.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Jan 19 19:24:34 2011 -0800
    11.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Jan 20 13:57:12 2011 -0800
    11.3 @@ -1,5 +1,5 @@
    11.4  /*
    11.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    11.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    11.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.8   *
    11.9   * This code is free software; you can redistribute it and/or modify it
   11.10 @@ -181,26 +181,46 @@
   11.11  }
   11.12  
   11.13  class G1PrepareCompactClosure: public HeapRegionClosure {
   11.14 +  G1CollectedHeap* _g1h;
   11.15    ModRefBarrierSet* _mrbs;
   11.16    CompactPoint _cp;
   11.17 +  size_t _pre_used;
   11.18 +  FreeRegionList _free_list;
   11.19 +  HumongousRegionSet _humongous_proxy_set;
   11.20  
   11.21    void free_humongous_region(HeapRegion* hr) {
   11.22 -    HeapWord* bot = hr->bottom();
   11.23      HeapWord* end = hr->end();
   11.24      assert(hr->startsHumongous(),
   11.25             "Only the start of a humongous region should be freed.");
   11.26 -    G1CollectedHeap::heap()->free_region(hr);
   11.27 +    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
   11.28 +                                &_humongous_proxy_set, false /* par */);
   11.29 +    // Do we also need to do this for the continues humongous regions
   11.30 +    // we just collapsed?
   11.31      hr->prepare_for_compaction(&_cp);
   11.32      // Also clear the part of the card table that will be unused after
   11.33      // compaction.
   11.34 -    _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
   11.35 +    _mrbs->clear(MemRegion(hr->compaction_top(), end));
   11.36    }
   11.37  
   11.38  public:
   11.39 -  G1PrepareCompactClosure(CompactibleSpace* cs) :
   11.40 +  G1PrepareCompactClosure(CompactibleSpace* cs)
   11.41 +  : _g1h(G1CollectedHeap::heap()),
   11.42 +    _mrbs(G1CollectedHeap::heap()->mr_bs()),
   11.43      _cp(NULL, cs, cs->initialize_threshold()),
   11.44 -    _mrbs(G1CollectedHeap::heap()->mr_bs())
   11.45 -  {}
   11.46 +    _pre_used(0),
   11.47 +    _free_list("Local Free List for G1MarkSweep"),
   11.48 +    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
   11.49 +
   11.50 +  void update_sets() {
   11.51 +    // We'll recalculate total used bytes and recreate the free list
   11.52 +    // at the end of the GC, so no point in updating those values here.
   11.53 +    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
   11.54 +                                            NULL, /* free_list */
   11.55 +                                            &_humongous_proxy_set,
   11.56 +                                            false /* par */);
   11.57 +    _free_list.remove_all();
   11.58 +  }
   11.59 +
   11.60    bool doHeapRegion(HeapRegion* hr) {
   11.61      if (hr->isHumongous()) {
   11.62        if (hr->startsHumongous()) {
   11.63 @@ -266,6 +286,7 @@
   11.64  
   11.65    G1PrepareCompactClosure blk(sp);
   11.66    g1h->heap_region_iterate(&blk);
   11.67 +  blk.update_sets();
   11.68  
   11.69    CompactPoint perm_cp(pg, NULL, NULL);
   11.70    pg->prepare_for_compaction(&perm_cp);
    12.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jan 19 19:24:34 2011 -0800
    12.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Jan 20 13:57:12 2011 -0800
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    12.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -75,21 +75,12 @@
   12.11            "(0 means do not periodically generate this info); "              \
   12.12            "it also requires -XX:+G1SummarizeRSetStats")                     \
   12.13                                                                              \
   12.14 -  diagnostic(bool, G1SummarizeZFStats, false,                               \
   12.15 -          "Summarize zero-filling info")                                    \
   12.16 -                                                                            \
   12.17    diagnostic(bool, G1TraceConcRefinement, false,                            \
   12.18            "Trace G1 concurrent refinement")                                 \
   12.19                                                                              \
   12.20    product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
   12.21            "Size of the region stack for concurrent marking.")               \
   12.22                                                                              \
   12.23 -  develop(bool, G1ConcZeroFill, true,                                       \
   12.24 -          "If true, run concurrent zero-filling thread")                    \
   12.25 -                                                                            \
   12.26 -  develop(intx, G1ConcZFMaxRegions, 1,                                      \
   12.27 -          "Stop zero-filling when # of zf'd regions reaches")               \
   12.28 -                                                                            \
   12.29    develop(bool, G1SATBBarrierPrintNullPreVals, false,                       \
   12.30            "If true, count frac of ptr writes with null pre-vals.")          \
   12.31                                                                              \
   12.32 @@ -99,6 +90,13 @@
   12.33    develop(intx, G1SATBProcessCompletedThreshold, 20,                        \
   12.34            "Number of completed buffers that triggers log processing.")      \
   12.35                                                                              \
   12.36 +  product(uintx, G1SATBBufferEnqueueingThresholdPercent, 60,                \
   12.37 +          "Before enqueueing them, each mutator thread tries to do some "   \
   12.38 +          "filtering on the SATB buffers it generates. If post-filtering "  \
   12.39 +          "the percentage of retained entries is over this threshold "      \
   12.40 +          "the buffer will be enqueued for processing. A value of 0 "       \
   12.41 +          "specifies that mutator threads should not do such filtering.")   \
   12.42 +                                                                            \
   12.43    develop(intx, G1ExtraRegionSurvRate, 33,                                  \
   12.44            "If the young survival rate is S, and there's room left in "      \
   12.45            "to-space, we will allow regions whose survival rate is up to "   \
   12.46 @@ -282,7 +280,20 @@
   12.47            "Size of a work unit of cards claimed by a worker thread"         \
   12.48            "during RSet scanning.")                                          \
   12.49                                                                              \
   12.50 -  develop(bool, ReduceInitialCardMarksForG1, false,                         \
   12.51 +  develop(uintx, G1SecondaryFreeListAppendLength, 5,                        \
   12.52 +          "The number of regions we will add to the secondary free list "   \
   12.53 +          "at every append operation")                                      \
   12.54 +                                                                            \
   12.55 +  develop(bool, G1ConcRegionFreeingVerbose, false,                          \
   12.56 +          "Enables verboseness during concurrent region freeing")           \
   12.57 +                                                                            \
   12.58 +  develop(bool, G1StressConcRegionFreeing, false,                           \
   12.59 +          "It stresses the concurrent region freeing operation")            \
   12.60 +                                                                            \
   12.61 +  develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
   12.62 +          "Artificial delay during concurrent region freeing")              \
   12.63 +                                                                            \
   12.64 +   develop(bool, ReduceInitialCardMarksForG1, false,                        \
   12.65            "When ReduceInitialCardMarks is true, this flag setting "         \
   12.66            " controls whether G1 allows the RICM optimization")
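
A worked example for the new G1SATBBufferEnqueueingThresholdPercent flag
above (the helper below is an editorial illustration, not HotSpot code): with
the default of 60 and an SATB buffer holding 1024 entries, the buffer is
enqueued for processing only if more than 1024 * 60 / 100 = 614.4, i.e. at
least 615, entries survive the mutator-side filtering; otherwise the thread
keeps reusing the buffer. A value of 0 disables the filtering altogether.

    // Hypothetical sketch of the threshold test the flag text describes.
    static bool over_enqueueing_threshold(size_t retained_entries,
                                          size_t buffer_capacity,
                                          uintx threshold_percent) {
      // "the percentage of retained entries is over this threshold"
      return retained_entries * 100 > buffer_capacity * threshold_percent;
    }
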
   12.67  
    13.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jan 19 19:24:34 2011 -0800
    13.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jan 20 13:57:12 2011 -0800
    13.3 @@ -23,7 +23,6 @@
    13.4   */
    13.5  
    13.6  #include "precompiled.hpp"
    13.7 -#include "gc_implementation/g1/concurrentZFThread.hpp"
    13.8  #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
    13.9  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   13.10  #include "gc_implementation/g1/g1OopClosures.inline.hpp"
   13.11 @@ -348,22 +347,20 @@
   13.12  }
   13.13  
   13.14  void HeapRegion::hr_clear(bool par, bool clear_space) {
   13.15 -  _humongous_type = NotHumongous;
   13.16 -  _humongous_start_region = NULL;
   13.17 +  assert(_humongous_type == NotHumongous,
   13.18 +         "we should have already filtered out humongous regions");
   13.19 +  assert(_humongous_start_region == NULL,
   13.20 +         "we should have already filtered out humongous regions");
   13.21 +  assert(_end == _orig_end,
   13.22 +         "we should have already filtered out humongous regions");
   13.23 +
   13.24    _in_collection_set = false;
   13.25    _is_gc_alloc_region = false;
   13.26  
   13.27 -  // Age stuff (if parallel, this will be done separately, since it needs
   13.28 -  // to be sequential).
   13.29 -  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   13.30 -
   13.31    set_young_index_in_cset(-1);
   13.32    uninstall_surv_rate_group();
   13.33    set_young_type(NotYoung);
   13.34  
   13.35 -  // In case it had been the start of a humongous sequence, reset its end.
   13.36 -  set_end(_orig_end);
   13.37 -
   13.38    if (!par) {
   13.39      // If this is parallel, this will be done later.
   13.40      HeapRegionRemSet* hrrs = rem_set();
   13.41 @@ -387,6 +384,7 @@
   13.42  // </PREDICTION>
   13.43  
   13.44  void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   13.45 +  assert(!isHumongous(), "sanity / pre-condition");
   13.46    assert(end() == _orig_end,
   13.47           "Should be normal before the humongous object allocation");
   13.48    assert(top() == bottom(), "should be empty");
   13.49 @@ -400,6 +398,7 @@
   13.50  }
   13.51  
   13.52  void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
   13.53 +  assert(!isHumongous(), "sanity / pre-condition");
   13.54    assert(end() == _orig_end,
   13.55           "Should be normal before the humongous object allocation");
   13.56    assert(top() == bottom(), "should be empty");
   13.57 @@ -409,6 +408,26 @@
   13.58    _humongous_start_region = first_hr;
   13.59  }
   13.60  
   13.61 +void HeapRegion::set_notHumongous() {
   13.62 +  assert(isHumongous(), "pre-condition");
   13.63 +
   13.64 +  if (startsHumongous()) {
   13.65 +    assert(top() <= end(), "pre-condition");
   13.66 +    set_end(_orig_end);
   13.67 +    if (top() > end()) {
   13.68 +      // at least one "continues humongous" region after it
   13.69 +      set_top(end());
   13.70 +    }
   13.71 +  } else {
   13.72 +    // continues humongous
   13.73 +    assert(end() == _orig_end, "sanity");
   13.74 +  }
   13.75 +
   13.76 +  assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
   13.77 +  _humongous_type = NotHumongous;
   13.78 +  _humongous_start_region = NULL;
   13.79 +}
   13.80 +
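
(A worked example for set_notHumongous() above, not part of the patch: take
a humongous object spanning three regions. set_startsHumongous() stretched
the first region's end() out to the end of the third region and set top() to
the object's end, so when end() is reset to _orig_end here, top() still
points into the third region; top() > end() holds and top() is clamped back
to end(). The two "continues humongous" regions never had their end() moved,
which is what the assert in the else branch checks.)
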
   13.81  bool HeapRegion::claimHeapRegion(jint claimValue) {
   13.82    jint current = _claimed;
   13.83    if (current != claimValue) {
   13.84 @@ -443,15 +462,6 @@
   13.85    return low;
   13.86  }
   13.87  
   13.88 -void HeapRegion::set_next_on_unclean_list(HeapRegion* r) {
   13.89 -  assert(r == NULL || r->is_on_unclean_list(), "Malformed unclean list.");
   13.90 -  _next_in_special_set = r;
   13.91 -}
   13.92 -
   13.93 -void HeapRegion::set_on_unclean_list(bool b) {
   13.94 -  _is_on_unclean_list = b;
   13.95 -}
   13.96 -
   13.97  void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   13.98    G1OffsetTableContigSpace::initialize(mr, false, mangle_space);
   13.99    hr_clear(false/*par*/, clear_space);
  13.100 @@ -469,15 +479,16 @@
  13.101      _hrs_index(-1),
  13.102      _humongous_type(NotHumongous), _humongous_start_region(NULL),
  13.103      _in_collection_set(false), _is_gc_alloc_region(false),
  13.104 -    _is_on_free_list(false), _is_on_unclean_list(false),
  13.105      _next_in_special_set(NULL), _orig_end(NULL),
  13.106      _claimed(InitialClaimValue), _evacuation_failed(false),
  13.107      _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
  13.108      _young_type(NotYoung), _next_young_region(NULL),
  13.109 -    _next_dirty_cards_region(NULL),
  13.110 -    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
  13.111 -    _rem_set(NULL), _zfs(NotZeroFilled),
  13.112 -    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
  13.113 +    _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
  13.114 +#ifdef ASSERT
  13.115 +    _containing_set(NULL),
  13.116 +#endif // ASSERT
  13.117 +    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
  13.118 +    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
  13.119      _predicted_bytes_to_copy(0)
  13.120  {
  13.121    _orig_end = mr.end();
  13.122 @@ -552,86 +563,6 @@
  13.123    oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
  13.124  }
  13.125  
  13.126 -#ifdef DEBUG
  13.127 -HeapWord* HeapRegion::allocate(size_t size) {
  13.128 -  jint state = zero_fill_state();
  13.129 -  assert(!G1CollectedHeap::heap()->allocs_are_zero_filled() ||
  13.130 -         zero_fill_is_allocated(),
  13.131 -         "When ZF is on, only alloc in ZF'd regions");
  13.132 -  return G1OffsetTableContigSpace::allocate(size);
  13.133 -}
  13.134 -#endif
  13.135 -
  13.136 -void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
  13.137 -  assert(ZF_mon->owned_by_self() ||
  13.138 -         Universe::heap()->is_gc_active(),
  13.139 -         "Must hold the lock or be a full GC to modify.");
  13.140 -#ifdef ASSERT
  13.141 -  if (top() != bottom() && zfs != Allocated) {
  13.142 -    ResourceMark rm;
  13.143 -    stringStream region_str;
  13.144 -    print_on(&region_str);
  13.145 -    assert(top() == bottom() || zfs == Allocated,
  13.146 -           err_msg("Region must be empty, or we must be setting it to allocated. "
  13.147 -                   "_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
  13.148 -  }
  13.149 -#endif
  13.150 -  _zfs = zfs;
  13.151 -}
  13.152 -
  13.153 -void HeapRegion::set_zero_fill_complete() {
  13.154 -  set_zero_fill_state_work(ZeroFilled);
  13.155 -  if (ZF_mon->owned_by_self()) {
  13.156 -    ZF_mon->notify_all();
  13.157 -  }
  13.158 -}
  13.159 -
  13.160 -
  13.161 -void HeapRegion::ensure_zero_filled() {
  13.162 -  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  13.163 -  ensure_zero_filled_locked();
  13.164 -}
  13.165 -
  13.166 -void HeapRegion::ensure_zero_filled_locked() {
  13.167 -  assert(ZF_mon->owned_by_self(), "Precondition");
  13.168 -  bool should_ignore_zf = SafepointSynchronize::is_at_safepoint();
  13.169 -  assert(should_ignore_zf || Heap_lock->is_locked(),
  13.170 -         "Either we're in a GC or we're allocating a region.");
  13.171 -  switch (zero_fill_state()) {
  13.172 -  case HeapRegion::NotZeroFilled:
  13.173 -    set_zero_fill_in_progress(Thread::current());
  13.174 -    {
  13.175 -      ZF_mon->unlock();
  13.176 -      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
  13.177 -      ZF_mon->lock_without_safepoint_check();
  13.178 -    }
  13.179 -    // A trap.
  13.180 -    guarantee(zero_fill_state() == HeapRegion::ZeroFilling
  13.181 -              && zero_filler() == Thread::current(),
  13.182 -              "AHA!  Tell Dave D if you see this...");
  13.183 -    set_zero_fill_complete();
  13.184 -    // gclog_or_tty->print_cr("Did sync ZF.");
  13.185 -    ConcurrentZFThread::note_sync_zfs();
  13.186 -    break;
  13.187 -  case HeapRegion::ZeroFilling:
  13.188 -    if (should_ignore_zf) {
  13.189 -      // We can "break" the lock and take over the work.
  13.190 -      Copy::fill_to_words(bottom(), capacity()/HeapWordSize);
  13.191 -      set_zero_fill_complete();
  13.192 -      ConcurrentZFThread::note_sync_zfs();
  13.193 -      break;
  13.194 -    } else {
  13.195 -      ConcurrentZFThread::wait_for_ZF_completed(this);
  13.196 -    }
  13.197 -  case HeapRegion::ZeroFilled:
  13.198 -    // Nothing to do.
  13.199 -    break;
  13.200 -  case HeapRegion::Allocated:
  13.201 -    guarantee(false, "Should not call on allocated regions.");
  13.202 -  }
  13.203 -  assert(zero_fill_state() == HeapRegion::ZeroFilled, "Post");
  13.204 -}
  13.205 -
  13.206  HeapWord*
  13.207  HeapRegion::object_iterate_mem_careful(MemRegion mr,
  13.208                                                   ObjectClosure* cl) {
  13.209 @@ -1010,67 +941,3 @@
  13.210    _offsets.set_space(this);
  13.211    initialize(mr, !is_zeroed, SpaceDecorator::Mangle);
  13.212  }
  13.213 -
  13.214 -size_t RegionList::length() {
  13.215 -  size_t len = 0;
  13.216 -  HeapRegion* cur = hd();
  13.217 -  DEBUG_ONLY(HeapRegion* last = NULL);
  13.218 -  while (cur != NULL) {
  13.219 -    len++;
  13.220 -    DEBUG_ONLY(last = cur);
  13.221 -    cur = get_next(cur);
  13.222 -  }
  13.223 -  assert(last == tl(), "Invariant");
  13.224 -  return len;
  13.225 -}
  13.226 -
  13.227 -void RegionList::insert_before_head(HeapRegion* r) {
  13.228 -  assert(well_formed(), "Inv");
  13.229 -  set_next(r, hd());
  13.230 -  _hd = r;
  13.231 -  _sz++;
  13.232 -  if (tl() == NULL) _tl = r;
  13.233 -  assert(well_formed(), "Inv");
  13.234 -}
  13.235 -
  13.236 -void RegionList::prepend_list(RegionList* new_list) {
  13.237 -  assert(well_formed(), "Precondition");
  13.238 -  assert(new_list->well_formed(), "Precondition");
  13.239 -  HeapRegion* new_tl = new_list->tl();
  13.240 -  if (new_tl != NULL) {
  13.241 -    set_next(new_tl, hd());
  13.242 -    _hd = new_list->hd();
  13.243 -    _sz += new_list->sz();
  13.244 -    if (tl() == NULL) _tl = new_list->tl();
  13.245 -  } else {
  13.246 -    assert(new_list->hd() == NULL && new_list->sz() == 0, "Inv");
  13.247 -  }
  13.248 -  assert(well_formed(), "Inv");
  13.249 -}
  13.250 -
  13.251 -void RegionList::delete_after(HeapRegion* r) {
  13.252 -  assert(well_formed(), "Precondition");
  13.253 -  HeapRegion* next = get_next(r);
  13.254 -  assert(r != NULL, "Precondition");
  13.255 -  HeapRegion* next_tl = get_next(next);
  13.256 -  set_next(r, next_tl);
  13.257 -  dec_sz();
  13.258 -  if (next == tl()) {
  13.259 -    assert(next_tl == NULL, "Inv");
  13.260 -    _tl = r;
  13.261 -  }
  13.262 -  assert(well_formed(), "Inv");
  13.263 -}
  13.264 -
  13.265 -HeapRegion* RegionList::pop() {
  13.266 -  assert(well_formed(), "Inv");
  13.267 -  HeapRegion* res = hd();
  13.268 -  if (res != NULL) {
  13.269 -    _hd = get_next(res);
  13.270 -    _sz--;
  13.271 -    set_next(res, NULL);
  13.272 -    if (sz() == 0) _tl = NULL;
  13.273 -  }
  13.274 -  assert(well_formed(), "Inv");
  13.275 -  return res;
  13.276 -}
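
The RegionList implementation removed above is an intrusive, singly-linked list that tracks a head, a tail and an element count, with well_formed() tying the three together. A minimal sketch of the same pattern, using a generic Node type rather than the HotSpot classes (all names below are illustrative, not part of the changeset):

  #include <cassert>
  #include <cstddef>

  struct Node { Node* next = nullptr; };

  class IntrusiveList {
    Node*  _hd = nullptr;
    Node*  _tl = nullptr;
    size_t _sz = 0;
  public:
    bool well_formed() const {
      // Either completely empty, or both ends set with a positive size
      // (the full-length walk done by the removed code is omitted here).
      return ((_hd == nullptr) == (_tl == nullptr)) &&
             ((_hd == nullptr) == (_sz == 0));
    }
    void insert_before_head(Node* n) {
      assert(well_formed());
      n->next = _hd;
      _hd = n;
      if (_tl == nullptr) _tl = n;   // first element is also the tail
      _sz++;
      assert(well_formed());
    }
    Node* pop() {
      assert(well_formed());
      Node* res = _hd;
      if (res != nullptr) {
        _hd = res->next;
        res->next = nullptr;
        if (--_sz == 0) _tl = nullptr;
      }
      assert(well_formed());
      return res;
    }
  };

As in the removed code, every mutator re-checks the empty-list invariant (both ends NULL, size zero) on entry and exit.
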
    14.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jan 19 19:24:34 2011 -0800
    14.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Jan 20 13:57:12 2011 -0800
    14.3 @@ -50,6 +50,11 @@
    14.4  class HeapRegionRemSet;
    14.5  class HeapRegionRemSetIterator;
    14.6  class HeapRegion;
    14.7 +class HeapRegionSetBase;
    14.8 +
    14.9 +#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
   14.10 +#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
   14.11 +                               (__hr)->top(), (__hr)->end()
   14.12  
   14.13  // A dirty card to oop closure for heap regions. It
   14.14  // knows how to get the G1 heap and how to use the bitmap
   14.15 @@ -227,12 +232,6 @@
   14.16    // True iff the region is in current collection_set.
   14.17    bool _in_collection_set;
   14.18  
   14.19 -    // True iff the region is on the unclean list, waiting to be zero filled.
   14.20 -  bool _is_on_unclean_list;
   14.21 -
   14.22 -  // True iff the region is on the free list, ready for allocation.
   14.23 -  bool _is_on_free_list;
   14.24 -
   14.25    // Is this or has it been an allocation region in the current collection
   14.26    // pause.
   14.27    bool _is_gc_alloc_region;
   14.28 @@ -254,6 +253,13 @@
   14.29    // Next region whose cards need cleaning
   14.30    HeapRegion* _next_dirty_cards_region;
   14.31  
   14.32 +  // Fields used by the HeapRegionSetBase class and subclasses.
   14.33 +  HeapRegion* _next;
   14.34 +#ifdef ASSERT
   14.35 +  HeapRegionSetBase* _containing_set;
   14.36 +#endif // ASSERT
   14.37 +  bool _pending_removal;
   14.38 +
   14.39    // For parallel heapRegion traversal.
   14.40    jint _claimed;
   14.41  
   14.42 @@ -305,10 +311,6 @@
   14.43      _top_at_conc_mark_count = bot;
   14.44    }
   14.45  
   14.46 -  jint _zfs;  // A member of ZeroFillState.  Protected by ZF_lock.
   14.47 -  Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
   14.48 -                        // made it so.
   14.49 -
   14.50    void set_young_type(YoungType new_type) {
   14.51      //assert(_young_type != new_type, "setting the same type" );
   14.52      // TODO: add more assertions here
   14.53 @@ -362,16 +364,6 @@
   14.54      RebuildRSClaimValue   = 5
   14.55    };
   14.56  
   14.57 -  // Concurrent refinement requires contiguous heap regions (in which TLABs
   14.58 -  // might be allocated) to be zero-filled.  Each region therefore has a
   14.59 -  // zero-fill-state.
   14.60 -  enum ZeroFillState {
   14.61 -    NotZeroFilled,
   14.62 -    ZeroFilling,
   14.63 -    ZeroFilled,
   14.64 -    Allocated
   14.65 -  };
   14.66 -
   14.67    inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
   14.68      assert(is_young(), "we can only skip BOT updates on young regions");
   14.69      return ContiguousSpace::par_allocate(word_size);
   14.70 @@ -456,6 +448,9 @@
   14.71    // which this region will be part of.
   14.72    void set_continuesHumongous(HeapRegion* first_hr);
   14.73  
   14.74 +  // Unsets the humongous-related fields on the region.
   14.75 +  void set_notHumongous();
   14.76 +
   14.77    // If the region has a remembered set, return a pointer to it.
   14.78    HeapRegionRemSet* rem_set() const {
   14.79      return _rem_set;
   14.80 @@ -502,45 +497,56 @@
   14.81      _next_in_special_set = r;
   14.82    }
   14.83  
   14.84 -  bool is_on_free_list() {
   14.85 -    return _is_on_free_list;
   14.86 +  // Methods used by the HeapRegionSetBase class and subclasses.
   14.87 +
   14.88 +  // Getter and setter for the next field used to link regions into
   14.89 +  // linked lists.
   14.90 +  HeapRegion* next()              { return _next; }
   14.91 +
   14.92 +  void set_next(HeapRegion* next) { _next = next; }
   14.93 +
   14.94 +  // Every region added to a set is tagged with a reference to that
    14.95 +  // set. This is used for consistency checking to make sure that
    14.96 +  // the contents of a set are as they should be; it's only
   14.97 +  // available in non-product builds.
   14.98 +#ifdef ASSERT
   14.99 +  void set_containing_set(HeapRegionSetBase* containing_set) {
  14.100 +    assert((containing_set == NULL && _containing_set != NULL) ||
  14.101 +           (containing_set != NULL && _containing_set == NULL),
  14.102 +           err_msg("containing_set: "PTR_FORMAT" "
  14.103 +                   "_containing_set: "PTR_FORMAT,
  14.104 +                   containing_set, _containing_set));
  14.105 +
  14.106 +    _containing_set = containing_set;
   14.107 +  }
  14.108 +
  14.109 +  HeapRegionSetBase* containing_set() { return _containing_set; }
  14.110 +#else // ASSERT
  14.111 +  void set_containing_set(HeapRegionSetBase* containing_set) { }
  14.112 +
   14.113 +  // containing_set() is only used in asserts so there's no reason
  14.114 +  // to provide a dummy version of it.
  14.115 +#endif // ASSERT
  14.116 +
  14.117 +  // If we want to remove regions from a list in bulk we can simply tag
  14.118 +  // them with the pending_removal tag and call the
  14.119 +  // remove_all_pending() method on the list.
  14.120 +
  14.121 +  bool pending_removal() { return _pending_removal; }
  14.122 +
  14.123 +  void set_pending_removal(bool pending_removal) {
   14.124 +    // We can only set pending_removal to true if it's false and the
  14.125 +    // region belongs to a set.
  14.126 +    assert(!pending_removal ||
  14.127 +           (!_pending_removal && containing_set() != NULL), "pre-condition");
   14.128 +    // We can only set pending_removal to false if it's true and the
  14.129 +    // region does not belong to a set.
  14.130 +    assert( pending_removal ||
  14.131 +           ( _pending_removal && containing_set() == NULL), "pre-condition");
  14.132 +
  14.133 +    _pending_removal = pending_removal;
  14.134    }
  14.135  
  14.136 -  void set_on_free_list(bool b) {
  14.137 -    _is_on_free_list = b;
  14.138 -  }
  14.139 -
  14.140 -  HeapRegion* next_from_free_list() {
  14.141 -    assert(is_on_free_list(),
  14.142 -           "Should only invoke on free space.");
  14.143 -    assert(_next_in_special_set == NULL ||
  14.144 -           _next_in_special_set->is_on_free_list(),
  14.145 -           "Malformed Free List.");
  14.146 -    return _next_in_special_set;
  14.147 -  }
  14.148 -
  14.149 -  void set_next_on_free_list(HeapRegion* r) {
  14.150 -    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
  14.151 -    _next_in_special_set = r;
  14.152 -  }
  14.153 -
  14.154 -  bool is_on_unclean_list() {
  14.155 -    return _is_on_unclean_list;
  14.156 -  }
  14.157 -
  14.158 -  void set_on_unclean_list(bool b);
  14.159 -
  14.160 -  HeapRegion* next_from_unclean_list() {
  14.161 -    assert(is_on_unclean_list(),
  14.162 -           "Should only invoke on unclean space.");
  14.163 -    assert(_next_in_special_set == NULL ||
  14.164 -           _next_in_special_set->is_on_unclean_list(),
  14.165 -           "Malformed unclean List.");
  14.166 -    return _next_in_special_set;
  14.167 -  }
  14.168 -
  14.169 -  void set_next_on_unclean_list(HeapRegion* r);
  14.170 -
  14.171    HeapRegion* get_next_young_region() { return _next_young_region; }
  14.172    void set_next_young_region(HeapRegion* hr) {
  14.173      _next_young_region = hr;
  14.174 @@ -559,11 +565,6 @@
  14.175  
  14.176    void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  14.177  
  14.178 -  // Ensure that "this" is zero-filled.
  14.179 -  void ensure_zero_filled();
  14.180 -  // This one requires that the calling thread holds ZF_mon.
  14.181 -  void ensure_zero_filled_locked();
  14.182 -
  14.183    // Get the start of the unmarked area in this region.
  14.184    HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  14.185    HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
  14.186 @@ -798,36 +799,6 @@
  14.187    // "end" of the region if there is no such block.
  14.188    HeapWord* next_block_start_careful(HeapWord* addr);
  14.189  
  14.190 -  // Returns the zero-fill-state of the current region.
  14.191 -  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
  14.192 -  bool zero_fill_is_allocated() { return _zfs == Allocated; }
  14.193 -  Thread* zero_filler() { return _zero_filler; }
  14.194 -
  14.195 -  // Indicate that the contents of the region are unknown, and therefore
  14.196 -  // might require zero-filling.
  14.197 -  void set_zero_fill_needed() {
  14.198 -    set_zero_fill_state_work(NotZeroFilled);
  14.199 -  }
  14.200 -  void set_zero_fill_in_progress(Thread* t) {
  14.201 -    set_zero_fill_state_work(ZeroFilling);
  14.202 -    _zero_filler = t;
  14.203 -  }
  14.204 -  void set_zero_fill_complete();
  14.205 -  void set_zero_fill_allocated() {
  14.206 -    set_zero_fill_state_work(Allocated);
  14.207 -  }
  14.208 -
  14.209 -  void set_zero_fill_state_work(ZeroFillState zfs);
  14.210 -
  14.211 -  // This is called when a full collection shrinks the heap.
  14.212 -  // We want to set the heap region to a value which says
  14.213 -  // it is no longer part of the heap.  For now, we'll let "NotZF" fill
  14.214 -  // that role.
  14.215 -  void reset_zero_fill() {
  14.216 -    set_zero_fill_state_work(NotZeroFilled);
  14.217 -    _zero_filler = NULL;
  14.218 -  }
  14.219 -
  14.220    size_t recorded_rs_length() const        { return _recorded_rs_length; }
  14.221    double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  14.222    size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
  14.223 @@ -866,10 +837,6 @@
  14.224  
  14.225    // Override; it uses the "prev" marking information
  14.226    virtual void verify(bool allow_dirty) const;
  14.227 -
  14.228 -#ifdef DEBUG
  14.229 -  HeapWord* allocate(size_t size);
  14.230 -#endif
  14.231  };
  14.232  
  14.233  // HeapRegionClosure is used for iterating over regions.
  14.234 @@ -892,113 +859,6 @@
  14.235    bool complete() { return _complete; }
  14.236  };
  14.237  
  14.238 -// A linked lists of heap regions.  It leaves the "next" field
  14.239 -// unspecified; that's up to subtypes.
  14.240 -class RegionList VALUE_OBJ_CLASS_SPEC {
  14.241 -protected:
  14.242 -  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
  14.243 -  virtual void set_next(HeapRegion* chr,
  14.244 -                        HeapRegion* new_next) = 0;
  14.245 -
  14.246 -  HeapRegion* _hd;
  14.247 -  HeapRegion* _tl;
  14.248 -  size_t _sz;
  14.249 -
  14.250 -  // Protected constructor because this type is only meaningful
  14.251 -  // when the _get/_set next functions are defined.
  14.252 -  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
  14.253 -public:
  14.254 -  void reset() {
  14.255 -    _hd = NULL;
  14.256 -    _tl = NULL;
  14.257 -    _sz = 0;
  14.258 -  }
  14.259 -  HeapRegion* hd() { return _hd; }
  14.260 -  HeapRegion* tl() { return _tl; }
  14.261 -  size_t sz() { return _sz; }
  14.262 -  size_t length();
  14.263 -
  14.264 -  bool well_formed() {
  14.265 -    return
  14.266 -      ((hd() == NULL && tl() == NULL && sz() == 0)
  14.267 -       || (hd() != NULL && tl() != NULL && sz() > 0))
  14.268 -      && (sz() == length());
  14.269 -  }
  14.270 -  virtual void insert_before_head(HeapRegion* r);
  14.271 -  void prepend_list(RegionList* new_list);
  14.272 -  virtual HeapRegion* pop();
  14.273 -  void dec_sz() { _sz--; }
  14.274 -  // Requires that "r" is an element of the list, and is not the tail.
  14.275 -  void delete_after(HeapRegion* r);
  14.276 -};
  14.277 -
  14.278 -class EmptyNonHRegionList: public RegionList {
  14.279 -protected:
  14.280 -  // Protected constructor because this type is only meaningful
  14.281 -  // when the _get/_set next functions are defined.
  14.282 -  EmptyNonHRegionList() : RegionList() {}
  14.283 -
  14.284 -public:
  14.285 -  void insert_before_head(HeapRegion* r) {
  14.286 -    //    assert(r->is_empty(), "Better be empty");
  14.287 -    assert(!r->isHumongous(), "Better not be humongous.");
  14.288 -    RegionList::insert_before_head(r);
  14.289 -  }
  14.290 -  void prepend_list(EmptyNonHRegionList* new_list) {
  14.291 -    //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
  14.292 -    //     "Better be empty");
  14.293 -    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
  14.294 -           "Better not be humongous.");
  14.295 -    //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
  14.296 -    //     "Better be empty");
  14.297 -    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
  14.298 -           "Better not be humongous.");
  14.299 -    RegionList::prepend_list(new_list);
  14.300 -  }
  14.301 -};
  14.302 -
  14.303 -class UncleanRegionList: public EmptyNonHRegionList {
  14.304 -public:
  14.305 -  HeapRegion* get_next(HeapRegion* hr) {
  14.306 -    return hr->next_from_unclean_list();
  14.307 -  }
  14.308 -  void set_next(HeapRegion* hr, HeapRegion* new_next) {
  14.309 -    hr->set_next_on_unclean_list(new_next);
  14.310 -  }
  14.311 -
  14.312 -  UncleanRegionList() : EmptyNonHRegionList() {}
  14.313 -
  14.314 -  void insert_before_head(HeapRegion* r) {
  14.315 -    assert(!r->is_on_free_list(),
  14.316 -           "Better not already be on free list");
  14.317 -    assert(!r->is_on_unclean_list(),
  14.318 -           "Better not already be on unclean list");
  14.319 -    r->set_zero_fill_needed();
  14.320 -    r->set_on_unclean_list(true);
  14.321 -    EmptyNonHRegionList::insert_before_head(r);
  14.322 -  }
  14.323 -  void prepend_list(UncleanRegionList* new_list) {
  14.324 -    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
  14.325 -           "Better not already be on free list");
  14.326 -    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
  14.327 -           "Better already be marked as on unclean list");
  14.328 -    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
  14.329 -           "Better not already be on free list");
  14.330 -    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
  14.331 -           "Better already be marked as on unclean list");
  14.332 -    EmptyNonHRegionList::prepend_list(new_list);
  14.333 -  }
  14.334 -  HeapRegion* pop() {
  14.335 -    HeapRegion* res = RegionList::pop();
  14.336 -    if (res != NULL) res->set_on_unclean_list(false);
  14.337 -    return res;
  14.338 -  }
  14.339 -};
  14.340 -
  14.341 -// Local Variables: ***
  14.342 -// c-indentation-style: gnu ***
  14.343 -// End: ***
  14.344 -
  14.345  #endif // SERIALGC
  14.346  
  14.347  #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
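
The new _containing_set tag added to HeapRegion above only permits NULL-to-non-NULL and non-NULL-to-NULL transitions, so moving a region between sets has to clear the tag before re-tagging it. A compact sketch of that protocol with stand-in types (Region, Set and move() are hypothetical names, not the HotSpot API):

  #include <cassert>

  struct Set;                         // stand-in for HeapRegionSetBase

  struct Region {                     // stand-in for HeapRegion
    Set* _containing_set = nullptr;

    void set_containing_set(Set* s) {
      // Only NULL -> non-NULL or non-NULL -> NULL is allowed; this catches a
      // region that is added to one set while it is still tagged by another.
      assert((s == nullptr) != (_containing_set == nullptr));
      _containing_set = s;
    }
  };

  // Moving a region between sets has to go through NULL explicitly.
  void move(Region* r, Set* from, Set* to) {
    assert(r->_containing_set == from);
    r->set_containing_set(nullptr);   // untag from the old set first
    r->set_containing_set(to);        // then tag with the new one
  }
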
    15.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Jan 19 19:24:34 2011 -0800
    15.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Jan 20 13:57:12 2011 -0800
    15.3 @@ -65,152 +65,6 @@
    15.4  
    15.5  // Private methods.
    15.6  
    15.7 -HeapWord*
    15.8 -HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
    15.9 -  assert(G1CollectedHeap::isHumongous(word_size),
   15.10 -         "Allocation size should be humongous");
   15.11 -  int cur = ind;
   15.12 -  int first = cur;
   15.13 -  size_t sumSizes = 0;
   15.14 -  while (cur < _regions.length() && sumSizes < word_size) {
   15.15 -    // Loop invariant:
   15.16 -    //  For all i in [first, cur):
   15.17 -    //       _regions.at(i)->is_empty()
   15.18 -    //    && _regions.at(i) is contiguous with its predecessor, if any
   15.19 -    //  && sumSizes is the sum of the sizes of the regions in the interval
   15.20 -    //       [first, cur)
   15.21 -    HeapRegion* curhr = _regions.at(cur);
   15.22 -    if (curhr->is_empty()
   15.23 -        && (first == cur
   15.24 -            || (_regions.at(cur-1)->end() ==
   15.25 -                curhr->bottom()))) {
   15.26 -      sumSizes += curhr->capacity() / HeapWordSize;
   15.27 -    } else {
   15.28 -      first = cur + 1;
   15.29 -      sumSizes = 0;
   15.30 -    }
   15.31 -    cur++;
   15.32 -  }
   15.33 -  if (sumSizes >= word_size) {
   15.34 -    _alloc_search_start = cur;
   15.35 -
   15.36 -    // We need to initialize the region(s) we just discovered. This is
   15.37 -    // a bit tricky given that it can happen concurrently with
   15.38 -    // refinement threads refining cards on these regions and
   15.39 -    // potentially wanting to refine the BOT as they are scanning
   15.40 -    // those cards (this can happen shortly after a cleanup; see CR
   15.41 -    // 6991377). So we have to set up the region(s) carefully and in
   15.42 -    // a specific order.
   15.43 -
   15.44 -    // Currently, allocs_are_zero_filled() returns false. The zero
   15.45 -    // filling infrastructure will be going away soon (see CR 6977804).
   15.46 -    // So no need to do anything else here.
   15.47 -    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
   15.48 -    assert(!zf, "not supported");
   15.49 -
   15.50 -    // This will be the "starts humongous" region.
   15.51 -    HeapRegion* first_hr = _regions.at(first);
   15.52 -    {
   15.53 -      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   15.54 -      first_hr->set_zero_fill_allocated();
   15.55 -    }
   15.56 -    // The header of the new object will be placed at the bottom of
   15.57 -    // the first region.
   15.58 -    HeapWord* new_obj = first_hr->bottom();
   15.59 -    // This will be the new end of the first region in the series that
   15.60 -    // should also match the end of the last region in the seriers.
   15.61 -    // (Note: sumSizes = "region size" x "number of regions we found").
   15.62 -    HeapWord* new_end = new_obj + sumSizes;
   15.63 -    // This will be the new top of the first region that will reflect
   15.64 -    // this allocation.
   15.65 -    HeapWord* new_top = new_obj + word_size;
   15.66 -
   15.67 -    // First, we need to zero the header of the space that we will be
   15.68 -    // allocating. When we update top further down, some refinement
   15.69 -    // threads might try to scan the region. By zeroing the header we
   15.70 -    // ensure that any thread that will try to scan the region will
   15.71 -    // come across the zero klass word and bail out.
   15.72 -    //
   15.73 -    // NOTE: It would not have been correct to have used
   15.74 -    // CollectedHeap::fill_with_object() and make the space look like
   15.75 -    // an int array. The thread that is doing the allocation will
   15.76 -    // later update the object header to a potentially different array
   15.77 -    // type and, for a very short period of time, the klass and length
   15.78 -    // fields will be inconsistent. This could cause a refinement
   15.79 -    // thread to calculate the object size incorrectly.
   15.80 -    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   15.81 -
   15.82 -    // We will set up the first region as "starts humongous". This
   15.83 -    // will also update the BOT covering all the regions to reflect
   15.84 -    // that there is a single object that starts at the bottom of the
   15.85 -    // first region.
   15.86 -    first_hr->set_startsHumongous(new_top, new_end);
   15.87 -
   15.88 -    // Then, if there are any, we will set up the "continues
   15.89 -    // humongous" regions.
   15.90 -    HeapRegion* hr = NULL;
   15.91 -    for (int i = first + 1; i < cur; ++i) {
   15.92 -      hr = _regions.at(i);
   15.93 -      {
   15.94 -        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   15.95 -        hr->set_zero_fill_allocated();
   15.96 -      }
   15.97 -      hr->set_continuesHumongous(first_hr);
   15.98 -    }
   15.99 -    // If we have "continues humongous" regions (hr != NULL), then the
  15.100 -    // end of the last one should match new_end.
  15.101 -    assert(hr == NULL || hr->end() == new_end, "sanity");
  15.102 -
  15.103 -    // Up to this point no concurrent thread would have been able to
  15.104 -    // do any scanning on any region in this series. All the top
  15.105 -    // fields still point to bottom, so the intersection between
  15.106 -    // [bottom,top] and [card_start,card_end] will be empty. Before we
  15.107 -    // update the top fields, we'll do a storestore to make sure that
  15.108 -    // no thread sees the update to top before the zeroing of the
  15.109 -    // object header and the BOT initialization.
  15.110 -    OrderAccess::storestore();
  15.111 -
  15.112 -    // Now that the BOT and the object header have been initialized,
  15.113 -    // we can update top of the "starts humongous" region.
  15.114 -    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
  15.115 -           "new_top should be in this region");
  15.116 -    first_hr->set_top(new_top);
  15.117 -
  15.118 -    // Now, we will update the top fields of the "continues humongous"
  15.119 -    // regions. The reason we need to do this is that, otherwise,
  15.120 -    // these regions would look empty and this will confuse parts of
  15.121 -    // G1. For example, the code that looks for a consecutive number
  15.122 -    // of empty regions will consider them empty and try to
  15.123 -    // re-allocate them. We can extend is_empty() to also include
  15.124 -    // !continuesHumongous(), but it is easier to just update the top
  15.125 -    // fields here.
  15.126 -    hr = NULL;
  15.127 -    for (int i = first + 1; i < cur; ++i) {
  15.128 -      hr = _regions.at(i);
  15.129 -      if ((i + 1) == cur) {
  15.130 -        // last continues humongous region
  15.131 -        assert(hr->bottom() < new_top && new_top <= hr->end(),
  15.132 -               "new_top should fall on this region");
  15.133 -        hr->set_top(new_top);
  15.134 -      } else {
  15.135 -        // not last one
  15.136 -        assert(new_top > hr->end(), "new_top should be above this region");
  15.137 -        hr->set_top(hr->end());
  15.138 -      }
  15.139 -    }
  15.140 -    // If we have continues humongous regions (hr != NULL), then the
  15.141 -    // end of the last one should match new_end and its top should
  15.142 -    // match new_top.
  15.143 -    assert(hr == NULL ||
  15.144 -           (hr->end() == new_end && hr->top() == new_top), "sanity");
  15.145 -
  15.146 -    return new_obj;
  15.147 -  } else {
  15.148 -    // If we started from the beginning, we want to know why we can't alloc.
  15.149 -    return NULL;
  15.150 -  }
  15.151 -}
  15.152 -
  15.153  void HeapRegionSeq::print_empty_runs() {
  15.154    int empty_run = 0;
  15.155    int n_empty = 0;
  15.156 @@ -284,13 +138,67 @@
  15.157    return res;
  15.158  }
  15.159  
  15.160 -HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  15.161 -  int cur = _alloc_search_start;
  15.162 -  // Make sure "cur" is a valid index.
  15.163 -  assert(cur >= 0, "Invariant.");
  15.164 -  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  15.165 -  if (res == NULL)
  15.166 -    res = alloc_obj_from_region_index(0, word_size);
  15.167 +int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
  15.168 +  assert(num > 1, "pre-condition");
  15.169 +  assert(0 <= from && from <= _regions.length(),
  15.170 +         err_msg("from: %d should be valid and <= than %d",
  15.171 +                 from, _regions.length()));
  15.172 +
  15.173 +  int curr = from;
  15.174 +  int first = -1;
  15.175 +  size_t num_so_far = 0;
  15.176 +  while (curr < _regions.length() && num_so_far < num) {
  15.177 +    HeapRegion* curr_hr = _regions.at(curr);
  15.178 +    if (curr_hr->is_empty()) {
  15.179 +      if (first == -1) {
  15.180 +        first = curr;
  15.181 +        num_so_far = 1;
  15.182 +      } else {
  15.183 +        num_so_far += 1;
  15.184 +      }
  15.185 +    } else {
  15.186 +      first = -1;
  15.187 +      num_so_far = 0;
  15.188 +    }
  15.189 +    curr += 1;
  15.190 +  }
  15.191 +
  15.192 +  assert(num_so_far <= num, "post-condition");
  15.193 +  if (num_so_far == num) {
   15.194 +    // we found enough space for the humongous object
  15.195 +    assert(from <= first && first < _regions.length(), "post-condition");
  15.196 +    assert(first < curr && (curr - first) == (int) num, "post-condition");
  15.197 +    for (int i = first; i < first + (int) num; ++i) {
  15.198 +      assert(_regions.at(i)->is_empty(), "post-condition");
  15.199 +    }
  15.200 +    return first;
  15.201 +  } else {
  15.202 +    // we failed to find enough space for the humongous object
  15.203 +    return -1;
  15.204 +  }
  15.205 +}
  15.206 +
  15.207 +int HeapRegionSeq::find_contiguous(size_t num) {
  15.208 +  assert(num > 1, "otherwise we should not be calling this");
  15.209 +  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
  15.210 +         err_msg("_alloc_search_start: %d should be valid and <= than %d",
  15.211 +                 _alloc_search_start, _regions.length()));
  15.212 +
  15.213 +  int start = _alloc_search_start;
  15.214 +  int res = find_contiguous_from(start, num);
  15.215 +  if (res == -1 && start != 0) {
  15.216 +    // Try starting from the beginning. If _alloc_search_start was 0,
  15.217 +    // no point in doing this again.
  15.218 +    res = find_contiguous_from(0, num);
  15.219 +  }
  15.220 +  if (res != -1) {
  15.221 +    assert(0 <= res && res < _regions.length(),
  15.222 +           err_msg("res: %d should be valid", res));
  15.223 +    _alloc_search_start = res + (int) num;
  15.224 +  }
  15.225 +  assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
  15.226 +         err_msg("_alloc_search_start: %d should be valid",
  15.227 +                 _alloc_search_start));
  15.228    return res;
  15.229  }
  15.230  
  15.231 @@ -376,6 +284,10 @@
  15.232  
  15.233  MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
  15.234                                     size_t& num_regions_deleted) {
  15.235 +  // Reset this in case it's currently pointing into the regions that
  15.236 +  // we just removed.
  15.237 +  _alloc_search_start = 0;
  15.238 +
  15.239    assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
  15.240    assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
  15.241  
  15.242 @@ -395,7 +307,6 @@
  15.243      }
  15.244      assert(cur == _regions.top(), "Should be top");
  15.245      if (!cur->is_empty()) break;
  15.246 -    cur->reset_zero_fill();
  15.247      shrink_bytes -= cur->capacity();
  15.248      num_regions_deleted++;
  15.249      _regions.pop();
  15.250 @@ -410,7 +321,6 @@
  15.251    return MemRegion(last_start, end);
  15.252  }
  15.253  
  15.254 -
  15.255  class PrintHeapRegionClosure : public  HeapRegionClosure {
  15.256  public:
  15.257    bool doHeapRegion(HeapRegion* r) {
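
find_contiguous_from(), added above, is a run-length scan: it walks the region array, growing the current run of empty regions and resetting it whenever a non-empty region is hit, and find_contiguous() retries once from index 0 if the cached search start fails. The same idea over a plain array of emptiness flags (a sketch with hypothetical names, not the HotSpot code):

  #include <cstddef>
  #include <vector>

  // Returns the first index of a run of num consecutive empty slots in
  // [from, empty.size()), or -1 if no such run exists.
  int find_contiguous_from(const std::vector<bool>& empty, size_t from, size_t num) {
    int first = -1;
    size_t so_far = 0;
    for (size_t curr = from; curr < empty.size() && so_far < num; ++curr) {
      if (empty[curr]) {
        if (first == -1) first = (int) curr;
        so_far += 1;
      } else {
        first = -1;                   // run broken, start over
        so_far = 0;
      }
    }
    return (so_far == num) ? first : -1;
  }

  // Search from a cached start index and, on failure, retry once from the
  // beginning, mirroring the retry in find_contiguous().
  int find_contiguous(const std::vector<bool>& empty, size_t start, size_t num) {
    int res = find_contiguous_from(empty, start, num);
    if (res == -1 && start != 0) {
      res = find_contiguous_from(empty, 0, num);
    }
    return res;
  }
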
    16.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Jan 19 19:24:34 2011 -0800
    16.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Jan 20 13:57:12 2011 -0800
    16.3 @@ -1,5 +1,5 @@
    16.4  /*
    16.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    16.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    16.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.8   *
    16.9   * This code is free software; you can redistribute it and/or modify it
   16.10 @@ -41,9 +41,9 @@
   16.11    // (For efficiency only; private to obj_allocate after initialization.)
   16.12    int _alloc_search_start;
   16.13  
   16.14 -  // Attempts to allocate a block of the (assumed humongous) word_size,
   16.15 -  // starting at the region "ind".
   16.16 -  HeapWord* alloc_obj_from_region_index(int ind, size_t word_size);
   16.17 +  // Finds a contiguous set of empty regions of length num, starting
   16.18 +  // from a given index.
   16.19 +  int find_contiguous_from(int from, size_t num);
   16.20  
   16.21    // Currently, we're choosing collection sets in a round-robin fashion,
   16.22    // starting here.
   16.23 @@ -76,11 +76,8 @@
   16.24    // that are available for allocation.
   16.25    size_t free_suffix();
   16.26  
   16.27 -  // Requires "word_size" to be humongous (in the technical sense).  If
   16.28 -  // possible, allocates a contiguous subsequence of the heap regions to
   16.29 -  // satisfy the allocation, and returns the address of the beginning of
   16.30 -  // that sequence, otherwise returns NULL.
   16.31 -  HeapWord* obj_allocate(size_t word_size);
   16.32 +  // Finds a contiguous set of empty regions of length num.
   16.33 +  int find_contiguous(size_t num);
   16.34  
   16.35    // Apply the "doHeapRegion" method of "blk" to all regions in "this",
   16.36    // in address order, terminating the iteration early
   16.37 @@ -106,7 +103,7 @@
   16.38  
   16.39    // If "addr" falls within a region in the sequence, return that region,
   16.40    // or else NULL.
   16.41 -  HeapRegion* addr_to_region(const void* addr);
   16.42 +  inline HeapRegion* addr_to_region(const void* addr);
   16.43  
   16.44    void print();
   16.45  
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Jan 20 13:57:12 2011 -0800
    17.3 @@ -0,0 +1,438 @@
    17.4 +/*
    17.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    17.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    17.7 + *
    17.8 + * This code is free software; you can redistribute it and/or modify it
    17.9 + * under the terms of the GNU General Public License version 2 only, as
   17.10 + * published by the Free Software Foundation.
   17.11 + *
   17.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   17.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   17.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   17.15 + * version 2 for more details (a copy is included in the LICENSE file that
   17.16 + * accompanied this code).
   17.17 + *
   17.18 + * You should have received a copy of the GNU General Public License version
   17.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   17.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   17.21 + *
   17.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   17.23 + * or visit www.oracle.com if you need additional information or have any
   17.24 + * questions.
   17.25 + *
   17.26 + */
   17.27 +
   17.28 +#include "precompiled.hpp"
   17.29 +#include "gc_implementation/g1/heapRegionSet.inline.hpp"
   17.30 +
   17.31 +size_t HeapRegionSetBase::_unrealistically_long_length = 0;
   17.32 +
   17.33 +//////////////////// HeapRegionSetBase ////////////////////
   17.34 +
   17.35 +void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
   17.36 +  guarantee(_unrealistically_long_length == 0, "should only be set once");
   17.37 +  _unrealistically_long_length = len;
   17.38 +}
   17.39 +
   17.40 +size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
   17.41 +  assert(hr->startsHumongous(), "pre-condition");
   17.42 +  assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
   17.43 +  size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
   17.44 +  assert(region_num > 0, "sanity");
   17.45 +  return region_num;
   17.46 +}
   17.47 +
   17.48 +void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
   17.49 +  msg->append("[%s] %s "
   17.50 +              "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
   17.51 +              "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
   17.52 +              name(), message, length(), region_num(),
   17.53 +              total_capacity_bytes(), total_used_bytes());
   17.54 +  fill_in_ext_msg_extra(msg);
   17.55 +}
   17.56 +
   17.57 +bool HeapRegionSetBase::verify_region(HeapRegion* hr,
   17.58 +                                  HeapRegionSetBase* expected_containing_set) {
   17.59 +  const char* error_message = NULL;
   17.60 +
   17.61 +  if (!regions_humongous()) {
   17.62 +    if (hr->isHumongous()) {
   17.63 +      error_message = "the region should not be humongous";
   17.64 +    }
   17.65 +  } else {
   17.66 +    if (!hr->isHumongous() || !hr->startsHumongous()) {
   17.67 +      error_message = "the region should be 'starts humongous'";
   17.68 +    }
   17.69 +  }
   17.70 +
   17.71 +  if (!regions_empty()) {
   17.72 +    if (hr->is_empty()) {
   17.73 +      error_message = "the region should not be empty";
   17.74 +    }
   17.75 +  } else {
   17.76 +    if (!hr->is_empty()) {
   17.77 +      error_message = "the region should be empty";
   17.78 +    }
   17.79 +  }
   17.80 +
   17.81 +#ifdef ASSERT
   17.82 +  // The _containing_set field is only available when ASSERT is defined.
   17.83 +  if (hr->containing_set() != expected_containing_set) {
   17.84 +    error_message = "inconsistent containing set found";
   17.85 +  }
   17.86 +#endif // ASSERT
   17.87 +
   17.88 +  const char* extra_error_message = verify_region_extra(hr);
   17.89 +  if (extra_error_message != NULL) {
   17.90 +    error_message = extra_error_message;
   17.91 +  }
   17.92 +
   17.93 +  if (error_message != NULL) {
   17.94 +    outputStream* out = tty;
   17.95 +    out->cr();
   17.96 +    out->print_cr("## [%s] %s", name(), error_message);
   17.97 +    out->print_cr("## Offending Region: "PTR_FORMAT, hr);
   17.98 +    out->print_cr("   "HR_FORMAT, HR_FORMAT_PARAMS(hr));
   17.99 +#ifdef ASSERT
  17.100 +    out->print_cr("   containing set: "PTR_FORMAT, hr->containing_set());
  17.101 +#endif // ASSERT
  17.102 +    out->print_cr("## Offending Region Set: "PTR_FORMAT, this);
  17.103 +    print_on(out);
  17.104 +    return false;
  17.105 +  } else {
  17.106 +    return true;
  17.107 +  }
  17.108 +}
  17.109 +
  17.110 +void HeapRegionSetBase::verify() {
  17.111 +  // It's important that we also observe the MT safety protocol even
  17.112 +  // for the verification calls. If we do verification without the
  17.113 +  // appropriate locks and the set changes underneath our feet
  17.114 +  // verification might fail and send us on a wild goose chase.
  17.115 +  hrl_assert_mt_safety_ok(this);
  17.116 +
  17.117 +  guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
  17.118 +              total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
  17.119 +            (!is_empty() && length() >= 0 && region_num() >= 0 &&
  17.120 +              total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
  17.121 +            hrl_ext_msg(this, "invariant"));
  17.122 +
  17.123 +  guarantee((!regions_humongous() && region_num() == length()) ||
  17.124 +            ( regions_humongous() && region_num() >= length()),
  17.125 +            hrl_ext_msg(this, "invariant"));
  17.126 +
  17.127 +  guarantee(!regions_empty() || total_used_bytes() == 0,
  17.128 +            hrl_ext_msg(this, "invariant"));
  17.129 +
  17.130 +  guarantee(total_used_bytes() <= total_capacity_bytes(),
  17.131 +            hrl_ext_msg(this, "invariant"));
  17.132 +}
  17.133 +
  17.134 +void HeapRegionSetBase::verify_start() {
  17.135 +  // See comment in verify() about MT safety and verification.
  17.136 +  hrl_assert_mt_safety_ok(this);
  17.137 +  assert(!_verify_in_progress,
  17.138 +         hrl_ext_msg(this, "verification should not be in progress"));
  17.139 +
  17.140 +  // Do the basic verification first before we do the checks over the regions.
  17.141 +  HeapRegionSetBase::verify();
  17.142 +
  17.143 +  _calc_length               = 0;
  17.144 +  _calc_region_num           = 0;
  17.145 +  _calc_total_capacity_bytes = 0;
  17.146 +  _calc_total_used_bytes     = 0;
  17.147 +  _verify_in_progress        = true;
  17.148 +}
  17.149 +
  17.150 +void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
  17.151 +  // See comment in verify() about MT safety and verification.
  17.152 +  hrl_assert_mt_safety_ok(this);
  17.153 +  assert(_verify_in_progress,
  17.154 +         hrl_ext_msg(this, "verification should be in progress"));
  17.155 +
  17.156 +  guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
  17.157 +
  17.158 +  _calc_length               += 1;
  17.159 +  if (!hr->isHumongous()) {
  17.160 +    _calc_region_num         += 1;
  17.161 +  } else {
  17.162 +    _calc_region_num         += calculate_region_num(hr);
  17.163 +  }
  17.164 +  _calc_total_capacity_bytes += hr->capacity();
  17.165 +  _calc_total_used_bytes     += hr->used();
  17.166 +}
  17.167 +
  17.168 +void HeapRegionSetBase::verify_end() {
  17.169 +  // See comment in verify() about MT safety and verification.
  17.170 +  hrl_assert_mt_safety_ok(this);
  17.171 +  assert(_verify_in_progress,
  17.172 +         hrl_ext_msg(this, "verification should be in progress"));
  17.173 +
  17.174 +  guarantee(length() == _calc_length,
  17.175 +            hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
  17.176 +                        "calc length: "SIZE_FORMAT,
  17.177 +                        name(), length(), _calc_length));
  17.178 +
  17.179 +  guarantee(region_num() == _calc_region_num,
  17.180 +            hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
  17.181 +                        "calc region num: "SIZE_FORMAT,
  17.182 +                        name(), region_num(), _calc_region_num));
  17.183 +
  17.184 +  guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
  17.185 +            hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
  17.186 +                        "calc capacity bytes: "SIZE_FORMAT,
  17.187 +                        name(),
  17.188 +                        total_capacity_bytes(), _calc_total_capacity_bytes));
  17.189 +
  17.190 +  guarantee(total_used_bytes() == _calc_total_used_bytes,
  17.191 +            hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
  17.192 +                        "calc used bytes: "SIZE_FORMAT,
  17.193 +                        name(), total_used_bytes(), _calc_total_used_bytes));
  17.194 +
  17.195 +  _verify_in_progress = false;
  17.196 +}
  17.197 +
  17.198 +void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
  17.199 +  out->cr();
  17.200 +  out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
  17.201 +  out->print_cr("  Region Assumptions");
  17.202 +  out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
  17.203 +  out->print_cr("    empty             : %s", BOOL_TO_STR(regions_empty()));
  17.204 +  out->print_cr("  Attributes");
  17.205 +  out->print_cr("    length            : "SIZE_FORMAT_W(14), length());
  17.206 +  out->print_cr("    region num        : "SIZE_FORMAT_W(14), region_num());
  17.207 +  out->print_cr("    total capacity    : "SIZE_FORMAT_W(14)" bytes",
  17.208 +                total_capacity_bytes());
  17.209 +  out->print_cr("    total used        : "SIZE_FORMAT_W(14)" bytes",
  17.210 +                total_used_bytes());
  17.211 +}
  17.212 +
  17.213 +void HeapRegionSetBase::clear() {
  17.214 +  _length           = 0;
  17.215 +  _region_num       = 0;
  17.216 +  _total_used_bytes = 0;
  17.217 +}
  17.218 +
  17.219 +HeapRegionSetBase::HeapRegionSetBase(const char* name)
  17.220 +  : _name(name), _verify_in_progress(false),
  17.221 +    _calc_length(0), _calc_region_num(0),
  17.222 +    _calc_total_capacity_bytes(0), _calc_total_used_bytes(0) { }
  17.223 +
  17.224 +//////////////////// HeapRegionSet ////////////////////
  17.225 +
  17.226 +void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
  17.227 +  hrl_assert_mt_safety_ok(this);
  17.228 +  hrl_assert_mt_safety_ok(proxy_set);
  17.229 +  hrl_assert_sets_match(this, proxy_set);
  17.230 +
  17.231 +  verify_optional();
  17.232 +  proxy_set->verify_optional();
  17.233 +
  17.234 +  if (proxy_set->is_empty()) return;
  17.235 +
  17.236 +  assert(proxy_set->length() <= _length,
  17.237 +         hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
  17.238 +                     "should be <= length: "SIZE_FORMAT,
  17.239 +                     name(), proxy_set->length(), _length));
  17.240 +  _length -= proxy_set->length();
  17.241 +
  17.242 +  assert(proxy_set->region_num() <= _region_num,
  17.243 +         hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
  17.244 +                     "should be <= region num: "SIZE_FORMAT,
  17.245 +                     name(), proxy_set->region_num(), _region_num));
  17.246 +  _region_num -= proxy_set->region_num();
  17.247 +
  17.248 +  assert(proxy_set->total_used_bytes() <= _total_used_bytes,
  17.249 +         hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
  17.250 +                     "should be <= used bytes: "SIZE_FORMAT,
  17.251 +                     name(), proxy_set->total_used_bytes(),
  17.252 +                     _total_used_bytes));
  17.253 +  _total_used_bytes -= proxy_set->total_used_bytes();
  17.254 +
  17.255 +  proxy_set->clear();
  17.256 +
  17.257 +  verify_optional();
  17.258 +  proxy_set->verify_optional();
  17.259 +}
  17.260 +
  17.261 +//////////////////// HeapRegionLinkedList ////////////////////
  17.262 +
  17.263 +void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
  17.264 +  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
  17.265 +}
  17.266 +
  17.267 +void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
  17.268 +  hrl_assert_mt_safety_ok(this);
  17.269 +  hrl_assert_mt_safety_ok(from_list);
  17.270 +
  17.271 +  verify_optional();
  17.272 +  from_list->verify_optional();
  17.273 +
  17.274 +  if (from_list->is_empty()) return;
  17.275 +
  17.276 +#ifdef ASSERT
  17.277 +  HeapRegionLinkedListIterator iter(from_list);
  17.278 +  while (iter.more_available()) {
  17.279 +    HeapRegion* hr = iter.get_next();
  17.280 +    // In set_containing_set() we check that we either set the value
  17.281 +    // from NULL to non-NULL or vice versa to catch bugs. So, we have
  17.282 +    // to NULL it first before setting it to the value.
  17.283 +    hr->set_containing_set(NULL);
  17.284 +    hr->set_containing_set(this);
  17.285 +  }
  17.286 +#endif // ASSERT
  17.287 +
  17.288 +  if (_tail != NULL) {
  17.289 +    assert(length() >  0 && _head != NULL, hrl_ext_msg(this, "invariant"));
  17.290 +    _tail->set_next(from_list->_head);
  17.291 +  } else {
  17.292 +    assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
  17.293 +    _head = from_list->_head;
  17.294 +  }
  17.295 +  _tail = from_list->_tail;
  17.296 +
  17.297 +  _length           += from_list->length();
  17.298 +  _region_num       += from_list->region_num();
  17.299 +  _total_used_bytes += from_list->total_used_bytes();
  17.300 +  from_list->clear();
  17.301 +
  17.302 +  verify_optional();
  17.303 +  from_list->verify_optional();
  17.304 +}
  17.305 +
  17.306 +void HeapRegionLinkedList::remove_all() {
  17.307 +  hrl_assert_mt_safety_ok(this);
  17.308 +  verify_optional();
  17.309 +
  17.310 +  HeapRegion* curr = _head;
  17.311 +  while (curr != NULL) {
  17.312 +    hrl_assert_region_ok(this, curr, this);
  17.313 +
  17.314 +    HeapRegion* next = curr->next();
  17.315 +    curr->set_next(NULL);
  17.316 +    curr->set_containing_set(NULL);
  17.317 +    curr = next;
  17.318 +  }
  17.319 +  clear();
  17.320 +
  17.321 +  verify_optional();
  17.322 +}
  17.323 +
  17.324 +void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
  17.325 +  hrl_assert_mt_safety_ok(this);
  17.326 +  assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
  17.327 +  assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
  17.328 +
  17.329 +  verify_optional();
  17.330 +  DEBUG_ONLY(size_t old_length = length();)
  17.331 +
  17.332 +  HeapRegion* curr = _head;
  17.333 +  HeapRegion* prev = NULL;
  17.334 +  size_t count = 0;
  17.335 +  while (curr != NULL) {
  17.336 +    hrl_assert_region_ok(this, curr, this);
  17.337 +    HeapRegion* next = curr->next();
  17.338 +
  17.339 +    if (curr->pending_removal()) {
  17.340 +      assert(count < target_count,
  17.341 +             hrl_err_msg("[%s] should not come across more regions "
  17.342 +                         "pending for removal than target_count: "SIZE_FORMAT,
  17.343 +                         name(), target_count));
  17.344 +
  17.345 +      if (prev == NULL) {
  17.346 +        assert(_head == curr, hrl_ext_msg(this, "invariant"));
  17.347 +        _head = next;
  17.348 +      } else {
  17.349 +        assert(_head != curr, hrl_ext_msg(this, "invariant"));
  17.350 +        prev->set_next(next);
  17.351 +      }
  17.352 +      if (next == NULL) {
  17.353 +        assert(_tail == curr, hrl_ext_msg(this, "invariant"));
  17.354 +        _tail = prev;
  17.355 +      } else {
  17.356 +        assert(_tail != curr, hrl_ext_msg(this, "invariant"));
  17.357 +      }
  17.358 +
  17.359 +      curr->set_next(NULL);
  17.360 +      remove_internal(curr);
  17.361 +      curr->set_pending_removal(false);
  17.362 +
  17.363 +      count += 1;
  17.364 +
  17.365 +      // If we have come across the target number of regions we can
  17.366 +      // just bail out. However, for debugging purposes, we can just
   17.367 +      // carry on iterating to make sure there are no more regions
  17.368 +      // tagged with pending removal.
  17.369 +      DEBUG_ONLY(if (count == target_count) break;)
  17.370 +    } else {
  17.371 +      prev = curr;
  17.372 +    }
  17.373 +    curr = next;
  17.374 +  }
  17.375 +
  17.376 +  assert(count == target_count,
  17.377 +         hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
  17.378 +                     "target_count: "SIZE_FORMAT, name(), count, target_count));
  17.379 +  assert(length() + target_count == old_length,
  17.380 +         hrl_err_msg("[%s] new length should be consistent "
  17.381 +                     "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
  17.382 +                     "target_count: "SIZE_FORMAT,
  17.383 +                     name(), length(), old_length, target_count));
  17.384 +
  17.385 +  verify_optional();
  17.386 +}
  17.387 +
  17.388 +void HeapRegionLinkedList::verify() {
  17.389 +  // See comment in HeapRegionSetBase::verify() about MT safety and
  17.390 +  // verification.
  17.391 +  hrl_assert_mt_safety_ok(this);
  17.392 +
  17.393 +  // This will also do the basic verification too.
  17.394 +  verify_start();
  17.395 +
  17.396 +  HeapRegion* curr  = _head;
  17.397 +  HeapRegion* prev1 = NULL;
  17.398 +  HeapRegion* prev0 = NULL;
  17.399 +  size_t      count = 0;
  17.400 +  while (curr != NULL) {
  17.401 +    verify_next_region(curr);
  17.402 +
  17.403 +    count += 1;
  17.404 +    guarantee(count < _unrealistically_long_length,
  17.405 +              hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
  17.406 +                          "seems very long, is there maybe a cycle? "
  17.407 +                          "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
  17.408 +                          "prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
  17.409 +                          name(), count, curr, prev0, prev1, length()));
  17.410 +
  17.411 +    prev1 = prev0;
  17.412 +    prev0 = curr;
  17.413 +    curr  = curr->next();
  17.414 +  }
  17.415 +
  17.416 +  guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
  17.417 +
  17.418 +  verify_end();
  17.419 +}
  17.420 +
  17.421 +void HeapRegionLinkedList::clear() {
  17.422 +  HeapRegionSetBase::clear();
  17.423 +  _head = NULL;
  17.424 +  _tail = NULL;
  17.425 +}
  17.426 +
  17.427 +void HeapRegionLinkedList::print_on(outputStream* out, bool print_contents) {
  17.428 +  HeapRegionSetBase::print_on(out, print_contents);
  17.429 +  out->print_cr("  Linking");
  17.430 +  out->print_cr("    head              : "PTR_FORMAT, _head);
  17.431 +  out->print_cr("    tail              : "PTR_FORMAT, _tail);
  17.432 +
  17.433 +  if (print_contents) {
  17.434 +    out->print_cr("  Contents");
  17.435 +    HeapRegionLinkedListIterator iter(this);
  17.436 +    while (iter.more_available()) {
  17.437 +      HeapRegion* hr = iter.get_next();
  17.438 +      hr->print_on(out);
  17.439 +    }
  17.440 +  }
  17.441 +}
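
remove_all_pending(), defined above, assumes the caller has already tagged exactly target_count regions and then unlinks them in a single pass while keeping the head/tail pointers and the length consistent. A self-contained sketch of the same one-pass unlink on a plain singly-linked list (Node and List are illustrative stand-ins, not the HotSpot classes):

  #include <cassert>
  #include <cstddef>

  struct Node {
    Node* next    = nullptr;
    bool  pending = false;    // stands in for HeapRegion::pending_removal()
  };

  struct List {
    Node*  head = nullptr;
    Node*  tail = nullptr;
    size_t len  = 0;

    // Unlink every node tagged as pending in a single pass, fixing up the
    // head/tail pointers and the length as we go.
    void remove_all_pending(size_t target_count) {
      Node*  prev  = nullptr;
      Node*  curr  = head;
      size_t count = 0;
      while (curr != nullptr) {
        Node* next = curr->next;
        if (curr->pending) {
          if (prev == nullptr) head = next; else prev->next = next;
          if (next == nullptr) tail = prev;
          curr->next    = nullptr;
          curr->pending = false;
          len--;
          count++;
        } else {
          prev = curr;
        }
        curr = next;
      }
      assert(count == target_count);
    }
  };

Tagging first and unlinking later keeps the list surgery to one traversal, which matches the bulk-removal comment on HeapRegion::set_pending_removal() in the header diff above.
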
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Thu Jan 20 13:57:12 2011 -0800
    18.3 @@ -0,0 +1,346 @@
    18.4 +/*
     18.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    18.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    18.7 + *
    18.8 + * This code is free software; you can redistribute it and/or modify it
    18.9 + * under the terms of the GNU General Public License version 2 only, as
   18.10 + * published by the Free Software Foundation.
   18.11 + *
   18.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   18.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   18.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   18.15 + * version 2 for more details (a copy is included in the LICENSE file that
   18.16 + * accompanied this code).
   18.17 + *
   18.18 + * You should have received a copy of the GNU General Public License version
   18.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   18.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18.21 + *
   18.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   18.23 + * or visit www.oracle.com if you need additional information or have any
   18.24 + * questions.
   18.25 + *
   18.26 + */
   18.27 +
   18.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
   18.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
   18.30 +
   18.31 +#include "gc_implementation/g1/heapRegion.hpp"
   18.32 +
   18.33 +// Large buffer for some cases where the output might be larger than normal.
   18.34 +#define HRL_ERR_MSG_BUFSZ 512
   18.35 +typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
   18.36 +
   18.37 +// Set verification will be forced either if someone defines
   18.38 +// HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
   18.39 +// asserts are compiled in.
   18.40 +#ifndef HEAP_REGION_SET_FORCE_VERIFY
   18.41 +#define HEAP_REGION_SET_FORCE_VERIFY defined(ASSERT)
   18.42 +#endif // HEAP_REGION_SET_FORCE_VERIFY
   18.43 +
   18.44 +//////////////////// HeapRegionSetBase ////////////////////
   18.45 +
   18.46 +// Base class for all the classes that represent heap region sets. It
   18.47 +// contains the basic attributes that each set needs to maintain
   18.48 +// (e.g., length, region num, used bytes sum) plus any shared
   18.49 +// functionality (e.g., verification).
   18.50 +
   18.51 +class hrl_ext_msg;
   18.52 +
   18.53 +class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
   18.54 +  friend class hrl_ext_msg;
   18.55 +
   18.56 +protected:
   18.57 +  static size_t calculate_region_num(HeapRegion* hr);
   18.58 +
   18.59 +  static size_t _unrealistically_long_length;
   18.60 +
   18.61 +  // The number of regions added to the set. If the set contains
   18.62 +  // only humongous regions, this reflects only 'starts humongous'
   18.63 +  // regions and does not include 'continues humongous' ones.
   18.64 +  size_t _length;
   18.65 +
   18.66 +  // The total number of regions represented by the set. If the set
   18.67 +  // does not contain humongous regions, this should be the same as
   18.68 +  // _length. If the set contains only humongous regions, this will
   18.69 +  // include the 'continues humongous' regions.
   18.70 +  size_t _region_num;
   18.71 +
    18.72 +  // We don't keep track of the total capacity explicitly; we instead
   18.73 +  // recalculate it based on _region_num and the heap region size.
   18.74 +
    18.75 +  // The sum of used bytes in all the regions in the set.
   18.76 +  size_t _total_used_bytes;
   18.77 +
   18.78 +  const char* _name;
   18.79 +
   18.80 +  bool        _verify_in_progress;
   18.81 +  size_t      _calc_length;
   18.82 +  size_t      _calc_region_num;
   18.83 +  size_t      _calc_total_capacity_bytes;
   18.84 +  size_t      _calc_total_used_bytes;
   18.85 +
   18.86 +  // verify_region() is used to ensure that the contents of a region
   18.87 +  // added to / removed from a set are consistent. Different sets
   18.88 +  // make different assumptions about the regions added to them. So
   18.89 +  // each set can override verify_region_extra(), which is called
   18.90 +  // from verify_region(), and do any extra verification it needs to
   18.91 +  // perform in that.
   18.92 +  virtual const char* verify_region_extra(HeapRegion* hr) { return NULL; }
   18.93 +  bool verify_region(HeapRegion* hr,
   18.94 +                     HeapRegionSetBase* expected_containing_set);
   18.95 +
   18.96 +  // Indicates whether all regions in the set should be humongous or
   18.97 +  // not. Only used during verification.
   18.98 +  virtual bool regions_humongous() = 0;
   18.99 +
  18.100 +  // Indicates whether all regions in the set should be empty or
  18.101 +  // not. Only used during verification.
  18.102 +  virtual bool regions_empty() = 0;
  18.103 +
  18.104 +  // Subclasses can optionally override this to do MT safety protocol
  18.105 +  // checks. It is called in an assert from all methods that perform
   18.106 +  // updates on the set (and subclasses should also call it).
  18.107 +  virtual bool check_mt_safety() { return true; }
  18.108 +
   18.109 +  // fill_in_ext_msg() writes the values of the set's attributes
  18.110 +  // in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
  18.111 +  // allows subclasses to append further information.
  18.112 +  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
  18.113 +  void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
  18.114 +
  18.115 +  // It updates the fields of the set to reflect hr being added to
  18.116 +  // the set.
  18.117 +  inline void update_for_addition(HeapRegion* hr);
  18.118 +
  18.119 +  // It updates the fields of the set to reflect hr being added to
  18.120 +  // the set and tags the region appropriately.
  18.121 +  inline void add_internal(HeapRegion* hr);
  18.122 +
  18.123 +  // It updates the fields of the set to reflect hr being removed
  18.124 +  // from the set.
  18.125 +  inline void update_for_removal(HeapRegion* hr);
  18.126 +
  18.127 +  // It updates the fields of the set to reflect hr being removed
  18.128 +  // from the set and tags the region appropriately.
  18.129 +  inline void remove_internal(HeapRegion* hr);
  18.130 +
   18.131 +  // It clears all the fields of the set. Note: it will not iterate
  18.132 +  // over the set and remove regions from it. It assumes that the
  18.133 +  // caller has already done so. It will literally just clear the fields.
  18.134 +  virtual void clear();
  18.135 +
  18.136 +  HeapRegionSetBase(const char* name);
  18.137 +
  18.138 +public:
  18.139 +  static void set_unrealistically_long_length(size_t len);
  18.140 +
  18.141 +  const char* name() { return _name; }
  18.142 +
  18.143 +  size_t length() { return _length; }
  18.144 +
  18.145 +  bool is_empty() { return _length == 0; }
  18.146 +
  18.147 +  size_t region_num() { return _region_num; }
  18.148 +
  18.149 +  size_t total_capacity_bytes() {
  18.150 +    return region_num() << HeapRegion::LogOfHRGrainBytes;
  18.151 +  }
  18.152 +
  18.153 +  size_t total_used_bytes() { return _total_used_bytes; }
  18.154 +
  18.155 +  virtual void verify();
  18.156 +  void verify_start();
  18.157 +  void verify_next_region(HeapRegion* hr);
  18.158 +  void verify_end();
  18.159 +
  18.160 +#if HEAP_REGION_SET_FORCE_VERIFY
  18.161 +  void verify_optional() {
  18.162 +    verify();
  18.163 +  }
  18.164 +#else // HEAP_REGION_SET_FORCE_VERIFY
  18.165 +  void verify_optional() { }
  18.166 +#endif // HEAP_REGION_SET_FORCE_VERIFY
  18.167 +
  18.168 +  virtual void print_on(outputStream* out, bool print_contents = false);
  18.169 +};
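For illustration only (the region size is assumed here, not taken from this changeset), the capacity calculation above works out as follows:

    // With 1 MB regions, HeapRegion::LogOfHRGrainBytes == 20, so a set whose
    // region_num() is 10 reports
    //   total_capacity_bytes() == 10 << 20 == 10485760 bytes (10 MB).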
  18.170 +
   18.171 +// Customized err_msg for heap region sets. Apart from an
  18.172 +// assert/guarantee-specific message it also prints out the values of
  18.173 +// the fields of the associated set. This can be very helpful in
  18.174 +// diagnosing failures.
  18.175 +
  18.176 +class hrl_ext_msg : public hrl_err_msg {
  18.177 +public:
  18.178 +  hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
  18.179 +    set->fill_in_ext_msg(this, message);
  18.180 +  }
  18.181 +};
  18.182 +
  18.183 +// These two macros are provided for convenience, to keep the uses of
  18.184 +// these two asserts a bit more concise.
  18.185 +
  18.186 +#define hrl_assert_mt_safety_ok(_set_)                                        \
  18.187 +  do {                                                                        \
  18.188 +    assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety"));    \
  18.189 +  } while (0)
  18.190 +
  18.191 +#define hrl_assert_region_ok(_set_, _hr_, _expected_)                         \
  18.192 +  do {                                                                        \
  18.193 +    assert((_set_)->verify_region((_hr_), (_expected_)),                      \
  18.194 +           hrl_ext_msg((_set_), "region verification"));                      \
  18.195 +  } while (0)
  18.196 +
  18.197 +//////////////////// HeapRegionSet ////////////////////
  18.198 +
  18.199 +#define hrl_assert_sets_match(_set1_, _set2_)                                 \
  18.200 +  do {                                                                        \
  18.201 +    assert(((_set1_)->regions_humongous() ==                                  \
  18.202 +                                            (_set2_)->regions_humongous()) && \
  18.203 +           ((_set1_)->regions_empty() == (_set2_)->regions_empty()),          \
  18.204 +           hrl_err_msg("the contents of set %s and set %s should match",      \
  18.205 +                       (_set1_)->name(), (_set2_)->name()));                  \
  18.206 +  } while (0)
  18.207 +
  18.208 +// This class represents heap region sets whose members are not
  18.209 +// explicitly tracked. It's helpful to group regions using such sets
  18.210 +// so that we can reason about all the region groups in the heap using
  18.211 +// the same interface (namely, the HeapRegionSetBase API).
  18.212 +
  18.213 +class HeapRegionSet : public HeapRegionSetBase {
  18.214 +protected:
  18.215 +  virtual const char* verify_region_extra(HeapRegion* hr) {
  18.216 +    if (hr->next() != NULL) {
  18.217 +      return "next() should always be NULL as we do not link the regions";
  18.218 +    }
  18.219 +
  18.220 +    return HeapRegionSetBase::verify_region_extra(hr);
  18.221 +  }
  18.222 +
  18.223 +  HeapRegionSet(const char* name) : HeapRegionSetBase(name) {
  18.224 +    clear();
  18.225 +  }
  18.226 +
  18.227 +public:
  18.228 +  // It adds hr to the set. The region should not be a member of
  18.229 +  // another set.
  18.230 +  inline void add(HeapRegion* hr);
  18.231 +
  18.232 +  // It removes hr from the set. The region should be a member of
  18.233 +  // this set.
  18.234 +  inline void remove(HeapRegion* hr);
  18.235 +
  18.236 +  // It removes a region from the set. Instead of updating the fields
  18.237 +  // of the set to reflect this removal, it accumulates the updates
  18.238 +  // in proxy_set. The idea is that proxy_set is thread-local to
  18.239 +  // avoid multiple threads updating the fields of the set
  18.240 +  // concurrently and having to synchronize. The method
  18.241 +  // update_from_proxy() will update the fields of the set from the
  18.242 +  // proxy_set.
  18.243 +  inline void remove_with_proxy(HeapRegion* hr, HeapRegionSet* proxy_set);
  18.244 +
  18.245 +  // After multiple calls to remove_with_proxy() the updates to the
  18.246 +  // fields of the set are accumulated in proxy_set. This call
  18.247 +  // updates the fields of the set from proxy_set.
  18.248 +  void update_from_proxy(HeapRegionSet* proxy_set);
  18.249 +};
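A minimal usage sketch of the proxy mechanism described above (not part of this changeset; the helper and its arguments are assumed for illustration, and the concrete set types come from heapRegionSets.hpp, added later in this changeset):

    #include "gc_implementation/g1/heapRegionSets.hpp"

    // Hypothetical helper: detach a batch of regions from a shared humongous set.
    void detach_regions(MasterHumongousRegionSet* shared_set,
                        HeapRegion** regions, size_t count) {
      HumongousRegionSet proxy("Local Proxy");  // thread-local, no locking needed
      for (size_t i = 0; i < count; i += 1) {
        // Only the proxy's counters are updated; the shared set is untouched.
        shared_set->remove_with_proxy(regions[i], &proxy);
      }
      // Later, under the shared set's MT safety protocol, fold the accumulated
      // updates back into the shared set with a single call.
      shared_set->update_from_proxy(&proxy);
    }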
  18.250 +
  18.251 +//////////////////// HeapRegionLinkedList ////////////////////
  18.252 +
  18.253 +// A set that links all the regions added to it in a singly-linked
  18.254 +// list. We should try to avoid doing operations that iterate over
  18.255 +// such lists in performance critical paths. Typically we should
  18.256 +// add / remove one region at a time or concatenate two lists. All
  18.257 +// those operations are done in constant time.
  18.258 +
  18.259 +class HeapRegionLinkedListIterator;
  18.260 +
  18.261 +class HeapRegionLinkedList : public HeapRegionSetBase {
  18.262 +  friend class HeapRegionLinkedListIterator;
  18.263 +
  18.264 +private:
  18.265 +  HeapRegion* _head;
  18.266 +  HeapRegion* _tail;
  18.267 +
  18.268 +  // These are provided for use by the friend classes.
  18.269 +  HeapRegion* head() { return _head; }
  18.270 +  HeapRegion* tail() { return _tail; }
  18.271 +
  18.272 +protected:
  18.273 +  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
  18.274 +
  18.275 +  // See the comment for HeapRegionSetBase::clear()
  18.276 +  virtual void clear();
  18.277 +
  18.278 +  HeapRegionLinkedList(const char* name) : HeapRegionSetBase(name) {
  18.279 +    clear();
  18.280 +  }
  18.281 +
  18.282 +public:
  18.283 +  // It adds hr to the list as the new tail. The region should not be
  18.284 +  // a member of another set.
  18.285 +  inline void add_as_tail(HeapRegion* hr);
  18.286 +
  18.287 +  // It removes and returns the head of the list. It assumes that the
  18.288 +  // list is not empty so it will return a non-NULL value.
  18.289 +  inline HeapRegion* remove_head();
  18.290 +
  18.291 +  // Convenience method.
  18.292 +  inline HeapRegion* remove_head_or_null();
  18.293 +
  18.294 +  // It moves the regions from from_list to this list and empties
  18.295 +  // from_list. The new regions will appear in the same order as they
   18.296 +  // were in from_list and be linked at the end of this list.
  18.297 +  void add_as_tail(HeapRegionLinkedList* from_list);
  18.298 +
  18.299 +  // It empties the list by removing all regions from it.
  18.300 +  void remove_all();
  18.301 +
  18.302 +  // It removes all regions in the list that are pending for removal
  18.303 +  // (i.e., they have been tagged with "pending_removal"). The list
  18.304 +  // must not be empty, target_count should reflect the exact number
  18.305 +  // of regions that are pending for removal in the list, and
  18.306 +  // target_count should be > 1 (currently, we never need to remove a
  18.307 +  // single region using this).
  18.308 +  void remove_all_pending(size_t target_count);
  18.309 +
  18.310 +  virtual void verify();
  18.311 +
  18.312 +  virtual void print_on(outputStream* out, bool print_contents = false);
  18.313 +};
  18.314 +
   18.315 +//////////////////// HeapRegionLinkedListIterator ////////////////////
  18.316 +
   18.317 +// Iterator class that provides a convenient way to iterate over the
  18.318 +// regions in a HeapRegionLinkedList instance.
  18.319 +
  18.320 +class HeapRegionLinkedListIterator : public StackObj {
  18.321 +private:
  18.322 +  HeapRegionLinkedList* _list;
  18.323 +  HeapRegion*           _curr;
  18.324 +
  18.325 +public:
  18.326 +  bool more_available() {
  18.327 +    return _curr != NULL;
  18.328 +  }
  18.329 +
  18.330 +  HeapRegion* get_next() {
  18.331 +    assert(more_available(),
  18.332 +           "get_next() should be called when more regions are available");
  18.333 +
  18.334 +    // If we are going to introduce a count in the iterator we should
  18.335 +    // do the "cycle" check.
  18.336 +
  18.337 +    HeapRegion* hr = _curr;
  18.338 +    assert(_list->verify_region(hr, _list), "region verification");
  18.339 +    _curr = hr->next();
  18.340 +    return hr;
  18.341 +  }
  18.342 +
  18.343 +  HeapRegionLinkedListIterator(HeapRegionLinkedList* list)
  18.344 +    : _curr(NULL), _list(list) {
  18.345 +    _curr = list->head();
  18.346 +  }
  18.347 +};
  18.348 +
  18.349 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_HPP
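For illustration (the helper name is assumed, not part of the changeset), a typical walk over a list with the iterator above might look like this:

    #include "gc_implementation/g1/heapRegionSet.hpp"

    // Hypothetical helper: sum the used bytes of every region on a list.
    size_t sum_used_bytes(HeapRegionLinkedList* list) {
      size_t total = 0;
      HeapRegionLinkedListIterator iter(list);
      while (iter.more_available()) {
        HeapRegion* hr = iter.get_next();
        total += hr->used();
      }
      return total;
    }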
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Thu Jan 20 13:57:12 2011 -0800
    19.3 @@ -0,0 +1,159 @@
    19.4 +/*
     19.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    19.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    19.7 + *
    19.8 + * This code is free software; you can redistribute it and/or modify it
    19.9 + * under the terms of the GNU General Public License version 2 only, as
   19.10 + * published by the Free Software Foundation.
   19.11 + *
   19.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   19.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   19.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   19.15 + * version 2 for more details (a copy is included in the LICENSE file that
   19.16 + * accompanied this code).
   19.17 + *
   19.18 + * You should have received a copy of the GNU General Public License version
   19.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   19.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19.21 + *
   19.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   19.23 + * or visit www.oracle.com if you need additional information or have any
   19.24 + * questions.
   19.25 + *
   19.26 + */
   19.27 +
   19.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
   19.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
   19.30 +
   19.31 +#include "gc_implementation/g1/heapRegionSet.hpp"
   19.32 +
   19.33 +//////////////////// HeapRegionSetBase ////////////////////
   19.34 +
   19.35 +inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
   19.36 +  // Assumes the caller has already verified the region.
   19.37 +
   19.38 +  _length           += 1;
   19.39 +  if (!hr->isHumongous()) {
   19.40 +    _region_num     += 1;
   19.41 +  } else {
   19.42 +    _region_num     += calculate_region_num(hr);
   19.43 +  }
   19.44 +  _total_used_bytes += hr->used();
   19.45 +}
   19.46 +
   19.47 +inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
   19.48 +  hrl_assert_region_ok(this, hr, NULL);
   19.49 +  assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
   19.50 +
   19.51 +  update_for_addition(hr);
   19.52 +  hr->set_containing_set(this);
   19.53 +}
   19.54 +
   19.55 +inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
   19.56 +  // Assumes the caller has already verified the region.
   19.57 +  assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
   19.58 +  _length -= 1;
   19.59 +
   19.60 +  size_t region_num_diff;
   19.61 +  if (!hr->isHumongous()) {
   19.62 +    region_num_diff = 1;
   19.63 +  } else {
   19.64 +    region_num_diff = calculate_region_num(hr);
   19.65 +  }
   19.66 +  assert(region_num_diff <= _region_num,
   19.67 +         hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
   19.68 +                     "should be <= region num: "SIZE_FORMAT,
   19.69 +                     name(), region_num_diff, _region_num));
   19.70 +  _region_num -= region_num_diff;
   19.71 +
   19.72 +  size_t used_bytes = hr->used();
   19.73 +  assert(used_bytes <= _total_used_bytes,
   19.74 +         hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
   19.75 +                     "should be <= used bytes: "SIZE_FORMAT,
   19.76 +                     name(), used_bytes, _total_used_bytes));
   19.77 +  _total_used_bytes -= used_bytes;
   19.78 +}
   19.79 +
   19.80 +inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
   19.81 +  hrl_assert_region_ok(this, hr, this);
   19.82 +  assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
   19.83 +
   19.84 +  hr->set_containing_set(NULL);
   19.85 +  update_for_removal(hr);
   19.86 +}
   19.87 +
   19.88 +//////////////////// HeapRegionSet ////////////////////
   19.89 +
   19.90 +inline void HeapRegionSet::add(HeapRegion* hr) {
   19.91 +  hrl_assert_mt_safety_ok(this);
   19.92 +  // add_internal() will verify the region.
   19.93 +  add_internal(hr);
   19.94 +}
   19.95 +
   19.96 +inline void HeapRegionSet::remove(HeapRegion* hr) {
   19.97 +  hrl_assert_mt_safety_ok(this);
   19.98 +  // remove_internal() will verify the region.
   19.99 +  remove_internal(hr);
  19.100 +}
  19.101 +
  19.102 +inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
  19.103 +                                             HeapRegionSet* proxy_set) {
   19.104 +  // No need to do the MT safety check here given that this method
  19.105 +  // does not update the contents of the set but instead accumulates
  19.106 +  // the changes in proxy_set which is assumed to be thread-local.
  19.107 +  hrl_assert_sets_match(this, proxy_set);
  19.108 +  hrl_assert_region_ok(this, hr, this);
  19.109 +
  19.110 +  hr->set_containing_set(NULL);
  19.111 +  proxy_set->update_for_addition(hr);
  19.112 +}
  19.113 +
  19.114 +//////////////////// HeapRegionLinkedList ////////////////////
  19.115 +
  19.116 +inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
  19.117 +  hrl_assert_mt_safety_ok(this);
  19.118 +  assert((length() == 0 && _head == NULL && _tail == NULL) ||
  19.119 +         (length() >  0 && _head != NULL && _tail != NULL),
  19.120 +         hrl_ext_msg(this, "invariant"));
  19.121 +  // add_internal() will verify the region.
  19.122 +  add_internal(hr);
  19.123 +
  19.124 +  // Now link the region.
  19.125 +  if (_tail != NULL) {
  19.126 +    _tail->set_next(hr);
  19.127 +  } else {
  19.128 +    _head = hr;
  19.129 +  }
  19.130 +  _tail = hr;
  19.131 +}
  19.132 +
  19.133 +inline HeapRegion* HeapRegionLinkedList::remove_head() {
  19.134 +  hrl_assert_mt_safety_ok(this);
  19.135 +  assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
  19.136 +  assert(length() > 0 && _head != NULL && _tail != NULL,
  19.137 +         hrl_ext_msg(this, "invariant"));
  19.138 +
  19.139 +  // We need to unlink it first.
  19.140 +  HeapRegion* hr = _head;
  19.141 +  _head = hr->next();
  19.142 +  if (_head == NULL) {
  19.143 +    _tail = NULL;
  19.144 +  }
  19.145 +  hr->set_next(NULL);
  19.146 +
  19.147 +  // remove_internal() will verify the region.
  19.148 +  remove_internal(hr);
  19.149 +  return hr;
  19.150 +}
  19.151 +
  19.152 +inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
  19.153 +  hrl_assert_mt_safety_ok(this);
  19.154 +
  19.155 +  if (!is_empty()) {
  19.156 +    return remove_head();
  19.157 +  } else {
  19.158 +    return NULL;
  19.159 +  }
  19.160 +}
  19.161 +
  19.162 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
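A short sketch (helper name assumed, not from this changeset) combining the constant-time list operations implemented above:

    #include "gc_implementation/g1/heapRegionSets.hpp"

    // Hypothetical helper: splice one free list onto another, then drain it.
    void splice_and_drain(FreeRegionList* from_list, FreeRegionList* to_list) {
      // Constant-time concatenation; from_list is left empty afterwards.
      to_list->add_as_tail(from_list);

      // remove_head_or_null() returns NULL once the list is empty.
      HeapRegion* hr = to_list->remove_head_or_null();
      while (hr != NULL) {
        // ... hand hr out to an allocator, etc. ...
        hr = to_list->remove_head_or_null();
      }
    }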
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Thu Jan 20 13:57:12 2011 -0800
    20.3 @@ -0,0 +1,102 @@
    20.4 +/*
    20.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    20.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.7 + *
    20.8 + * This code is free software; you can redistribute it and/or modify it
    20.9 + * under the terms of the GNU General Public License version 2 only, as
   20.10 + * published by the Free Software Foundation.
   20.11 + *
   20.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   20.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   20.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   20.15 + * version 2 for more details (a copy is included in the LICENSE file that
   20.16 + * accompanied this code).
   20.17 + *
   20.18 + * You should have received a copy of the GNU General Public License version
   20.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   20.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   20.21 + *
   20.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20.23 + * or visit www.oracle.com if you need additional information or have any
   20.24 + * questions.
   20.25 + *
   20.26 + */
   20.27 +
   20.28 +#include "precompiled.hpp"
   20.29 +#include "gc_implementation/g1/heapRegionSets.hpp"
   20.30 +
   20.31 +//////////////////// FreeRegionList ////////////////////
   20.32 +
   20.33 +const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
   20.34 +  if (hr->is_young()) {
   20.35 +    return "the region should not be young";
   20.36 +  }
   20.37 +  // The superclass will check that the region is empty and
   20.38 +  // not-humongous.
   20.39 +  return HeapRegionLinkedList::verify_region_extra(hr);
   20.40 +}
   20.41 +
   20.42 +//////////////////// MasterFreeRegionList ////////////////////
   20.43 +
   20.44 +bool MasterFreeRegionList::check_mt_safety() {
   20.45 +  // Master Free List MT safety protocol:
   20.46 +  // (a) If we're at a safepoint, operations on the master free list
   20.47 +  // should be invoked by either the VM thread (which will serialize
   20.48 +  // them) or by the GC workers while holding the
   20.49 +  // FreeList_lock.
   20.50 +  // (b) If we're not at a safepoint, operations on the master free
   20.51 +  // list should be invoked while holding the Heap_lock.
   20.52 +
   20.53 +  guarantee((SafepointSynchronize::is_at_safepoint() &&
   20.54 +               (Thread::current()->is_VM_thread() ||
   20.55 +                                            FreeList_lock->owned_by_self())) ||
   20.56 +            (!SafepointSynchronize::is_at_safepoint() &&
   20.57 +                                                Heap_lock->owned_by_self()),
   20.58 +            hrl_ext_msg(this, "master free list MT safety protocol"));
   20.59 +
   20.60 +  return FreeRegionList::check_mt_safety();
   20.61 +}
   20.62 +
   20.63 +//////////////////// SecondaryFreeRegionList ////////////////////
   20.64 +
   20.65 +bool SecondaryFreeRegionList::check_mt_safety() {
   20.66 +  // Secondary Free List MT safety protocol:
   20.67 +  // Operations on the secondary free list should always be invoked
   20.68 +  // while holding the SecondaryFreeList_lock.
   20.69 +
   20.70 +  guarantee(SecondaryFreeList_lock->owned_by_self(),
   20.71 +            hrl_ext_msg(this, "secondary free list MT safety protocol"));
   20.72 +
   20.73 +  return FreeRegionList::check_mt_safety();
   20.74 +}
   20.75 +
   20.76 +//////////////////// HumongousRegionSet ////////////////////
   20.77 +
   20.78 +const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
   20.79 +  if (hr->is_young()) {
   20.80 +    return "the region should not be young";
   20.81 +  }
   20.82 +  // The superclass will check that the region is not empty and
   20.83 +  // humongous.
   20.84 +  return HeapRegionSet::verify_region_extra(hr);
   20.85 +}
   20.86 +
    20.87 +//////////////////// MasterHumongousRegionSet ////////////////////
   20.88 +
   20.89 +bool MasterHumongousRegionSet::check_mt_safety() {
   20.90 +  // Master Humongous Set MT safety protocol:
   20.91 +  // (a) If we're at a safepoint, operations on the master humongous
   20.92 +  // set should be invoked by either the VM thread (which will
   20.93 +  // serialize them) or by the GC workers while holding the
   20.94 +  // OldSets_lock.
   20.95 +  // (b) If we're not at a safepoint, operations on the master
   20.96 +  // humongous set should be invoked while holding the Heap_lock.
   20.97 +
   20.98 +  guarantee((SafepointSynchronize::is_at_safepoint() &&
   20.99 +               (Thread::current()->is_VM_thread() ||
  20.100 +                                             OldSets_lock->owned_by_self())) ||
  20.101 +            (!SafepointSynchronize::is_at_safepoint() &&
  20.102 +                                                 Heap_lock->owned_by_self()),
  20.103 +            hrl_ext_msg(this, "master humongous set MT safety protocol"));
  20.104 +  return HumongousRegionSet::check_mt_safety();
  20.105 +}
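To make the protocols above concrete, here is a sketch (assumed, not part of this changeset) of a master free list operation performed outside a safepoint, where check_mt_safety() expects the Heap_lock to be held:

    #include "gc_implementation/g1/heapRegionSets.hpp"
    #include "runtime/mutexLocker.hpp"

    // Hypothetical helper: pop a region off the master free list at allocation
    // time (not at a safepoint), holding the Heap_lock as the protocol requires.
    HeapRegion* pop_from_master_free_list(MasterFreeRegionList* master_list) {
      MutexLockerEx ml(Heap_lock);
      return master_list->remove_head_or_null();
    }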
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Thu Jan 20 13:57:12 2011 -0800
    21.3 @@ -0,0 +1,86 @@
    21.4 +/*
     21.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    21.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    21.7 + *
    21.8 + * This code is free software; you can redistribute it and/or modify it
    21.9 + * under the terms of the GNU General Public License version 2 only, as
   21.10 + * published by the Free Software Foundation.
   21.11 + *
   21.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   21.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   21.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   21.15 + * version 2 for more details (a copy is included in the LICENSE file that
   21.16 + * accompanied this code).
   21.17 + *
   21.18 + * You should have received a copy of the GNU General Public License version
   21.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   21.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   21.21 + *
   21.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21.23 + * or visit www.oracle.com if you need additional information or have any
   21.24 + * questions.
   21.25 + *
   21.26 + */
   21.27 +
   21.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
   21.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
   21.30 +
   21.31 +#include "gc_implementation/g1/heapRegionSet.inline.hpp"
   21.32 +
   21.33 +//////////////////// FreeRegionList ////////////////////
   21.34 +
   21.35 +class FreeRegionList : public HeapRegionLinkedList {
   21.36 +protected:
   21.37 +  virtual const char* verify_region_extra(HeapRegion* hr);
   21.38 +
   21.39 +  virtual bool regions_humongous() { return false; }
   21.40 +  virtual bool regions_empty()     { return true;  }
   21.41 +
   21.42 +public:
   21.43 +  FreeRegionList(const char* name) : HeapRegionLinkedList(name) { }
   21.44 +};
   21.45 +
   21.46 +//////////////////// MasterFreeRegionList ////////////////////
   21.47 +
   21.48 +class MasterFreeRegionList : public FreeRegionList {
   21.49 +protected:
   21.50 +  virtual bool check_mt_safety();
   21.51 +
   21.52 +public:
   21.53 +  MasterFreeRegionList(const char* name) : FreeRegionList(name) { }
   21.54 +};
   21.55 +
   21.56 +//////////////////// SecondaryFreeRegionList ////////////////////
   21.57 +
   21.58 +class SecondaryFreeRegionList : public FreeRegionList {
   21.59 +protected:
   21.60 +  virtual bool check_mt_safety();
   21.61 +
   21.62 +public:
   21.63 +  SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { }
   21.64 +};
   21.65 +
   21.66 +//////////////////// HumongousRegionSet ////////////////////
   21.67 +
   21.68 +class HumongousRegionSet : public HeapRegionSet {
   21.69 +protected:
   21.70 +  virtual const char* verify_region_extra(HeapRegion* hr);
   21.71 +
   21.72 +  virtual bool regions_humongous() { return true;  }
   21.73 +  virtual bool regions_empty()     { return false; }
   21.74 +
   21.75 +public:
   21.76 +  HumongousRegionSet(const char* name) : HeapRegionSet(name) { }
   21.77 +};
   21.78 +
   21.79 +//////////////////// MasterHumongousRegionSet ////////////////////
   21.80 +
   21.81 +class MasterHumongousRegionSet : public HumongousRegionSet {
   21.82 +protected:
   21.83 +  virtual bool check_mt_safety();
   21.84 +
   21.85 +public:
   21.86 +  MasterHumongousRegionSet(const char* name) : HumongousRegionSet(name) { }
   21.87 +};
   21.88 +
   21.89 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSETS_HPP
    22.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Jan 19 19:24:34 2011 -0800
    22.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Thu Jan 20 13:57:12 2011 -0800
    22.3 @@ -1,5 +1,5 @@
    22.4  /*
    22.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    22.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    22.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    22.8   *
    22.9   * This code is free software; you can redistribute it and/or modify it
   22.10 @@ -38,8 +38,8 @@
   22.11  # include "thread_windows.inline.hpp"
   22.12  #endif
   22.13  
   22.14 -PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
   22.15 -  _qset(qset_), _buf(NULL), _index(0), _active(active),
   22.16 +PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
   22.17 +  _qset(qset), _buf(NULL), _index(0), _active(active),
   22.18    _perm(perm), _lock(NULL)
   22.19  {}
   22.20  
   22.21 @@ -153,10 +153,16 @@
   22.22  }
   22.23  
   22.24  void PtrQueue::handle_zero_index() {
   22.25 -  assert(0 == _index, "Precondition.");
   22.26 +  assert(_index == 0, "Precondition.");
   22.27 +
   22.28    // This thread records the full buffer and allocates a new one (while
   22.29    // holding the lock if there is one).
   22.30    if (_buf != NULL) {
   22.31 +    if (!should_enqueue_buffer()) {
   22.32 +      assert(_index > 0, "the buffer can only be re-used if it's not full");
   22.33 +      return;
   22.34 +    }
   22.35 +
   22.36      if (_lock) {
   22.37        assert(_lock->owned_by_self(), "Required.");
   22.38  
    23.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Wed Jan 19 19:24:34 2011 -0800
    23.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Thu Jan 20 13:57:12 2011 -0800
    23.3 @@ -1,5 +1,5 @@
    23.4  /*
    23.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    23.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    23.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    23.8   *
    23.9   * This code is free software; you can redistribute it and/or modify it
   23.10 @@ -68,7 +68,7 @@
   23.11  public:
   23.12    // Initialize this queue to contain a null buffer, and be part of the
   23.13    // given PtrQueueSet.
   23.14 -  PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);
   23.15 +  PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
   23.16    // Release any contained resources.
   23.17    void flush();
   23.18    // Calls flush() when destroyed.
   23.19 @@ -85,6 +85,14 @@
   23.20      else enqueue_known_active(ptr);
   23.21    }
   23.22  
   23.23 +  // This method is called when we're doing the zero index handling
   23.24 +  // and gives a chance to the queues to do any pre-enqueueing
   23.25 +  // processing they might want to do on the buffer. It should return
   23.26 +  // true if the buffer should be enqueued, or false if enough
   23.27 +  // entries were cleared from it so that it can be re-used. It should
   23.28 +  // not return false if the buffer is still full (otherwise we can
   23.29 +  // get into an infinite loop).
   23.30 +  virtual bool should_enqueue_buffer() { return true; }
   23.31    void handle_zero_index();
   23.32    void locking_enqueue_completed_buffer(void** buf);
   23.33  
    24.1 --- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jan 19 19:24:34 2011 -0800
    24.2 +++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Jan 20 13:57:12 2011 -0800
    24.3 @@ -1,5 +1,5 @@
    24.4  /*
    24.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    24.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    24.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    24.8   *
    24.9   * This code is free software; you can redistribute it and/or modify it
   24.10 @@ -23,12 +23,98 @@
   24.11   */
   24.12  
   24.13  #include "precompiled.hpp"
   24.14 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   24.15  #include "gc_implementation/g1/satbQueue.hpp"
   24.16  #include "memory/allocation.inline.hpp"
   24.17  #include "memory/sharedHeap.hpp"
   24.18  #include "runtime/mutexLocker.hpp"
   24.19  #include "runtime/thread.hpp"
   24.20  
   24.21 +// This method removes entries from an SATB buffer that will not be
   24.22 +// useful to the concurrent marking threads. An entry is removed if it
   24.23 +// satisfies one of the following conditions:
   24.24 +//
   24.25 +// * it points to an object outside the G1 heap (G1's concurrent
   24.26 +//     marking only visits objects inside the G1 heap),
   24.27 +// * it points to an object that has been allocated since marking
   24.28 +//     started (according to SATB those objects do not need to be
   24.29 +//     visited during marking), or
   24.30 +// * it points to an object that has already been marked (no need to
   24.31 +//     process it again).
   24.32 +//
   24.33 +// The rest of the entries will be retained and are compacted towards
   24.34 +// the top of the buffer. If with this filtering we clear a large
   24.35 +// enough chunk of the buffer we can re-use it (instead of enqueueing
   24.36 +// it) and we can just allow the mutator to carry on executing.
   24.37 +
   24.38 +bool ObjPtrQueue::should_enqueue_buffer() {
   24.39 +  assert(_lock == NULL || _lock->owned_by_self(),
   24.40 +         "we should have taken the lock before calling this");
   24.41 +
   24.42 +  // A value of 0 means "don't filter SATB buffers".
   24.43 +  if (G1SATBBufferEnqueueingThresholdPercent == 0) {
   24.44 +    return true;
   24.45 +  }
   24.46 +
   24.47 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   24.48 +
   24.49 +  // This method should only be called if there is a non-NULL buffer
   24.50 +  // that is full.
   24.51 +  assert(_index == 0, "pre-condition");
   24.52 +  assert(_buf != NULL, "pre-condition");
   24.53 +
   24.54 +  void** buf = _buf;
   24.55 +  size_t sz = _sz;
   24.56 +
   24.57 +  // Used for sanity checking at the end of the loop.
   24.58 +  debug_only(size_t entries = 0; size_t retained = 0;)
   24.59 +
   24.60 +  size_t i = sz;
   24.61 +  size_t new_index = sz;
   24.62 +
   24.63 +  // Given that we are expecting _index == 0, we could have changed
   24.64 +  // the loop condition to (i > 0). But we are using _index for
   24.65 +  // generality.
   24.66 +  while (i > _index) {
   24.67 +    assert(i > 0, "we should have at least one more entry to process");
   24.68 +    i -= oopSize;
   24.69 +    debug_only(entries += 1;)
   24.70 +    oop* p = (oop*) &buf[byte_index_to_index((int) i)];
   24.71 +    oop obj = *p;
   24.72 +    // NULL the entry so that unused parts of the buffer contain NULLs
   24.73 +    // at the end. If we are going to retain it we will copy it to its
   24.74 +    // final place. If we have retained all entries we have visited so
   24.75 +    // far, we'll just end up copying it to the same place.
   24.76 +    *p = NULL;
   24.77 +
   24.78 +    bool retain = g1h->is_obj_ill(obj);
   24.79 +    if (retain) {
   24.80 +      assert(new_index > 0, "we should not have already filled up the buffer");
   24.81 +      new_index -= oopSize;
   24.82 +      assert(new_index >= i,
    24.83 +             "new_index should never be below i, as we always compact 'up'");
   24.84 +      oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
   24.85 +      assert(new_p >= p, "the destination location should never be below "
   24.86 +             "the source as we always compact 'up'");
   24.87 +      assert(*new_p == NULL,
   24.88 +             "we should have already cleared the destination location");
   24.89 +      *new_p = obj;
   24.90 +      debug_only(retained += 1;)
   24.91 +    }
   24.92 +  }
   24.93 +  size_t entries_calc = (sz - _index) / oopSize;
   24.94 +  assert(entries == entries_calc, "the number of entries we counted "
   24.95 +         "should match the number of entries we calculated");
   24.96 +  size_t retained_calc = (sz - new_index) / oopSize;
   24.97 +  assert(retained == retained_calc, "the number of retained entries we counted "
   24.98 +         "should match the number of retained entries we calculated");
   24.99 +  size_t perc = retained_calc * 100 / entries_calc;
  24.100 +  bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
  24.101 +  _index = new_index;
  24.102 +
  24.103 +  return should_enqueue;
  24.104 +}
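A worked example of the threshold decision above (all numbers assumed for illustration): with a 1 KB buffer and 8-byte oops there are sz / oopSize == 128 entries; if filtering retains 20 of them, then perc == 20 * 100 / 128 == 15, so with an enqueueing threshold of, say, 60 the comparison 15 > 60 fails and the buffer is handed back to the mutator for re-use instead of being enqueued.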
  24.105 +
  24.106  void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
  24.107    if (_buf != NULL) {
  24.108      apply_closure_to_buffer(cl, _buf, _index, _sz);
    25.1 --- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Jan 19 19:24:34 2011 -0800
    25.2 +++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Thu Jan 20 13:57:12 2011 -0800
    25.3 @@ -1,5 +1,5 @@
    25.4  /*
    25.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    25.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    25.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.8   *
    25.9   * This code is free software; you can redistribute it and/or modify it
   25.10 @@ -33,13 +33,18 @@
   25.11  // A ptrQueue whose elements are "oops", pointers to object heads.
   25.12  class ObjPtrQueue: public PtrQueue {
   25.13  public:
   25.14 -  ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) :
   25.15 +  ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
   25.16      // SATB queues are only active during marking cycles. We create
   25.17      // them with their active field set to false. If a thread is
   25.18      // created during a cycle and its SATB queue needs to be activated
   25.19      // before the thread starts running, we'll need to set its active
   25.20      // field to true. This is done in JavaThread::initialize_queues().
   25.21 -    PtrQueue(qset_, perm, false /* active */) { }
   25.22 +    PtrQueue(qset, perm, false /* active */) { }
   25.23 +
   25.24 +  // Overrides PtrQueue::should_enqueue_buffer(). See the method's
   25.25 +  // definition for more information.
   25.26 +  virtual bool should_enqueue_buffer();
   25.27 +
   25.28    // Apply the closure to all elements, and reset the index to make the
   25.29    // buffer empty.
   25.30    void apply_closure(ObjectClosure* cl);
    26.1 --- a/src/share/vm/runtime/mutexLocker.cpp	Wed Jan 19 19:24:34 2011 -0800
    26.2 +++ b/src/share/vm/runtime/mutexLocker.cpp	Thu Jan 20 13:57:12 2011 -0800
    26.3 @@ -1,5 +1,5 @@
    26.4  /*
    26.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    26.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    26.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.8   *
    26.9   * This code is free software; you can redistribute it and/or modify it
   26.10 @@ -80,8 +80,6 @@
   26.11  Monitor* iCMS_lock                    = NULL;
   26.12  Monitor* FullGCCount_lock             = NULL;
   26.13  Monitor* CMark_lock                   = NULL;
   26.14 -Monitor* ZF_mon                       = NULL;
   26.15 -Monitor* Cleanup_mon                  = NULL;
   26.16  Mutex*   CMRegionStack_lock           = NULL;
   26.17  Mutex*   SATB_Q_FL_lock               = NULL;
   26.18  Monitor* SATB_Q_CBL_mon               = NULL;
   26.19 @@ -122,6 +120,9 @@
   26.20  Mutex*   PerfDataManager_lock         = NULL;
   26.21  Mutex*   OopMapCacheAlloc_lock        = NULL;
   26.22  
   26.23 +Mutex*   FreeList_lock                = NULL;
   26.24 +Monitor* SecondaryFreeList_lock       = NULL;
   26.25 +Mutex*   OldSets_lock                 = NULL;
   26.26  Mutex*   MMUTracker_lock              = NULL;
   26.27  Mutex*   HotCardCache_lock            = NULL;
   26.28  
   26.29 @@ -177,8 +178,6 @@
   26.30    }
   26.31    if (UseG1GC) {
   26.32      def(CMark_lock                 , Monitor, nonleaf,     true ); // coordinate concurrent mark thread
   26.33 -    def(ZF_mon                     , Monitor, leaf,        true );
   26.34 -    def(Cleanup_mon                , Monitor, nonleaf,     true );
   26.35      def(CMRegionStack_lock         , Mutex,   leaf,        true );
   26.36      def(SATB_Q_FL_lock             , Mutex  , special,     true );
   26.37      def(SATB_Q_CBL_mon             , Monitor, nonleaf,     true );
   26.38 @@ -188,6 +187,9 @@
   26.39      def(DirtyCardQ_CBL_mon         , Monitor, nonleaf,     true );
   26.40      def(Shared_DirtyCardQ_lock     , Mutex,   nonleaf,     true );
   26.41  
   26.42 +    def(FreeList_lock              , Mutex,   leaf     ,   true );
   26.43 +    def(SecondaryFreeList_lock     , Monitor, leaf     ,   true );
   26.44 +    def(OldSets_lock               , Mutex  , leaf     ,   true );
   26.45      def(MMUTracker_lock            , Mutex  , leaf     ,   true );
   26.46      def(HotCardCache_lock          , Mutex  , special  ,   true );
   26.47      def(EvacFailureStack_lock      , Mutex  , nonleaf  ,   true );
    27.1 --- a/src/share/vm/runtime/mutexLocker.hpp	Wed Jan 19 19:24:34 2011 -0800
    27.2 +++ b/src/share/vm/runtime/mutexLocker.hpp	Thu Jan 20 13:57:12 2011 -0800
    27.3 @@ -1,5 +1,5 @@
    27.4  /*
    27.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    27.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    27.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    27.8   *
    27.9   * This code is free software; you can redistribute it and/or modify it
   27.10 @@ -76,8 +76,6 @@
   27.11  extern Monitor* iCMS_lock;                       // CMS incremental mode start/stop notification
   27.12  extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
   27.13  extern Monitor* CMark_lock;                      // used for concurrent mark thread coordination
   27.14 -extern Monitor* ZF_mon;                          // used for G1 conc zero-fill.
   27.15 -extern Monitor* Cleanup_mon;                     // used for G1 conc cleanup.
   27.16  extern Mutex*   CMRegionStack_lock;              // used for protecting accesses to the CM region stack
   27.17  extern Mutex*   SATB_Q_FL_lock;                  // Protects SATB Q
   27.18                                                   // buffer free list.
   27.19 @@ -125,6 +123,9 @@
   27.20  extern Mutex*   ParkerFreeList_lock;
   27.21  extern Mutex*   OopMapCacheAlloc_lock;           // protects allocation of oop_map caches
   27.22  
   27.23 +extern Mutex*   FreeList_lock;                   // protects the free region list during safepoints
   27.24 +extern Monitor* SecondaryFreeList_lock;          // protects the secondary free region list
   27.25 +extern Mutex*   OldSets_lock;                    // protects the old region sets
   27.26  extern Mutex*   MMUTracker_lock;                 // protects the MMU
   27.27                                                   // tracker data structures
   27.28  extern Mutex*   HotCardCache_lock;               // protects the hot card cache
    28.1 --- a/src/share/vm/utilities/debug.hpp	Wed Jan 19 19:24:34 2011 -0800
    28.2 +++ b/src/share/vm/utilities/debug.hpp	Thu Jan 20 13:57:12 2011 -0800
    28.3 @@ -34,6 +34,7 @@
    28.4  class FormatBuffer {
    28.5  public:
    28.6    inline FormatBuffer(const char * format, ...);
    28.7 +  inline void append(const char* format, ...);
    28.8    operator const char *() const { return _buf; }
    28.9  
   28.10  private:
   28.11 @@ -51,6 +52,19 @@
   28.12    va_end(argp);
   28.13  }
   28.14  
   28.15 +template <size_t bufsz>
   28.16 +void FormatBuffer<bufsz>::append(const char* format, ...) {
   28.17 +  // Given that the constructor does a vsnprintf we can assume that
   28.18 +  // _buf is already initialized.
   28.19 +  size_t len = strlen(_buf);
   28.20 +  char* buf_end = _buf + len;
   28.21 +
   28.22 +  va_list argp;
   28.23 +  va_start(argp, format);
   28.24 +  vsnprintf(buf_end, bufsz - len, format, argp);
   28.25 +  va_end(argp);
   28.26 +}
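A small assumed usage sketch of the new append() method (the values are made up); err_msg is the FormatBuffer<> typedef declared just below:

    err_msg msg("length: "SIZE_FORMAT, (size_t) 10);
    msg.append(", used bytes: "SIZE_FORMAT, (size_t) 0);
    tty->print_cr("%s", (const char*) msg);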
   28.27 +
   28.28  // Used to format messages for assert(), guarantee(), fatal(), etc.
   28.29  typedef FormatBuffer<> err_msg;
   28.30  
    29.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Wed Jan 19 19:24:34 2011 -0800
    29.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Thu Jan 20 13:57:12 2011 -0800
    29.3 @@ -1,5 +1,5 @@
    29.4  /*
    29.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    29.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    29.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.8   *
    29.9   * This code is free software; you can redistribute it and/or modify it
   29.10 @@ -1179,6 +1179,8 @@
   29.11  // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll"
   29.12  // (in ILP32).
   29.13  
   29.14 +#define BOOL_TO_STR(__b) (__b) ? "true" : "false"
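An assumed usage sketch of the new macro; because the expansion is a bare conditional expression, it is used here as a complete printf-style argument:

    bool active = false;
    tty->print_cr("active: %s", BOOL_TO_STR(active));  // prints "active: false"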
   29.15 +
   29.16  // Format 32-bit quantities.
   29.17  #define INT32_FORMAT  "%d"
   29.18  #define UINT32_FORMAT "%u"

mercurial