src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

changeset 3357:441e946dc1af
parent    3338:adedfbbf0360
child     3410:bacb651cf5bf
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -1165,9 +1165,9 @@
       _g1(g1)
   { }
 
-  void work(int i) {
-    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
-    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
+  void work(uint worker_id) {
+    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
+    _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
                                          _g1->workers()->active_workers(),
                                          HeapRegion::RebuildRSClaimValue);
   }
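
Signature changes such as work(int) -> work(uint) deserve extra care in the
C++98 dialect HotSpot used at the time: there is no override keyword, so a
subclass that keeps the old parameter type silently declares a brand-new
overload instead of overriding the gang task's virtual method. A minimal
sketch of the hazard (class and function names are illustrative, not
HotSpot's):

    #include <cstdio>

    // Simplified stand-in for an AbstractGangTask-style base class: the
    // parallel framework calls work(worker_id) once per worker thread.
    struct GangTask {
      virtual ~GangTask() {}
      virtual void work(unsigned int worker_id) {
        std::printf("base work(%u)\n", worker_id);
      }
    };

    // If the base signature moves to unsigned but a subclass keeps int,
    // the subclass method is an unrelated overload, not an override.
    struct StaleTask : GangTask {
      void work(int i) { std::printf("stale work(%d)\n", i); }
    };

    int main() {
      StaleTask t;
      GangTask* gt = &t;
      gt->work(0u);  // dispatches to GangTask::work, not StaleTask::work
      return 0;
    }
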
@@ -1374,7 +1374,7 @@
 
     // Rebuild remembered sets of all regions.
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      int n_workers =
+      uint n_workers =
         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                        workers()->active_workers(),
                                        Threads::number_of_non_daemon_threads());
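
For context on the call above: AdaptiveSizePolicy::calc_active_workers
decides how many of the gang's total workers to activate for this pause,
taking the current active count and the number of non-daemon application
threads as inputs. A rough, illustrative sketch of that kind of policy
(not HotSpot's actual formula, which lives in adaptiveSizePolicy.cpp):

    #include <algorithm>

    // Illustrative only: pick an active worker count that is at least 1,
    // no larger than the gang's total, and loosely scaled to the number
    // of application threads that could be generating work.
    unsigned calc_active_workers(unsigned total_workers,
                                 unsigned active_workers,
                                 unsigned application_threads) {
      unsigned wanted = std::max(application_threads, active_workers);
      return std::min(std::max(wanted, 1u), total_workers);
    }
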
@@ -2519,11 +2519,11 @@
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                                 int worker,
-                                                 int no_of_par_workers,
+                                                 uint worker,
+                                                 uint no_of_par_workers,
                                                  jint claim_value) {
   const size_t regions = n_regions();
-  const size_t max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                              no_of_par_workers :
                              1);
   assert(UseDynamicNumberOfGCThreads ||
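
heap_region_par_iterate_chunked divides the region array among the active
workers, and a claim value stamped on each region (RebuildRSClaimValue,
ParVerifyClaimValue, and so on in this patch) keeps two workers from
processing the same region in the same pass. A minimal sketch of the
claim-value idea, using C++11 atomics as a stand-in for HotSpot's
Atomic::cmpxchg (names and layout are illustrative, not HotSpot's):

    #include <atomic>
    #include <cstddef>

    // Each region remembers the claim value of the last pass that took
    // it.  A worker owns a region for the current pass only if it wins
    // the CAS to the pass's claim value; starting a new pass with a
    // fresh value makes every region claimable again with no reset.
    struct Region {
      std::atomic<int> claim{0};
    };

    static bool claim_region(Region& r, int claim_value) {
      int cur = r.claim.load();
      while (cur != claim_value) {
        if (r.claim.compare_exchange_weak(cur, claim_value)) {
          return true;   // this worker claimed it for this pass
        }
      }
      return false;      // another worker got here first
    }

    // n_workers must be >= 1.  Start each worker at its own offset and
    // sweep the whole array; the CAS arbitrates wherever sweeps overlap.
    void par_iterate(Region* regions, size_t n_regions,
                     unsigned worker_id, unsigned n_workers,
                     int claim_value, void (*process)(Region&)) {
      size_t start = n_regions * worker_id / n_workers;
      for (size_t k = 0; k < n_regions; k++) {
        Region& r = regions[(start + k) % n_regions];
        if (claim_region(r, claim_value)) {
          process(r);
        }
      }
    }
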
@@ -2739,7 +2739,7 @@
   result = g1_policy()->collection_set();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     size_t cs_size = g1_policy()->cset_region_length();
-    int active_workers = workers()->active_workers();
+    uint active_workers = workers()->active_workers();
     assert(UseDynamicNumberOfGCThreads ||
              active_workers == workers()->total_workers(),
              "Unless dynamic should use total workers");
@@ -3075,10 +3075,10 @@
     return _failures;
   }
 
-  void work(int worker_i) {
+  void work(uint worker_id) {
     HandleMark hm;
     VerifyRegionClosure blk(_allow_dirty, true, _vo);
-    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
+    _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
                                           _g1h->workers()->active_workers(),
                                           HeapRegion::ParVerifyClaimValue);
     if (blk.failures()) {
@@ -4725,7 +4725,7 @@
   G1CollectedHeap*       _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;
 
   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
@@ -4765,18 +4765,18 @@
     _n_workers = active_workers;
   }
 
-  void work(int i) {
-    if (i >= _n_workers) return;  // no work needed this round
+  void work(uint worker_id) {
+    if (worker_id >= _n_workers) return;  // no work needed this round
 
     double start_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
+    _g1h->g1_policy()->record_gc_worker_start_time(worker_id, start_time_ms);
 
     ResourceMark rm;
     HandleMark   hm;
 
     ReferenceProcessor*             rp = _g1h->ref_processor_stw();
 
-    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanThreadState            pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
@@ -4808,7 +4808,7 @@
                                   scan_root_cl,
                                   &push_heap_rs_cl,
                                   scan_perm_cl,
-                                  i);
+                                  worker_id);
     pss.end_strong_roots();
 
     {
@@ -4817,8 +4817,8 @@
       evac.do_void();
       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
       double term_ms = pss.term_time()*1000.0;
-      _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
-      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
+      _g1h->g1_policy()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
+      _g1h->g1_policy()->record_termination(worker_id, term_ms, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4828,12 +4828,12 @@
 
     if (ParallelGCVerbose) {
       MutexLocker x(stats_lock());
-      pss.print_termination_stats(i);
+      pss.print_termination_stats(worker_id);
     }
 
     assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
+    _g1h->g1_policy()->record_gc_worker_end_time(worker_id, end_time_ms);
   }
 };
 
@@ -5091,14 +5091,14 @@
     _terminator(terminator)
   {}
 
-  virtual void work(int i) {
+  virtual void work(uint worker_id) {
     // The reference processing task executed by a single worker.
     ResourceMark rm;
    HandleMark   hm;
 
     G1STWIsAliveClosure is_alive(_g1h);
 
-    G1ParScanThreadState pss(_g1h, i);
+    G1ParScanThreadState pss(_g1h, worker_id);
 
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
@@ -5130,7 +5130,7 @@
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
 
     // Call the reference processing task's work routine.
-    _proc_task.work(i, is_alive, keep_alive, drain_queue);
+    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
 
     // Note we cannot assert that the refs array is empty here as not all
     // of the processing tasks (specifically phase2 - pp2_work) execute
@@ -5165,8 +5165,8 @@
     _enq_task(enq_task)
   { }
 
-  virtual void work(int i) {
-    _enq_task.work(i);
+  virtual void work(uint worker_id) {
+    _enq_task.work(worker_id);
   }
 };
 
@@ -5195,7 +5195,7 @@
   G1CollectedHeap* _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;
 
 public:
   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
@@ -5206,11 +5206,11 @@
     _n_workers(workers)
   { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanThreadState            pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
@@ -5246,17 +5246,17 @@
 
     ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
-    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
-    int stride = MIN2(MAX2(_n_workers, 1), limit);
+    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
+    uint stride = MIN2(MAX2(_n_workers, 1U), limit);
 
     // limit is set using max_num_q() - which was set using ParallelGCThreads.
     // So this must be true - but assert just in case someone decides to
     // change the worker ids.
-    assert(0 <= i && i < limit, "sanity");
+    assert(0 <= worker_id && worker_id < limit, "sanity");
     assert(!rp->discovery_is_atomic(), "check this code");
 
     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
-    for (int idx = i; idx < limit; idx += stride) {
+    for (uint idx = worker_id; idx < limit; idx += stride) {
       DiscoveredList& ref_list = rp->discovered_refs()[idx];
 
       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
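
The 1U in MIN2(MAX2(_n_workers, 1U), limit) above is forced by the type
change: HotSpot's MIN2/MAX2 are function templates, so once _n_workers is
uint the plain literal 1 (an int) would make template argument deduction
ambiguous. A small self-contained illustration (the MIN2/MAX2 bodies are
rough stand-ins for the definitions in globalDefinitions.hpp):

    #include <cstdio>

    // Rough stand-ins for HotSpot's MIN2/MAX2 function templates:
    // both arguments must deduce the same T.
    template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }
    template <class T> T MIN2(T a, T b) { return (a < b) ? a : b; }

    int main() {
      unsigned int n_workers = 4;
      unsigned int limit     = 16;

      // MAX2(n_workers, 1) fails to compile once n_workers is unsigned:
      // T cannot be deduced as both 'unsigned int' and 'int'.  Writing
      // the literal as 1U keeps both arguments the same type, hence the
      // MIN2(MAX2(_n_workers, 1U), limit) form in the patch.
      unsigned int stride = MIN2(MAX2(n_workers, 1U), limit);
      std::printf("stride = %u\n", stride);
      return 0;
    }
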
@@ -5310,7 +5310,7 @@
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.
 
-  int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                         workers()->active_workers() : 1);
 
   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
@@ -5416,7 +5416,7 @@
   } else {
     // Parallel reference enqueuing
 
-    int active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
+    uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
     assert(active_workers == workers()->active_workers(),
            "Need to reset active_workers");
     assert(rp->num_q() == active_workers, "sanity");
@@ -5445,7 +5445,7 @@
   concurrent_g1_refine()->set_use_cache(false);
   concurrent_g1_refine()->clear_hot_cache_claimed_index();
 
-  int n_workers;
+  uint n_workers;
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     n_workers =
       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
@@ -5658,7 +5658,7 @@
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     HeapRegion* r;
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
@@ -6141,7 +6141,7 @@
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
-  int n_workers = workers()->active_workers();
+  uint n_workers = workers()->active_workers();
   assert(UseDynamicNumberOfGCThreads ||
            n_workers == workers()->total_workers(),
      "Otherwise should be using the total number of workers");
