src/share/vm/utilities/taskqueue.cpp

Tue, 14 Jul 2009 15:40:39 -0700

author
ysr
date
Tue, 14 Jul 2009 15:40:39 -0700
changeset 1280
df6caf649ff7
parent 1014
0fbdb4381b99
child 1746
2a1472c30599
permissions
-rw-r--r--

6700789: G1: Enable use of compressed oops with G1 heaps
Summary: Modifications to G1 so as to allow the use of compressed oops.
Reviewed-by: apetrusenko, coleenp, jmasa, kvn, never, phh, tonyp

     1 /*
     2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_taskqueue.cpp.incl"
#ifdef TRACESPINNING
// Cumulative statistics updated by offer_termination() and reported
// by print_termination_counts(); only compiled in when TRACESPINNING
// is defined.
uint ParallelTaskTerminator::_total_yields = 0;
uint ParallelTaskTerminator::_total_spins = 0;
uint ParallelTaskTerminator::_total_peeks = 0;
#endif
// Returns true if the queue appears non-empty, i.e. the bottom index
// differs from the published top index.  NOTE(review): this is an
// unsynchronized read of two fields, so for a concurrently-modified
// queue the answer may already be stale when the caller acts on it --
// treat it as a hint only.
bool TaskQueueSuper::peek() {
  return _bottom != _age.top();
}
    38 int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
    39   const int a =      16807;
    40   const int m = 2147483647;
    41   const int q =     127773;  /* m div a */
    42   const int r =       2836;  /* m mod a */
    43   assert(sizeof(int) == 4, "I think this relies on that");
    44   int seed = *seed0;
    45   int hi   = seed / q;
    46   int lo   = seed % q;
    47   int test = a * lo - r * hi;
    48   if (test > 0)
    49     seed = test;
    50   else
    51     seed = test + m;
    52   *seed0 = seed;
    53   return seed;
    54 }
// Constructs a terminator for "n_threads" parallel workers that look
// for work in "queue_set".  No thread has offered termination yet,
// so the offer count starts at zero.
ParallelTaskTerminator::
ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0) {}
// Returns true if the associated queue set appears to contain work;
// simply delegates to TaskQueueSetSuper::peek().
bool ParallelTaskTerminator::peek_in_queue_set() {
  return _queue_set->peek();
}
// Yields the CPU via os::yield() while waiting in the termination
// protocol.  The assert checks the invariant that no more threads
// have offered termination than exist.
void ParallelTaskTerminator::yield() {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::yield();
}
// Sleeps the current thread for "millis" milliseconds while waiting
// in the termination protocol.  NOTE(review): the trailing 'false'
// presumably makes the sleep non-interruptible -- confirm against
// the os::sleep() declaration.
void ParallelTaskTerminator::sleep(uint millis) {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::sleep(Thread::current(), millis, false);
}
    76 bool
    77 ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
    78   assert(_offered_termination < _n_threads, "Invariant");
    79   Atomic::inc(&_offered_termination);
    81   uint yield_count = 0;
    82   // Number of hard spin loops done since last yield
    83   uint hard_spin_count = 0;
    84   // Number of iterations in the hard spin loop.
    85   uint hard_spin_limit = WorkStealingHardSpins;
    87   // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
    88   // If it is greater than 0, then start with a small number
    89   // of spins and increase number with each turn at spinning until
    90   // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
    91   // Then do a yield() call and start spinning afresh.
    92   if (WorkStealingSpinToYieldRatio > 0) {
    93     hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    94     hard_spin_limit = MAX2(hard_spin_limit, 1U);
    95   }
    96   // Remember the initial spin limit.
    97   uint hard_spin_start = hard_spin_limit;
    99   // Loop waiting for all threads to offer termination or
   100   // more work.
   101   while (true) {
   102     assert(_offered_termination <= _n_threads, "Invariant");
   103     // Are all threads offering termination?
   104     if (_offered_termination == _n_threads) {
   105       return true;
   106     } else {
   107       // Look for more work.
   108       // Periodically sleep() instead of yield() to give threads
   109       // waiting on the cores the chance to grab this code
   110       if (yield_count <= WorkStealingYieldsBeforeSleep) {
   111         // Do a yield or hardspin.  For purposes of deciding whether
   112         // to sleep, count this as a yield.
   113         yield_count++;
   115         // Periodically call yield() instead spinning
   116         // After WorkStealingSpinToYieldRatio spins, do a yield() call
   117         // and reset the counts and starting limit.
   118         if (hard_spin_count > WorkStealingSpinToYieldRatio) {
   119           yield();
   120           hard_spin_count = 0;
   121           hard_spin_limit = hard_spin_start;
   122 #ifdef TRACESPINNING
   123           _total_yields++;
   124 #endif
   125         } else {
   126           // Hard spin this time
   127           // Increase the hard spinning period but only up to a limit.
   128           hard_spin_limit = MIN2(2*hard_spin_limit,
   129                                  (uint) WorkStealingHardSpins);
   130           for (uint j = 0; j < hard_spin_limit; j++) {
   131             SpinPause();
   132           }
   133           hard_spin_count++;
   134 #ifdef TRACESPINNING
   135           _total_spins++;
   136 #endif
   137         }
   138       } else {
   139         if (PrintGCDetails && Verbose) {
   140          gclog_or_tty->print_cr("ParallelTaskTerminator::offer_termination() "
   141            "thread %d sleeps after %d yields",
   142            Thread::current(), yield_count);
   143         }
   144         yield_count = 0;
   145         // A sleep will cause this processor to seek work on another processor's
   146         // runqueue, if it has nothing else to run (as opposed to the yield
   147         // which may only move the thread to the end of the this processor's
   148         // runqueue).
   149         sleep(WorkStealingSleepMillis);
   150       }
   152 #ifdef TRACESPINNING
   153       _total_peeks++;
   154 #endif
   155       if (peek_in_queue_set() ||
   156           (terminator != NULL && terminator->should_exit_termination())) {
   157         Atomic::dec(&_offered_termination);
   158         assert(_offered_termination < _n_threads, "Invariant");
   159         return false;
   160       }
   161     }
   162   }
   163 }
   165 #ifdef TRACESPINNING
   166 void ParallelTaskTerminator::print_termination_counts() {
   167   gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: %lld  "
   168     "Total spins: %lld  Total peeks: %lld",
   169     total_yields(),
   170     total_spins(),
   171     total_peeks());
   172 }
   173 #endif
   175 void ParallelTaskTerminator::reset_for_reuse() {
   176   if (_offered_termination != 0) {
   177     assert(_offered_termination == _n_threads,
   178            "Terminator may still be in use");
   179     _offered_termination = 0;
   180   }
   181 }
   183 bool RegionTaskQueueWithOverflow::is_empty() {
   184   return (_region_queue.size() == 0) &&
   185          (_overflow_stack->length() == 0);
   186 }
   188 bool RegionTaskQueueWithOverflow::stealable_is_empty() {
   189   return _region_queue.size() == 0;
   190 }
   192 bool RegionTaskQueueWithOverflow::overflow_is_empty() {
   193   return _overflow_stack->length() == 0;
   194 }
// One-time setup: initializes the embedded stealable queue and
// allocates the overflow stack on the C heap with an initial capacity
// of 10.  Must not be called twice -- a prior stack would be leaked,
// hence the assert.  NOTE(review): the trailing 'true' presumably
// marks the GrowableArray's backing storage as C-heap allocated --
// confirm against the GrowableArray constructor.
void RegionTaskQueueWithOverflow::initialize() {
  _region_queue.initialize();
  assert(_overflow_stack == 0, "Creating memory leak");
  _overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
}
   203 void RegionTaskQueueWithOverflow::save(RegionTask t) {
   204   if (TraceRegionTasksQueuing && Verbose) {
   205     gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
   206   }
   207   if(!_region_queue.push(t)) {
   208     _overflow_stack->push(t);
   209   }
   210 }
   212 // Note that using this method will retrieve all regions
   213 // that have been saved but that it will always check
   214 // the overflow stack.  It may be more efficient to
   215 // check the stealable queue and the overflow stack
   216 // separately.
   217 bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
   218   bool result = retrieve_from_overflow(region_task);
   219   if (!result) {
   220     result = retrieve_from_stealable_queue(region_task);
   221   }
   222   if (TraceRegionTasksQueuing && Verbose && result) {
   223     gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
   224   }
   225   return result;
   226 }
   228 bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
   229                                    RegionTask& region_task) {
   230   bool result = _region_queue.pop_local(region_task);
   231   if (TraceRegionTasksQueuing && Verbose) {
   232     gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   233   }
   234   return result;
   235 }
   237 bool
   238 RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
   239   bool result;
   240   if (!_overflow_stack->is_empty()) {
   241     region_task = _overflow_stack->pop();
   242     result = true;
   243   } else {
   244     region_task = (RegionTask) NULL;
   245     result = false;
   246   }
   247   if (TraceRegionTasksQueuing && Verbose) {
   248     gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   249   }
   250   return result;
   251 }

mercurial