src/share/vm/utilities/taskqueue.cpp

author:      ysr
date:        Thu, 20 Nov 2008 16:56:09 -0800
changeset:   888:c96030fff130
parent:      810:81cd571500b0
child:       905:ad8c8ca4ab0f
permissions: -rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
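
A rough sketch of the idea (the names below are hypothetical, for illustration only, and are not the ReferencePolicy API used by the actual change): at marking time the collector can compare a soft reference's last-access timestamp against the current clock and the policy's clearing interval; a reference that is recent enough will definitely survive, so its referent can be treated as strongly reachable immediately, and only the remaining references need the full clear-or-keep decision in the final reference-processing phase.

    // Hypothetical illustration only -- not HotSpot code.
    #include <cstdint>

    struct SoftRefView {          // stand-in for a heap SoftReference
      int64_t timestamp;          // time of last access, in policy clock units
    };

    // Decided once, during marking: if the reference is recent enough under
    // the current clearing interval, it will definitely NOT be cleared, so
    // marking can follow its referent right away.  Everything else is left
    // for the (often non-concurrent) reference-processing phase.
    static bool definitely_not_cleared(const SoftRefView& ref,
                                       int64_t clock_now,
                                       int64_t clearing_interval) {
      return (clock_now - ref.timestamp) <= clearing_interval;
    }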

/*
 * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
# include "incls/_precompiled.incl"
# include "incls/_taskqueue.cpp.incl"
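
// Returns true if the queue appears non-empty: the locally maintained
// bottom index differs from the top recorded in the age word.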
bool TaskQueueSuper::peek() {
  return _bottom != _age.top();
}
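
// Park-Miller "minimal standard" linear congruential generator:
// seed' = (16807 * seed) mod (2^31 - 1), computed with Schrage's
// decomposition (m = a*q + r) so the intermediate products never
// overflow a 32-bit signed int.  Used to pick random victim queues
// when stealing work.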
int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
  const int a =      16807;
  const int m = 2147483647;
  const int q =     127773;  /* m div a */
  const int r =       2836;  /* m mod a */
  assert(sizeof(int) == 4, "the Schrage decomposition below assumes 32-bit int arithmetic");
  int seed = *seed0;
  int hi   = seed / q;
  int lo   = seed % q;
  int test = a * lo - r * hi;
  if (test > 0)
    seed = test;
  else
    seed = test + m;
  *seed0 = seed;
  return seed;
}
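
// Illustrative use (hypothetical caller): a stealing thread keeps a
// per-thread seed and maps the generator's output onto a queue index:
//
//   int seed = initial_seed;                         // hypothetical per-thread seed
//   uint victim = randomParkAndMiller(&seed) % n_queues;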
ParallelTaskTerminator::
ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0) {}
bool ParallelTaskTerminator::peek_in_queue_set() {
  return _queue_set->peek();
}

void ParallelTaskTerminator::yield() {
  os::yield();
}

void ParallelTaskTerminator::sleep(uint millis) {
  os::sleep(Thread::current(), millis, false);
}
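
// Termination protocol: a worker that has run out of work calls
// offer_termination().  It atomically registers its offer and then
// spins -- first yielding, then periodically sleeping -- until either
// all _n_threads workers have offered termination (everything is done,
// return true) or work reappears in the queue set / the external
// terminator asks us to exit (withdraw the offer and return false so
// the caller can resume stealing).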
bool
ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  Atomic::inc(&_offered_termination);

  juint yield_count = 0;
  while (true) {
    if (_offered_termination == _n_threads) {
      //inner_termination_loop();
      return true;
    } else {
      if (yield_count <= WorkStealingYieldsBeforeSleep) {
        yield_count++;
        yield();
      } else {
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelTaskTerminator::offer_termination() "
            "thread " PTR_FORMAT " sleeps after %u yields",
            Thread::current(), yield_count);
        }
        yield_count = 0;
        // A sleep will cause this processor to seek work on another processor's
        // runqueue, if it has nothing else to run (as opposed to the yield
        // which may only move the thread to the end of this processor's
        // runqueue).
        sleep(WorkStealingSleepMillis);
      }

      if (peek_in_queue_set() ||
          (terminator != NULL && terminator->should_exit_termination())) {
        Atomic::dec(&_offered_termination);
        return false;
      }
    }
  }
}
void ParallelTaskTerminator::reset_for_reuse() {
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads,
           "Terminator may still be in use");
    _offered_termination = 0;
  }
}
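
// RegionTaskQueueWithOverflow pairs a fixed-size, stealable region task
// queue with an unbounded, C-heap-allocated overflow stack: a push that
// does not fit in the queue is diverted to the overflow stack, and
// retrieval drains the overflow stack before the stealable queue.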
bool RegionTaskQueueWithOverflow::is_empty() {
  return (_region_queue.size() == 0) &&
         (_overflow_stack->length() == 0);
}

bool RegionTaskQueueWithOverflow::stealable_is_empty() {
  return _region_queue.size() == 0;
}

bool RegionTaskQueueWithOverflow::overflow_is_empty() {
  return _overflow_stack->length() == 0;
}
void RegionTaskQueueWithOverflow::initialize() {
  _region_queue.initialize();
  assert(_overflow_stack == 0, "Creating memory leak");
  _overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
}
void RegionTaskQueueWithOverflow::save(RegionTask t) {
  if (TraceRegionTasksQueuing && Verbose) {
    gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
  }
  if (!_region_queue.push(t)) {
    _overflow_stack->push(t);
  }
}
// Note that this method will retrieve any region that has been
// saved, but it always checks the overflow stack first.  It may
// be more efficient to check the stealable queue and the overflow
// stack separately.
bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
  bool result = retrieve_from_overflow(region_task);
  if (!result) {
    result = retrieve_from_stealable_queue(region_task);
  }
  if (TraceRegionTasksQueuing && Verbose && result) {
    gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, region_task);
  }
  return result;
}
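
// Illustrative usage (hypothetical driver loop; process_region() is not
// part of this file):
//
//   RegionTaskQueueWithOverflow q;
//   q.initialize();
//   q.save(task);                 // overflows to the C-heap stack if full
//   RegionTask t;
//   while (q.retrieve(t)) {       // overflow stack first, then local queue
//     process_region(t);
//   }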
bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
                                   RegionTask& region_task) {
  bool result = _region_queue.pop_local(region_task);
  if (TraceRegionTasksQueuing && Verbose) {
    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
  }
  return result;
}
bool
RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
  bool result;
  if (!_overflow_stack->is_empty()) {
    region_task = _overflow_stack->pop();
    result = true;
  } else {
    region_task = (RegionTask) NULL;
    result = false;
  }
  if (TraceRegionTasksQueuing && Verbose) {
    gclog_or_tty->print_cr("CTQ: retrieve_overflow " PTR_FORMAT, region_task);
  }
  return result;
}
