Merge

Wed, 18 Feb 2009 18:14:18 -0800

author
trims
date
Wed, 18 Feb 2009 18:14:18 -0800
changeset 1006
6b7f6a17455e
parent 974
bcb33806d186
parent 1005
dca06e7f503d
child 1007
1605bb4eb5a7

Merge

     1.1 --- a/src/cpu/sparc/vm/sparc.ad	Thu Feb 12 14:00:38 2009 -0800
     1.2 +++ b/src/cpu/sparc/vm/sparc.ad	Wed Feb 18 18:14:18 2009 -0800
     1.3 @@ -762,7 +762,7 @@
     1.4      case Assembler::stdf_op3: st_op = Op_StoreD; break;
     1.5  
     1.6      case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
     1.7 -    case Assembler::lduh_op3: ld_op = Op_LoadC; break;
     1.8 +    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
     1.9      case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    1.10      case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    1.11      case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    1.12 @@ -3869,6 +3869,8 @@
    1.13    constraint(ALLOC_IN_RC(dflt_reg));
    1.14    match(RegD);
    1.15  
    1.16 +  match(regD_low);
    1.17 +
    1.18    format %{ %}
    1.19    interface(REG_INTER);
    1.20  %}
    1.21 @@ -3883,7 +3885,7 @@
    1.22  
    1.23  operand regD_low() %{
    1.24    constraint(ALLOC_IN_RC(dflt_low_reg));
    1.25 -  match(RegD);
    1.26 +  match(regD);
    1.27  
    1.28    format %{ %}
    1.29    interface(REG_INTER);
    1.30 @@ -5314,9 +5316,9 @@
    1.31    ins_pipe(iload_mask_mem);
    1.32  %}
    1.33  
    1.34 -// Load Char (16bit UNsigned) into a Long Register
    1.35 -instruct loadUCL(iRegL dst, memory mem, immL_FFFF bytemask) %{
    1.36 -  match(Set dst (AndL (ConvI2L (LoadC mem)) bytemask));
    1.37 +// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
    1.38 +instruct loadUS2L(iRegL dst, memory mem, immL_FFFF bytemask) %{
    1.39 +  match(Set dst (AndL (ConvI2L (LoadUS mem)) bytemask));
    1.40    ins_cost(MEMORY_REF_COST);
    1.41  
    1.42    size(4);
    1.43 @@ -5326,9 +5328,9 @@
    1.44    ins_pipe(iload_mask_mem);
    1.45  %}
    1.46  
    1.47 -// Load Char (16bit unsigned)
    1.48 -instruct loadC(iRegI dst, memory mem) %{
    1.49 -  match(Set dst (LoadC mem));
    1.50 +// Load Unsigned Short/Char (16bit unsigned)
    1.51 +instruct loadUS(iRegI dst, memory mem) %{
    1.52 +  match(Set dst (LoadUS mem));
    1.53    ins_cost(MEMORY_REF_COST);
    1.54  
    1.55    size(4);
     2.1 --- a/src/cpu/x86/vm/x86_32.ad	Thu Feb 12 14:00:38 2009 -0800
     2.2 +++ b/src/cpu/x86/vm/x86_32.ad	Wed Feb 18 18:14:18 2009 -0800
     2.3 @@ -6413,9 +6413,9 @@
     2.4    ins_pipe( ialu_reg_mem );
     2.5  %}
     2.6  
     2.7 -// Load Char (16bit unsigned)
     2.8 -instruct loadC(eRegI dst, memory mem) %{
     2.9 -  match(Set dst (LoadC mem));
    2.10 +// Load Unsigned Short/Char (16bit unsigned)
    2.11 +instruct loadUS(eRegI dst, memory mem) %{
    2.12 +  match(Set dst (LoadUS mem));
    2.13  
    2.14    ins_cost(125);
    2.15    format %{ "MOVZX  $dst,$mem" %}
     3.1 --- a/src/cpu/x86/vm/x86_64.ad	Thu Feb 12 14:00:38 2009 -0800
     3.2 +++ b/src/cpu/x86/vm/x86_64.ad	Wed Feb 18 18:14:18 2009 -0800
     3.3 @@ -6096,25 +6096,25 @@
     3.4  //   ins_pipe(ialu_reg_mem);
     3.5  // %}
     3.6  
     3.7 -// Load Char (16 bit UNsigned)
     3.8 -instruct loadC(rRegI dst, memory mem)
     3.9 -%{
    3.10 -  match(Set dst (LoadC mem));
    3.11 +// Load Unsigned Short/Char (16 bit UNsigned)
    3.12 +instruct loadUS(rRegI dst, memory mem)
    3.13 +%{
    3.14 +  match(Set dst (LoadUS mem));
    3.15  
    3.16    ins_cost(125);
    3.17 -  format %{ "movzwl  $dst, $mem\t# char" %}
    3.18 +  format %{ "movzwl  $dst, $mem\t# ushort/char" %}
    3.19    opcode(0x0F, 0xB7);
    3.20    ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
    3.21    ins_pipe(ialu_reg_mem);
    3.22  %}
    3.23  
    3.24 -// Load Char (16 bit UNsigned) into long
    3.25 -// instruct loadC2L(rRegL dst, memory mem)
    3.26 +// Load Unsigned Short/Char (16 bit UNsigned) into long
    3.27 +// instruct loadUS2L(rRegL dst, memory mem)
    3.28  // %{
    3.29 -//   match(Set dst (ConvI2L (LoadC mem)));
    3.30 +//   match(Set dst (ConvI2L (LoadUS mem)));
    3.31  
    3.32  //   ins_cost(125);
    3.33 -//   format %{ "movzwl  $dst, $mem\t# char -> long" %}
    3.34 +//   format %{ "movzwl  $dst, $mem\t# ushort/char -> long" %}
    3.35  //   opcode(0x0F, 0xB7);
    3.36  //   ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
    3.37  //   ins_pipe(ialu_reg_mem);
    3.38 @@ -9490,14 +9490,14 @@
    3.39  %{
    3.40    match(Set dst (AndL dst src));
    3.41  
    3.42 -  format %{ "movzbq  $dst, $src\t# long & 0xFF" %}
    3.43 +  format %{ "movzbq  $dst, $dst\t# long & 0xFF" %}
    3.44    opcode(0x0F, 0xB6);
    3.45    ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
    3.46    ins_pipe(ialu_reg);
    3.47  %}
    3.48  
    3.49  // And Register with Immediate 65535
    3.50 -instruct andL_rReg_imm65535(rRegI dst, immL_65535 src)
    3.51 +instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
    3.52  %{
    3.53    match(Set dst (AndL dst src));
    3.54  
     4.1 --- a/src/os/linux/vm/os_linux.cpp	Thu Feb 12 14:00:38 2009 -0800
     4.2 +++ b/src/os/linux/vm/os_linux.cpp	Wed Feb 18 18:14:18 2009 -0800
     4.3 @@ -1432,6 +1432,10 @@
     4.4    return buf;
     4.5  }
     4.6  
     4.7 +struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
     4.8 +  return localtime_r(clock, res);
     4.9 +}
    4.10 +
    4.11  ////////////////////////////////////////////////////////////////////////////////
    4.12  // runtime exit support
    4.13  
     5.1 --- a/src/os/solaris/vm/os_solaris.cpp	Thu Feb 12 14:00:38 2009 -0800
     5.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Wed Feb 18 18:14:18 2009 -0800
     5.3 @@ -323,6 +323,10 @@
     5.4    return (size_t)(base - bottom);
     5.5  }
     5.6  
     5.7 +struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
     5.8 +  return localtime_r(clock, res);
     5.9 +}
    5.10 +
    5.11  // interruptible infrastructure
    5.12  
    5.13  // setup_interruptible saves the thread state before going into an
     6.1 --- a/src/os/windows/vm/os_windows.cpp	Thu Feb 12 14:00:38 2009 -0800
     6.2 +++ b/src/os/windows/vm/os_windows.cpp	Wed Feb 18 18:14:18 2009 -0800
     6.3 @@ -327,6 +327,14 @@
     6.4    return sz;
     6.5  }
     6.6  
     6.7 +struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
     6.8 +  const struct tm* time_struct_ptr = localtime(clock);
     6.9 +  if (time_struct_ptr != NULL) {
    6.10 +    *res = *time_struct_ptr;
    6.11 +    return res;
    6.12 +  }
    6.13 +  return NULL;
    6.14 +}
    6.15  
    6.16  LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
    6.17  
     7.1 --- a/src/share/vm/adlc/dict2.cpp	Thu Feb 12 14:00:38 2009 -0800
     7.2 +++ b/src/share/vm/adlc/dict2.cpp	Wed Feb 18 18:14:18 2009 -0800
     7.3 @@ -316,9 +316,12 @@
     7.4    return strcmp((const char *)k1,(const char *)k2);
     7.5  }
     7.6  
     7.7 -// Slimey cheap key comparator.
     7.8 +// Cheap key comparator.
     7.9  int cmpkey(const void *key1, const void *key2) {
    7.10 -  return (int)((intptr_t)key1 - (intptr_t)key2);
    7.11 +  if (key1 == key2) return 0;
    7.12 +  intptr_t delta = (intptr_t)key1 - (intptr_t)key2;
    7.13 +  if (delta > 0) return 1;
    7.14 +  return -1;
    7.15  }
    7.16  
    7.17  //=============================================================================
     8.1 --- a/src/share/vm/adlc/forms.cpp	Thu Feb 12 14:00:38 2009 -0800
     8.2 +++ b/src/share/vm/adlc/forms.cpp	Wed Feb 18 18:14:18 2009 -0800
     8.3 @@ -248,7 +248,7 @@
     8.4  // True if 'opType', an ideal name, loads or stores.
     8.5  Form::DataType Form::is_load_from_memory(const char *opType) const {
     8.6    if( strcmp(opType,"LoadB")==0 )  return Form::idealB;
     8.7 -  if( strcmp(opType,"LoadC")==0 )  return Form::idealC;
     8.8 +  if( strcmp(opType,"LoadUS")==0 )  return Form::idealC;
     8.9    if( strcmp(opType,"LoadD")==0 )  return Form::idealD;
    8.10    if( strcmp(opType,"LoadD_unaligned")==0 )  return Form::idealD;
    8.11    if( strcmp(opType,"LoadF")==0 )  return Form::idealF;
     9.1 --- a/src/share/vm/adlc/formssel.cpp	Thu Feb 12 14:00:38 2009 -0800
     9.2 +++ b/src/share/vm/adlc/formssel.cpp	Wed Feb 18 18:14:18 2009 -0800
     9.3 @@ -3314,7 +3314,7 @@
     9.4      "StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
     9.5      "StoreB","StoreC","Store" ,"StoreFP",
     9.6      "LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF"  ,
     9.7 -    "LoadB" ,"LoadC" ,"LoadS" ,"Load"   ,
     9.8 +    "LoadB" ,"LoadUS" ,"LoadS" ,"Load"   ,
     9.9      "Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
    9.10      "Store8B","Store4B","Store8C","Store4C","Store2C",
    9.11      "Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
    10.1 --- a/src/share/vm/asm/codeBuffer.cpp	Thu Feb 12 14:00:38 2009 -0800
    10.2 +++ b/src/share/vm/asm/codeBuffer.cpp	Wed Feb 18 18:14:18 2009 -0800
    10.3 @@ -123,6 +123,10 @@
    10.4      // addresses constructed before expansions will not be confused.
    10.5      cb->free_blob();
    10.6    }
    10.7 +
    10.8 +  // free any overflow storage
    10.9 +  delete _overflow_arena;
   10.10 +
   10.11  #ifdef ASSERT
   10.12    Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
   10.13  #endif
    11.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Thu Feb 12 14:00:38 2009 -0800
    11.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Feb 18 18:14:18 2009 -0800
    11.3 @@ -846,6 +846,12 @@
    11.4                                  Handle protection_domain,
    11.5                                  TRAPS) {
    11.6  
    11.7 +  // UseNewReflection
    11.8 +  // The result of this call should be consistent with the result
    11.9 +  // of the call to resolve_instance_class_or_null().
   11.10 +  // See evaluation 6790209 and 4474172 for more details.
   11.11 +  class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
   11.12 +
   11.13    unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
   11.14    int d_index = dictionary()->hash_to_index(d_hash);
   11.15  
    12.1 --- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Thu Feb 12 14:00:38 2009 -0800
    12.2 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Feb 18 18:14:18 2009 -0800
    12.3 @@ -24,7 +24,7 @@
    12.4  
    12.5  // We need to sort heap regions by collection desirability.
    12.6  
    12.7 -class CSetChooserCache {
    12.8 +class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
    12.9  private:
   12.10    enum {
   12.11      CacheLength = 16
    13.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu Feb 12 14:00:38 2009 -0800
    13.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Feb 18 18:14:18 2009 -0800
    13.3 @@ -33,7 +33,7 @@
    13.4    PYA_cancel     // It's been completed by somebody else: cancel.
    13.5  };
    13.6  
    13.7 -class ConcurrentG1Refine {
    13.8 +class ConcurrentG1Refine: public CHeapObj {
    13.9    ConcurrentG1RefineThread* _cg1rThread;
   13.10  
   13.11    volatile jint _pya;
    14.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Feb 12 14:00:38 2009 -0800
    14.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Feb 18 18:14:18 2009 -0800
    14.3 @@ -30,7 +30,7 @@
    14.4  // A generic CM bit map.  This is essentially a wrapper around the BitMap
    14.5  // class, with one bit per (1<<_shifter) HeapWords.
    14.6  
    14.7 -class CMBitMapRO {
    14.8 +class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
    14.9   protected:
   14.10    HeapWord* _bmStartWord;      // base address of range covered by map
   14.11    size_t    _bmWordSize;       // map size (in #HeapWords covered)
   14.12 @@ -139,7 +139,7 @@
   14.13  
   14.14  // Represents a marking stack used by the CM collector.
   14.15  // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
   14.16 -class CMMarkStack {
   14.17 +class CMMarkStack VALUE_OBJ_CLASS_SPEC {
   14.18    ConcurrentMark* _cm;
   14.19    oop*   _base;      // bottom of stack
   14.20    jint   _index;     // one more than last occupied index
   14.21 @@ -237,7 +237,7 @@
   14.22    void oops_do(OopClosure* f);
   14.23  };
   14.24  
   14.25 -class CMRegionStack {
   14.26 +class CMRegionStack VALUE_OBJ_CLASS_SPEC {
   14.27    MemRegion* _base;
   14.28    jint _capacity;
   14.29    jint _index;
   14.30 @@ -312,7 +312,7 @@
   14.31  
   14.32  class ConcurrentMarkThread;
   14.33  
   14.34 -class ConcurrentMark {
   14.35 +class ConcurrentMark: public CHeapObj {
   14.36    friend class ConcurrentMarkThread;
   14.37    friend class CMTask;
   14.38    friend class CMBitMapClosure;
    15.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Feb 12 14:00:38 2009 -0800
    15.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Feb 18 18:14:18 2009 -0800
    15.3 @@ -141,7 +141,7 @@
    15.4      _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
    15.5      _length(0), _scan_only_length(0),
    15.6      _last_sampled_rs_lengths(0),
    15.7 -    _survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0)
    15.8 +    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
    15.9  {
   15.10    guarantee( check_list_empty(false), "just making sure..." );
   15.11  }
   15.12 @@ -159,16 +159,15 @@
   15.13  }
   15.14  
   15.15  void YoungList::add_survivor_region(HeapRegion* hr) {
   15.16 -  assert(!hr->is_survivor(), "should not already be for survived");
   15.17 +  assert(hr->is_survivor(), "should be flagged as survivor region");
   15.18    assert(hr->get_next_young_region() == NULL, "cause it should!");
   15.19  
   15.20    hr->set_next_young_region(_survivor_head);
   15.21    if (_survivor_head == NULL) {
   15.22 -    _survivors_tail = hr;
   15.23 +    _survivor_tail = hr;
   15.24    }
   15.25    _survivor_head = hr;
   15.26  
   15.27 -  hr->set_survivor();
   15.28    ++_survivor_length;
   15.29  }
   15.30  
   15.31 @@ -239,7 +238,7 @@
   15.32  
   15.33    empty_list(_survivor_head);
   15.34    _survivor_head = NULL;
   15.35 -  _survivors_tail = NULL;
   15.36 +  _survivor_tail = NULL;
   15.37    _survivor_length = 0;
   15.38  
   15.39    _last_sampled_rs_lengths = 0;
   15.40 @@ -391,6 +390,7 @@
   15.41  
   15.42    // Add survivor regions to SurvRateGroup.
   15.43    _g1h->g1_policy()->note_start_adding_survivor_regions();
   15.44 +  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   15.45    for (HeapRegion* curr = _survivor_head;
   15.46         curr != NULL;
   15.47         curr = curr->get_next_young_region()) {
   15.48 @@ -401,7 +401,7 @@
   15.49    if (_survivor_head != NULL) {
   15.50      _head           = _survivor_head;
   15.51      _length         = _survivor_length + _scan_only_length;
   15.52 -    _survivors_tail->set_next_young_region(_scan_only_head);
   15.53 +    _survivor_tail->set_next_young_region(_scan_only_head);
   15.54    } else {
   15.55      _head           = _scan_only_head;
   15.56      _length         = _scan_only_length;
   15.57 @@ -418,9 +418,9 @@
   15.58    _curr_scan_only   = NULL;
   15.59  
   15.60    _survivor_head    = NULL;
   15.61 -  _survivors_tail   = NULL;
   15.62 +  _survivor_tail   = NULL;
   15.63    _survivor_length  = 0;
   15.64 -  _g1h->g1_policy()->finished_recalculating_age_indexes();
   15.65 +  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   15.66  
   15.67    assert(check_list_well_formed(), "young list should be well formed");
   15.68  }
   15.69 @@ -553,7 +553,7 @@
   15.70    if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
   15.71      alloc_region = newAllocRegion_work(word_size, true, zero_filled);
   15.72      if (purpose == GCAllocForSurvived && alloc_region != NULL) {
   15.73 -      _young_list->add_survivor_region(alloc_region);
   15.74 +      alloc_region->set_survivor();
   15.75      }
   15.76      ++_gc_alloc_region_counts[purpose];
   15.77    } else {
   15.78 @@ -949,6 +949,10 @@
   15.79      GCOverheadReporter::recordSTWEnd(end);
   15.80      g1_policy()->record_full_collection_end();
   15.81  
   15.82 +#ifdef TRACESPINNING
   15.83 +    ParallelTaskTerminator::print_termination_counts();
   15.84 +#endif
   15.85 +
   15.86      gc_epilogue(true);
   15.87  
   15.88      // Abandon concurrent refinement.  This must happen last: in the
   15.89 @@ -2593,6 +2597,9 @@
   15.90          _young_list->print();
   15.91  #endif // SCAN_ONLY_VERBOSE
   15.92  
   15.93 +        g1_policy()->record_survivor_regions(_young_list->survivor_length(),
   15.94 +                                             _young_list->first_survivor_region(),
   15.95 +                                             _young_list->last_survivor_region());
   15.96          _young_list->reset_auxilary_lists();
   15.97        }
   15.98      } else {
   15.99 @@ -2619,7 +2626,9 @@
  15.100  #endif // SCAN_ONLY_VERBOSE
  15.101  
  15.102      double end_time_sec = os::elapsedTime();
  15.103 -    g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
  15.104 +    if (!evacuation_failed()) {
  15.105 +      g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
  15.106 +    }
  15.107      GCOverheadReporter::recordSTWEnd(end_time_sec);
  15.108      g1_policy()->record_collection_pause_end(popular_region != NULL,
  15.109                                               abandoned);
  15.110 @@ -2642,8 +2651,13 @@
  15.111        }
  15.112      }
  15.113  
  15.114 -    if (mark_in_progress())
  15.115 +    if (mark_in_progress()) {
  15.116        concurrent_mark()->update_g1_committed();
  15.117 +    }
  15.118 +
  15.119 +#ifdef TRACESPINNING
  15.120 +    ParallelTaskTerminator::print_termination_counts();
  15.121 +#endif
  15.122  
  15.123      gc_epilogue(false);
  15.124    }
  15.125 @@ -2754,6 +2768,13 @@
  15.126      _gc_alloc_region_list = r->next_gc_alloc_region();
  15.127      r->set_next_gc_alloc_region(NULL);
  15.128      r->set_is_gc_alloc_region(false);
  15.129 +    if (r->is_survivor()) {
  15.130 +      if (r->is_empty()) {
  15.131 +        r->set_not_young();
  15.132 +      } else {
  15.133 +        _young_list->add_survivor_region(r);
  15.134 +      }
  15.135 +    }
  15.136      if (r->is_empty()) {
  15.137        ++_free_regions;
  15.138      }
  15.139 @@ -3150,6 +3171,20 @@
  15.140    return block;
  15.141  }
  15.142  
  15.143 +void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
  15.144 +                                            bool par) {
  15.145 +  // Another thread might have obtained alloc_region for the given
  15.146 +  // purpose, and might be attempting to allocate in it, and might
  15.147 +  // succeed.  Therefore, we can't do the "finalization" stuff on the
  15.148 +  // region below until we're sure the last allocation has happened.
  15.149 +  // We ensure this by allocating the remaining space with a garbage
  15.150 +  // object.
  15.151 +  if (par) par_allocate_remaining_space(alloc_region);
  15.152 +  // Now we can do the post-GC stuff on the region.
  15.153 +  alloc_region->note_end_of_copying();
  15.154 +  g1_policy()->record_after_bytes(alloc_region->used());
  15.155 +}
  15.156 +
  15.157  HeapWord*
  15.158  G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
  15.159                                           HeapRegion*    alloc_region,
  15.160 @@ -3167,16 +3202,7 @@
  15.161      // Otherwise, continue; this new region is empty, too.
  15.162    }
  15.163    assert(alloc_region != NULL, "We better have an allocation region");
  15.164 -  // Another thread might have obtained alloc_region for the given
  15.165 -  // purpose, and might be attempting to allocate in it, and might
  15.166 -  // succeed.  Therefore, we can't do the "finalization" stuff on the
  15.167 -  // region below until we're sure the last allocation has happened.
  15.168 -  // We ensure this by allocating the remaining space with a garbage
  15.169 -  // object.
  15.170 -  if (par) par_allocate_remaining_space(alloc_region);
  15.171 -  // Now we can do the post-GC stuff on the region.
  15.172 -  alloc_region->note_end_of_copying();
  15.173 -  g1_policy()->record_after_bytes(alloc_region->used());
  15.174 +  retire_alloc_region(alloc_region, par);
  15.175  
  15.176    if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
  15.177      // Cannot allocate more regions for the given purpose.
  15.178 @@ -3185,7 +3211,7 @@
  15.179      if (purpose != alt_purpose) {
  15.180        HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
  15.181        // Has not the alternative region been aliased?
  15.182 -      if (alloc_region != alt_region) {
  15.183 +      if (alloc_region != alt_region && alt_region != NULL) {
  15.184          // Try to allocate in the alternative region.
  15.185          if (par) {
  15.186            block = alt_region->par_allocate(word_size);
  15.187 @@ -3194,9 +3220,10 @@
  15.188          }
  15.189          // Make an alias.
  15.190          _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
  15.191 -      }
  15.192 -      if (block != NULL) {
  15.193 -        return block;
  15.194 +        if (block != NULL) {
  15.195 +          return block;
  15.196 +        }
  15.197 +        retire_alloc_region(alt_region, par);
  15.198        }
  15.199        // Both the allocation region and the alternative one are full
  15.200        // and aliased, replace them with a new allocation region.
  15.201 @@ -3497,6 +3524,7 @@
  15.202    OverflowQueue* _overflowed_refs;
  15.203  
  15.204    G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
  15.205 +  ageTable           _age_table;
  15.206  
  15.207    size_t           _alloc_buffer_waste;
  15.208    size_t           _undo_waste;
  15.209 @@ -3538,6 +3566,7 @@
  15.210        _refs(g1h->task_queue(queue_num)),
  15.211        _hash_seed(17), _queue_num(queue_num),
  15.212        _term_attempts(0),
  15.213 +      _age_table(false),
  15.214  #if G1_DETAILED_STATS
  15.215        _pushes(0), _pops(0), _steals(0),
  15.216        _steal_attempts(0),  _overflow_pushes(0),
  15.217 @@ -3572,8 +3601,9 @@
  15.218  
  15.219    RefToScanQueue*   refs()            { return _refs;             }
  15.220    OverflowQueue*    overflowed_refs() { return _overflowed_refs;  }
  15.221 -
  15.222 -  inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
  15.223 +  ageTable*         age_table()       { return &_age_table;       }
  15.224 +
  15.225 +  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
  15.226      return &_alloc_buffers[purpose];
  15.227    }
  15.228  
  15.229 @@ -3834,7 +3864,9 @@
  15.230            (!from_region->is_young() && young_index == 0), "invariant" );
  15.231    G1CollectorPolicy* g1p = _g1->g1_policy();
  15.232    markOop m = old->mark();
  15.233 -  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(),
  15.234 +  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  15.235 +                                           : m->age();
  15.236 +  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  15.237                                                               word_sz);
  15.238    HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  15.239    oop       obj     = oop(obj_ptr);
  15.240 @@ -3872,9 +3904,12 @@
  15.241          obj->incr_age();
  15.242        } else {
  15.243          m = m->incr_age();
  15.244 +        obj->set_mark(m);
  15.245        }
  15.246 +      _par_scan_state->age_table()->add(obj, word_sz);
  15.247 +    } else {
  15.248 +      obj->set_mark(m);
  15.249      }
  15.250 -    obj->set_mark(m);
  15.251  
  15.252      // preserve "next" mark bit
  15.253      if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
  15.254 @@ -4129,6 +4164,9 @@
  15.255        _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
  15.256        _g1h->g1_policy()->record_termination_time(i, term_ms);
  15.257      }
  15.258 +    if (G1UseSurvivorSpace) {
  15.259 +      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  15.260 +    }
  15.261      _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  15.262  
  15.263      // Clean up any par-expanded rem sets.
  15.264 @@ -4368,7 +4406,7 @@
  15.265    // Is this the right thing to do here?  We don't save marks
  15.266    // on individual heap regions when we allocate from
  15.267    // them in parallel, so this seems like the correct place for this.
  15.268 -  all_alloc_regions_note_end_of_copying();
  15.269 +  retire_all_alloc_regions();
  15.270    {
  15.271      G1IsAliveClosure is_alive(this);
  15.272      G1KeepAliveClosure keep_alive(this);
  15.273 @@ -5008,7 +5046,7 @@
  15.274    return no_allocs;
  15.275  }
  15.276  
  15.277 -void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
  15.278 +void G1CollectedHeap::retire_all_alloc_regions() {
  15.279    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  15.280      HeapRegion* r = _gc_alloc_regions[ap];
  15.281      if (r != NULL) {
  15.282 @@ -5021,8 +5059,7 @@
  15.283          }
  15.284        }
  15.285        if (!has_processed_alias) {
  15.286 -        r->note_end_of_copying();
  15.287 -        g1_policy()->record_after_bytes(r->used());
  15.288 +        retire_alloc_region(r, false /* par */);
  15.289        }
  15.290      }
  15.291    }
    16.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Feb 12 14:00:38 2009 -0800
    16.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Feb 18 18:14:18 2009 -0800
    16.3 @@ -90,7 +90,7 @@
    16.4    HeapRegion* _curr_scan_only;
    16.5  
    16.6    HeapRegion* _survivor_head;
    16.7 -  HeapRegion* _survivors_tail;
    16.8 +  HeapRegion* _survivor_tail;
    16.9    size_t      _survivor_length;
   16.10  
   16.11    void          empty_list(HeapRegion* list);
   16.12 @@ -105,6 +105,7 @@
   16.13    bool          is_empty() { return _length == 0; }
   16.14    size_t        length() { return _length; }
   16.15    size_t        scan_only_length() { return _scan_only_length; }
   16.16 +  size_t        survivor_length() { return _survivor_length; }
   16.17  
   16.18    void rs_length_sampling_init();
   16.19    bool rs_length_sampling_more();
   16.20 @@ -120,6 +121,7 @@
   16.21    HeapRegion* first_region() { return _head; }
   16.22    HeapRegion* first_scan_only_region() { return _scan_only_head; }
   16.23    HeapRegion* first_survivor_region() { return _survivor_head; }
   16.24 +  HeapRegion* last_survivor_region() { return _survivor_tail; }
   16.25    HeapRegion* par_get_next_scan_only_region() {
   16.26      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
   16.27      HeapRegion* ret = _curr_scan_only;
   16.28 @@ -219,7 +221,7 @@
   16.29    // The to-space memory regions into which objects are being copied during
   16.30    // a GC.
   16.31    HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
   16.32 -  uint _gc_alloc_region_counts[GCAllocPurposeCount];
   16.33 +  size_t _gc_alloc_region_counts[GCAllocPurposeCount];
   16.34  
   16.35    // A list of the regions that have been set to be alloc regions in the
   16.36    // current collection.
   16.37 @@ -281,8 +283,8 @@
   16.38    // Returns "true" iff none of the gc alloc regions have any allocations
   16.39    // since the last call to "save_marks".
   16.40    bool all_alloc_regions_no_allocs_since_save_marks();
   16.41 -  // Calls "note_end_of_copying on all gc alloc_regions.
   16.42 -  void all_alloc_regions_note_end_of_copying();
   16.43 +  // Perform finalization stuff on all allocation regions.
   16.44 +  void retire_all_alloc_regions();
   16.45  
   16.46    // The number of regions allocated to hold humongous objects.
   16.47    int         _num_humongous_regions;
   16.48 @@ -351,6 +353,10 @@
   16.49    // that parallel threads might be attempting allocations.
   16.50    void par_allocate_remaining_space(HeapRegion* r);
   16.51  
   16.52 +  // Retires an allocation region when it is full or at the end of a
   16.53 +  // GC pause.
   16.54 +  void  retire_alloc_region(HeapRegion* alloc_region, bool par);
   16.55 +
   16.56    // Helper function for two callbacks below.
   16.57    // "full", if true, indicates that the GC is for a System.gc() request,
   16.58    // and should collect the entire heap.  If "clear_all_soft_refs" is true,
    17.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Feb 12 14:00:38 2009 -0800
    17.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Feb 18 18:14:18 2009 -0800
    17.3 @@ -196,8 +196,13 @@
    17.4    _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
    17.5                                                   G1YoungSurvRateNumRegionsSummary)),
    17.6    _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
    17.7 -                                              G1YoungSurvRateNumRegionsSummary))
    17.8 +                                              G1YoungSurvRateNumRegionsSummary)),
    17.9    // add here any more surv rate groups
   17.10 +  _recorded_survivor_regions(0),
   17.11 +  _recorded_survivor_head(NULL),
   17.12 +  _recorded_survivor_tail(NULL),
   17.13 +  _survivors_age_table(true)
   17.14 +
   17.15  {
   17.16    _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   17.17    _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
   17.18 @@ -272,6 +277,15 @@
   17.19    _concurrent_mark_cleanup_times_ms->add(0.20);
   17.20    _tenuring_threshold = MaxTenuringThreshold;
   17.21  
   17.22 +  if (G1UseSurvivorSpace) {
   17.23 +    // if G1FixedSurvivorSpaceSize is 0 which means the size is not
   17.24 +    // fixed, then _max_survivor_regions will be calculated at
   17.25 +    // calculate_young_list_target_config during initialization
   17.26 +    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
   17.27 +  } else {
   17.28 +    _max_survivor_regions = 0;
   17.29 +  }
   17.30 +
   17.31    initialize_all();
   17.32  }
   17.33  
   17.34 @@ -283,6 +297,9 @@
   17.35  void G1CollectorPolicy::initialize_flags() {
   17.36    set_min_alignment(HeapRegion::GrainBytes);
   17.37    set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
   17.38 +  if (SurvivorRatio < 1) {
   17.39 +    vm_exit_during_initialization("Invalid survivor ratio specified");
   17.40 +  }
   17.41    CollectorPolicy::initialize_flags();
   17.42  }
   17.43  
   17.44 @@ -301,6 +318,8 @@
   17.45                                    "-XX:+UseConcMarkSweepGC.");
   17.46    }
   17.47  
   17.48 +  initialize_gc_policy_counters();
   17.49 +
   17.50    if (G1Gen) {
   17.51      _in_young_gc_mode = true;
   17.52  
   17.53 @@ -322,6 +341,12 @@
   17.54    }
   17.55  }
   17.56  
   17.57 +// Create the jstat counters for the policy.
   17.58 +void G1CollectorPolicy::initialize_gc_policy_counters()
   17.59 +{
   17.60 +  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
   17.61 +}
   17.62 +
   17.63  void G1CollectorPolicy::calculate_young_list_min_length() {
   17.64    _young_list_min_length = 0;
   17.65  
   17.66 @@ -352,6 +377,7 @@
   17.67      guarantee( so_length < _young_list_target_length, "invariant" );
   17.68      _young_list_so_prefix_length = so_length;
   17.69    }
   17.70 +  calculate_survivors_policy();
   17.71  }
   17.72  
   17.73  // This method calculate the optimal scan-only set for a fixed young
   17.74 @@ -448,6 +474,9 @@
   17.75    if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
   17.76      // we are in fully-young mode and there are free regions in the heap
   17.77  
   17.78 +    double survivor_regions_evac_time =
   17.79 +        predict_survivor_regions_evac_time();
   17.80 +
   17.81      size_t min_so_length = 0;
   17.82      size_t max_so_length = 0;
   17.83  
   17.84 @@ -497,9 +526,8 @@
   17.85        scanned_cards = predict_non_young_card_num(adj_rs_lengths);
   17.86      // calculate this once, so that we don't have to recalculate it in
   17.87      // the innermost loop
   17.88 -    double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
   17.89 -                                                       scanned_cards);
   17.90 -
   17.91 +    double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
   17.92 +                          + survivor_regions_evac_time;
   17.93      // the result
   17.94      size_t final_young_length = 0;
   17.95      size_t final_so_length = 0;
   17.96 @@ -548,14 +576,14 @@
   17.97      bool done = false;
   17.98      // this is the outermost loop
   17.99      while (!done) {
  17.100 -#if 0
  17.101 +#ifdef TRACE_CALC_YOUNG_CONFIG
  17.102        // leave this in for debugging, just in case
  17.103        gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
  17.104                               ", incr " SIZE_FORMAT ", pass %s",
  17.105                               from_so_length, to_so_length, so_length_incr,
  17.106                               (pass == pass_type_coarse) ? "coarse" :
  17.107                               (pass == pass_type_fine) ? "fine" : "final");
  17.108 -#endif // 0
  17.109 +#endif // TRACE_CALC_YOUNG_CONFIG
  17.110  
  17.111        size_t so_length = from_so_length;
  17.112        size_t init_free_regions =
  17.113 @@ -651,11 +679,11 @@
  17.114            guarantee( so_length_incr == so_coarse_increments, "invariant" );
  17.115            guarantee( final_so_length >= min_so_length, "invariant" );
  17.116  
  17.117 -#if 0
  17.118 +#ifdef TRACE_CALC_YOUNG_CONFIG
  17.119            // leave this in for debugging, just in case
  17.120            gclog_or_tty->print_cr("  coarse pass: SO length " SIZE_FORMAT,
  17.121                                   final_so_length);
  17.122 -#endif // 0
  17.123 +#endif // TRACE_CALC_YOUNG_CONFIG
  17.124  
  17.125            from_so_length =
  17.126              (final_so_length - min_so_length > so_coarse_increments) ?
  17.127 @@ -687,12 +715,12 @@
  17.128              // of the optimal
  17.129              size_t new_so_length = 950 * final_so_length / 1000;
  17.130  
  17.131 -#if 0
  17.132 +#ifdef TRACE_CALC_YOUNG_CONFIG
  17.133              // leave this in for debugging, just in case
  17.134              gclog_or_tty->print_cr("  fine pass: SO length " SIZE_FORMAT
  17.135                                     ", setting it to " SIZE_FORMAT,
  17.136                                      final_so_length, new_so_length);
  17.137 -#endif // 0
  17.138 +#endif // TRACE_CALC_YOUNG_CONFIG
  17.139  
  17.140              from_so_length = new_so_length;
  17.141              to_so_length = new_so_length;
  17.142 @@ -719,7 +747,8 @@
  17.143      }
  17.144  
  17.145      // we should have at least one region in the target young length
  17.146 -    _young_list_target_length = MAX2((size_t) 1, final_young_length);
  17.147 +    _young_list_target_length =
  17.148 +        MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
  17.149      if (final_so_length >= final_young_length)
  17.150        // and we need to ensure that the S-O length is not greater than
  17.151        // the target young length (this is being a bit careful)
  17.152 @@ -734,7 +763,7 @@
  17.153      double end_time_sec = os::elapsedTime();
  17.154      double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
  17.155  
  17.156 -#if 0
  17.157 +#ifdef TRACE_CALC_YOUNG_CONFIG
  17.158      // leave this in for debugging, just in case
  17.159      gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
  17.160                             ", SO = " SIZE_FORMAT ", "
  17.161 @@ -747,9 +776,9 @@
  17.162                             calculations,
  17.163                             full_young_gcs() ? "full" : "partial",
  17.164                             should_initiate_conc_mark() ? " i-m" : "",
  17.165 -                           in_marking_window(),
  17.166 -                           in_marking_window_im());
  17.167 -#endif // 0
  17.168 +                           _in_marking_window,
  17.169 +                           _in_marking_window_im);
  17.170 +#endif // TRACE_CALC_YOUNG_CONFIG
  17.171  
  17.172      if (_young_list_target_length < _young_list_min_length) {
  17.173        // bummer; this means that, if we do a pause when the optimal
  17.174 @@ -768,14 +797,14 @@
  17.175          // S-O length
  17.176          so_length = calculate_optimal_so_length(_young_list_min_length);
  17.177  
  17.178 -#if 0
  17.179 +#ifdef TRACE_CALC_YOUNG_CONFIG
  17.180        // leave this in for debugging, just in case
  17.181        gclog_or_tty->print_cr("adjusted target length from "
  17.182                               SIZE_FORMAT " to " SIZE_FORMAT
  17.183                               ", SO " SIZE_FORMAT,
  17.184                               _young_list_target_length, _young_list_min_length,
  17.185                               so_length);
  17.186 -#endif // 0
  17.187 +#endif // TRACE_CALC_YOUNG_CONFIG
  17.188  
  17.189        _young_list_target_length =
  17.190          MAX2(_young_list_min_length, (size_t)1);
  17.191 @@ -785,12 +814,12 @@
  17.192      // we are in a partially-young mode or we've run out of regions (due
  17.193      // to evacuation failure)
  17.194  
  17.195 -#if 0
  17.196 +#ifdef TRACE_CALC_YOUNG_CONFIG
  17.197      // leave this in for debugging, just in case
  17.198      gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
  17.199                             ", SO " SIZE_FORMAT,
  17.200                             _young_list_min_length, 0);
  17.201 -#endif // 0
  17.202 +#endif // TRACE_CALC_YOUNG_CONFIG
  17.203  
  17.204      // we'll do the pause as soon as possible and with no S-O prefix
  17.205      // (see above for the reasons behind the latter)
  17.206 @@ -884,6 +913,16 @@
  17.207    return true;
  17.208  }
  17.209  
  17.210 +double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  17.211 +  double survivor_regions_evac_time = 0.0;
  17.212 +  for (HeapRegion * r = _recorded_survivor_head;
  17.213 +       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
  17.214 +       r = r->get_next_young_region()) {
  17.215 +    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  17.216 +  }
  17.217 +  return survivor_regions_evac_time;
  17.218 +}
  17.219 +
  17.220  void G1CollectorPolicy::check_prediction_validity() {
  17.221    guarantee( adaptive_young_list_length(), "should not call this otherwise" );
  17.222  
  17.223 @@ -995,11 +1034,15 @@
  17.224    _short_lived_surv_rate_group->start_adding_regions();
  17.225    // also call this on any additional surv rate groups
  17.226  
  17.227 +  record_survivor_regions(0, NULL, NULL);
  17.228 +
  17.229    _prev_region_num_young   = _region_num_young;
  17.230    _prev_region_num_tenured = _region_num_tenured;
  17.231  
  17.232    _free_regions_at_end_of_collection = _g1->free_regions();
  17.233    _scan_only_regions_at_end_of_collection = 0;
  17.234 +  // Reset survivors SurvRateGroup.
  17.235 +  _survivor_surv_rate_group->reset();
  17.236    calculate_young_list_min_length();
  17.237    calculate_young_list_target_config();
  17.238   }
  17.239 @@ -1104,6 +1147,10 @@
  17.240    _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
  17.241    tag_scan_only(short_lived_so_length);
  17.242  
  17.243 +  if (G1UseSurvivorSpace) {
  17.244 +    _survivors_age_table.clear();
  17.245 +  }
  17.246 +
  17.247    assert( verify_young_ages(), "region age verification" );
  17.248  }
  17.249  
  17.250 @@ -1965,9 +2012,6 @@
  17.251    // </NEW PREDICTION>
  17.252  
  17.253    _target_pause_time_ms = -1.0;
  17.254 -
  17.255 -  // TODO: calculate tenuring threshold
  17.256 -  _tenuring_threshold = MaxTenuringThreshold;
  17.257  }
  17.258  
  17.259  // <NEW PREDICTION>
  17.260 @@ -2058,7 +2102,7 @@
  17.261      guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
  17.262                 "invariant" );
  17.263      int age = hr->age_in_surv_rate_group();
  17.264 -    double yg_surv_rate = predict_yg_surv_rate(age);
  17.265 +    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  17.266      bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  17.267    }
  17.268  
  17.269 @@ -2091,7 +2135,7 @@
  17.270    }
  17.271  #if PREDICTIONS_VERBOSE
  17.272    if (young) {
  17.273 -    _recorded_young_bytes += hr->asSpace()->used();
  17.274 +    _recorded_young_bytes += hr->used();
  17.275    } else {
  17.276      _recorded_marked_bytes += hr->max_live_bytes();
  17.277    }
  17.278 @@ -2119,11 +2163,6 @@
  17.279        predict_non_young_card_num(_predicted_rs_lengths);
  17.280    _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
  17.281  
  17.282 -  _predicted_young_survival_ratio = 0.0;
  17.283 -  for (int i = 0; i < _recorded_young_regions; ++i)
  17.284 -    _predicted_young_survival_ratio += predict_yg_surv_rate(i);
  17.285 -  _predicted_young_survival_ratio /= (double) _recorded_young_regions;
  17.286 -
  17.287    _predicted_scan_only_scan_time_ms =
  17.288      predict_scan_only_time_ms(_recorded_scan_only_regions);
  17.289    _predicted_rs_update_time_ms =
  17.290 @@ -2673,8 +2712,11 @@
  17.291    assert(in_young_gc_mode(), "should be in young GC mode");
  17.292    bool ret;
  17.293    size_t young_list_length = _g1->young_list_length();
  17.294 -
  17.295 -  if (young_list_length < _young_list_target_length) {
  17.296 +  size_t young_list_max_length = _young_list_target_length;
  17.297 +  if (G1FixedEdenSize) {
  17.298 +    young_list_max_length -= _max_survivor_regions;
  17.299 +  }
  17.300 +  if (young_list_length < young_list_max_length) {
  17.301      ret = true;
  17.302      ++_region_num_young;
  17.303    } else {
  17.304 @@ -2710,17 +2752,39 @@
  17.305  }
  17.306  
  17.307  
  17.308 -uint G1CollectorPolicy::max_regions(int purpose) {
  17.309 +size_t G1CollectorPolicy::max_regions(int purpose) {
  17.310    switch (purpose) {
  17.311      case GCAllocForSurvived:
  17.312 -      return G1MaxSurvivorRegions;
  17.313 +      return _max_survivor_regions;
  17.314      case GCAllocForTenured:
  17.315 -      return UINT_MAX;
  17.316 +      return REGIONS_UNLIMITED;
  17.317      default:
  17.318 -      return UINT_MAX;
  17.319 +      ShouldNotReachHere();
  17.320 +      return REGIONS_UNLIMITED;
  17.321    };
  17.322  }
  17.323  
  17.324 +// Calculates survivor space parameters.
  17.325 +void G1CollectorPolicy::calculate_survivors_policy()
  17.326 +{
  17.327 +  if (!G1UseSurvivorSpace) {
  17.328 +    return;
  17.329 +  }
  17.330 +  if (G1FixedSurvivorSpaceSize == 0) {
  17.331 +    _max_survivor_regions = _young_list_target_length / SurvivorRatio;
  17.332 +  } else {
  17.333 +    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
  17.334 +  }
  17.335 +
  17.336 +  if (G1FixedTenuringThreshold) {
  17.337 +    _tenuring_threshold = MaxTenuringThreshold;
  17.338 +  } else {
  17.339 +    _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  17.340 +        HeapRegion::GrainWords * _max_survivor_regions);
  17.341 +  }
  17.342 +}
  17.343 +
  17.344 +
  17.345  void
  17.346  G1CollectorPolicy_BestRegionsFirst::
  17.347  set_single_region_collection_set(HeapRegion* hr) {
  17.348 @@ -2743,7 +2807,11 @@
  17.349    double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  17.350  
  17.351    size_t young_list_length = _g1->young_list_length();
  17.352 -  bool reached_target_length = young_list_length >= _young_list_target_length;
  17.353 +  size_t young_list_max_length = _young_list_target_length;
  17.354 +  if (G1FixedEdenSize) {
  17.355 +    young_list_max_length -= _max_survivor_regions;
  17.356 +  }
  17.357 +  bool reached_target_length = young_list_length >= young_list_max_length;
  17.358  
  17.359    if (in_young_gc_mode()) {
  17.360      if (reached_target_length) {
    18.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Feb 12 14:00:38 2009 -0800
    18.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Feb 18 18:14:18 2009 -0800
    18.3 @@ -49,7 +49,7 @@
    18.4  class MainBodySummary;
    18.5  class PopPreambleSummary;
    18.6  
    18.7 -class PauseSummary {
    18.8 +class PauseSummary: public CHeapObj {
    18.9    define_num_seq(total)
   18.10      define_num_seq(other)
   18.11  
   18.12 @@ -58,7 +58,7 @@
   18.13    virtual PopPreambleSummary* pop_preamble_summary() { return NULL; }
   18.14  };
   18.15  
   18.16 -class MainBodySummary {
   18.17 +class MainBodySummary: public CHeapObj {
   18.18    define_num_seq(satb_drain) // optional
   18.19    define_num_seq(parallel) // parallel only
   18.20      define_num_seq(ext_root_scan)
   18.21 @@ -75,7 +75,7 @@
   18.22    define_num_seq(clear_ct)  // parallel only
   18.23  };
   18.24  
   18.25 -class PopPreambleSummary {
   18.26 +class PopPreambleSummary: public CHeapObj {
   18.27    define_num_seq(pop_preamble)
   18.28      define_num_seq(pop_update_rs)
   18.29      define_num_seq(pop_scan_rs)
   18.30 @@ -557,6 +557,8 @@
   18.31      return get_new_neg_prediction(_young_gc_eff_seq);
   18.32    }
   18.33  
   18.34 +  double predict_survivor_regions_evac_time();
   18.35 +
   18.36    // </NEW PREDICTION>
   18.37  
   18.38  public:
   18.39 @@ -599,8 +601,8 @@
   18.40  
   18.41    // Returns an estimate of the survival rate of the region at yg-age
   18.42    // "yg_age".
   18.43 -  double predict_yg_surv_rate(int age) {
   18.44 -    TruncatedSeq* seq = _short_lived_surv_rate_group->get_seq(age);
   18.45 +  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
   18.46 +    TruncatedSeq* seq = surv_rate_group->get_seq(age);
   18.47      if (seq->num() == 0)
   18.48        gclog_or_tty->print("BARF! age is %d", age);
   18.49      guarantee( seq->num() > 0, "invariant" );
   18.50 @@ -610,6 +612,10 @@
   18.51      return pred;
   18.52    }
   18.53  
   18.54 +  double predict_yg_surv_rate(int age) {
   18.55 +    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
   18.56 +  }
   18.57 +
   18.58    double accum_yg_surv_rate_pred(int age) {
   18.59      return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
   18.60    }
   18.61 @@ -822,6 +828,9 @@
   18.62  
   18.63    virtual void init();
   18.64  
   18.65 +  // Create jstat counters for the policy.
   18.66 +  virtual void initialize_gc_policy_counters();
   18.67 +
   18.68    virtual HeapWord* mem_allocate_work(size_t size,
   18.69                                        bool is_tlab,
   18.70                                        bool* gc_overhead_limit_was_exceeded);
   18.71 @@ -1047,8 +1056,12 @@
   18.72    // Print stats on young survival ratio
   18.73    void print_yg_surv_rate_info() const;
   18.74  
   18.75 -  void finished_recalculating_age_indexes() {
   18.76 -    _short_lived_surv_rate_group->finished_recalculating_age_indexes();
   18.77 +  void finished_recalculating_age_indexes(bool is_survivors) {
   18.78 +    if (is_survivors) {
   18.79 +      _survivor_surv_rate_group->finished_recalculating_age_indexes();
   18.80 +    } else {
   18.81 +      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
   18.82 +    }
   18.83      // do that for any other surv rate groups
   18.84    }
   18.85  
   18.86 @@ -1097,6 +1110,17 @@
   18.87    // maximum amount of suvivors regions.
   18.88    int _tenuring_threshold;
   18.89  
   18.90 +  // The limit on the number of regions allocated for survivors.
   18.91 +  size_t _max_survivor_regions;
   18.92 +
   18.93 +  // The amount of survor regions after a collection.
   18.94 +  size_t _recorded_survivor_regions;
   18.95 +  // List of survivor regions.
   18.96 +  HeapRegion* _recorded_survivor_head;
   18.97 +  HeapRegion* _recorded_survivor_tail;
   18.98 +
   18.99 +  ageTable _survivors_age_table;
  18.100 +
  18.101  public:
  18.102  
  18.103    inline GCAllocPurpose
  18.104 @@ -1116,7 +1140,9 @@
  18.105      return GCAllocForTenured;
  18.106    }
  18.107  
  18.108 -  uint max_regions(int purpose);
  18.109 +  static const size_t REGIONS_UNLIMITED = ~(size_t)0;
  18.110 +
  18.111 +  size_t max_regions(int purpose);
  18.112  
  18.113    // The limit on regions for a particular purpose is reached.
  18.114    void note_alloc_region_limit_reached(int purpose) {
  18.115 @@ -1132,6 +1158,23 @@
  18.116    void note_stop_adding_survivor_regions() {
  18.117      _survivor_surv_rate_group->stop_adding_regions();
  18.118    }
  18.119 +
  18.120 +  void record_survivor_regions(size_t      regions,
  18.121 +                               HeapRegion* head,
  18.122 +                               HeapRegion* tail) {
  18.123 +    _recorded_survivor_regions = regions;
  18.124 +    _recorded_survivor_head    = head;
  18.125 +    _recorded_survivor_tail    = tail;
  18.126 +  }
  18.127 +
  18.128 +  void record_thread_age_table(ageTable* age_table)
  18.129 +  {
  18.130 +    _survivors_age_table.merge_par(age_table);
  18.131 +  }
  18.132 +
  18.133 +  // Calculates survivor space parameters.
  18.134 +  void calculate_survivors_policy();
  18.135 +
  18.136  };
  18.137  
  18.138  // This encapsulates a particular strategy for a g1 Collector.
    19.1 --- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Thu Feb 12 14:00:38 2009 -0800
    19.2 +++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Wed Feb 18 18:14:18 2009 -0800
    19.3 @@ -28,7 +28,7 @@
    19.4  /***** ALL TIMES ARE IN SECS!!!!!!! *****/
    19.5  
    19.6  // this is the "interface"
    19.7 -class G1MMUTracker {
    19.8 +class G1MMUTracker: public CHeapObj {
    19.9  protected:
   19.10    double          _time_slice;
   19.11    double          _max_gc_time; // this is per time slice
   19.12 @@ -67,7 +67,7 @@
   19.13    }
   19.14  };
   19.15  
   19.16 -class G1MMUTrackerQueueElem {
   19.17 +class G1MMUTrackerQueueElem VALUE_OBJ_CLASS_SPEC {
   19.18  private:
   19.19    double _start_time;
   19.20    double _end_time;
    20.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Feb 12 14:00:38 2009 -0800
    20.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Feb 18 18:14:18 2009 -0800
    20.3 @@ -572,6 +572,9 @@
    20.4    }
    20.5    guarantee( _cards_scanned == NULL, "invariant" );
    20.6    _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
    20.7 +  for (uint i = 0; i < n_workers(); ++i) {
    20.8 +    _cards_scanned[i] = 0;
    20.9 +  }
   20.10    _total_cards_scanned = 0;
   20.11  }
   20.12  
    21.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Feb 12 14:00:38 2009 -0800
    21.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Feb 18 18:14:18 2009 -0800
    21.3 @@ -30,7 +30,7 @@
    21.4  class HRInto_G1RemSet;
    21.5  class ConcurrentG1Refine;
    21.6  
    21.7 -class G1RemSet {
    21.8 +class G1RemSet: public CHeapObj {
    21.9  protected:
   21.10    G1CollectedHeap* _g1;
   21.11  
    22.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Feb 12 14:00:38 2009 -0800
    22.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Feb 18 18:14:18 2009 -0800
    22.3 @@ -281,7 +281,17 @@
    22.4    develop(bool, G1HRRSFlushLogBuffersOnVerify, false,                       \
    22.5            "Forces flushing of log buffers before verification.")            \
    22.6                                                                              \
    22.7 -  product(intx, G1MaxSurvivorRegions, 0,                                    \
    22.8 -          "The maximum number of survivor regions")
    22.9 +  product(bool, G1UseSurvivorSpace, true,                                   \
   22.10 +          "When true, use survivor space.")                                 \
   22.11 +                                                                            \
   22.12 +  product(bool, G1FixedTenuringThreshold, false,                            \
   22.13 +          "When set, G1 will not adjust the tenuring threshold")            \
   22.14 +                                                                            \
   22.15 +  product(bool, G1FixedEdenSize, false,                                     \
   22.16 +          "When set, G1 will not allocate unused survivor space regions")   \
   22.17 +                                                                            \
   22.18 +  product(uintx, G1FixedSurvivorSpaceSize, 0,                               \
   22.19 +          "If non-0 is the size of the G1 survivor space, "                 \
   22.20 +          "otherwise SurvivorRatio is used to determine the size")
   22.21  
   22.22  G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
    23.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Feb 12 14:00:38 2009 -0800
    23.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Feb 18 18:14:18 2009 -0800
    23.3 @@ -566,7 +566,11 @@
    23.4    void note_end_of_copying() {
    23.5      assert(top() >= _next_top_at_mark_start,
    23.6             "Increase only");
    23.7 -    _next_top_at_mark_start = top();
    23.8 +    // Survivor regions will be scanned on the start of concurrent
    23.9 +    // marking.
   23.10 +    if (!is_survivor()) {
   23.11 +      _next_top_at_mark_start = top();
   23.12 +    }
   23.13    }
   23.14  
   23.15    // Returns "false" iff no object in the region was allocated when the
   23.16 @@ -829,7 +833,7 @@
   23.17  
   23.18  // A linked lists of heap regions.  It leaves the "next" field
   23.19  // unspecified; that's up to subtypes.
   23.20 -class RegionList {
   23.21 +class RegionList VALUE_OBJ_CLASS_SPEC {
   23.22  protected:
   23.23    virtual HeapRegion* get_next(HeapRegion* chr) = 0;
   23.24    virtual void set_next(HeapRegion* chr,
    24.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Feb 12 14:00:38 2009 -0800
    24.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Feb 18 18:14:18 2009 -0800
    24.3 @@ -65,9 +65,11 @@
    24.4    // We need access in order to union things into the base table.
    24.5    BitMap* bm() { return &_bm; }
    24.6  
    24.7 +#if PRT_COUNT_OCCUPIED
    24.8    void recount_occupied() {
    24.9      _occupied = (jint) bm()->count_one_bits();
   24.10    }
   24.11 +#endif
   24.12  
   24.13    PerRegionTable(HeapRegion* hr) :
   24.14      _hr(hr),
   24.15 @@ -1144,7 +1146,9 @@
   24.16    size_t i = _outgoing_region_map.get_next_one_offset(0);
   24.17    while (i < _outgoing_region_map.size()) {
   24.18      HeapRegion* to_region = g1h->region_at(i);
   24.19 -    to_region->rem_set()->clear_incoming_entry(hr());
   24.20 +    if (!to_region->in_collection_set()) {
   24.21 +      to_region->rem_set()->clear_incoming_entry(hr());
   24.22 +    }
   24.23      i = _outgoing_region_map.get_next_one_offset(i+1);
   24.24    }
   24.25  }
    25.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Feb 12 14:00:38 2009 -0800
    25.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Feb 18 18:14:18 2009 -0800
    25.3 @@ -58,7 +58,7 @@
    25.4  //      is represented.  If a deleted PRT is re-used, a thread adding a bit,
    25.5  //      thinking the PRT is for a different region, does no harm.
    25.6  
    25.7 -class OtherRegionsTable: public CHeapObj {
    25.8 +class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
    25.9    friend class HeapRegionRemSetIterator;
   25.10  
   25.11    G1CollectedHeap* _g1h;
    26.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Thu Feb 12 14:00:38 2009 -0800
    26.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Wed Feb 18 18:14:18 2009 -0800
    26.3 @@ -29,7 +29,7 @@
    26.4  
    26.5  class PtrQueueSet;
    26.6  
    26.7 -class PtrQueue: public CHeapObj {
    26.8 +class PtrQueue VALUE_OBJ_CLASS_SPEC {
    26.9  
   26.10  protected:
   26.11    // The ptr queue set to which this queue belongs.
   26.12 @@ -130,7 +130,7 @@
   26.13  // In particular, the individual queues allocate buffers from this shared
   26.14  // set, and return completed buffers to the set.
   26.15  // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
   26.16 -class PtrQueueSet: public CHeapObj {
   26.17 +class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
   26.18  
   26.19  protected:
   26.20  
    27.1 --- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Feb 12 14:00:38 2009 -0800
    27.2 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Feb 18 18:14:18 2009 -0800
    27.3 @@ -33,7 +33,7 @@
    27.4  // old versions synchronously.
    27.5  
    27.6  
    27.7 -class SparsePRTEntry {
    27.8 +class SparsePRTEntry: public CHeapObj {
    27.9  public:
   27.10    enum SomePublicConstants {
   27.11      CardsPerEntry = (short)4,
   27.12 @@ -167,7 +167,7 @@
   27.13  };
   27.14  
   27.15    // ValueObj because will be embedded in HRRS iterator.
   27.16 -class RSHashTableIter: public CHeapObj {
   27.17 +class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
   27.18      short _tbl_ind;
   27.19      short _bl_ind;
   27.20      short _card_ind;
   27.21 @@ -213,7 +213,7 @@
   27.22  
   27.23  class SparsePRTIter;
   27.24  
   27.25 -class SparsePRT : public CHeapObj {
   27.26 +class SparsePRT VALUE_OBJ_CLASS_SPEC {
   27.27    //  Iterations are done on the _cur hash table, since they only need to
   27.28    //  see entries visible at the start of a collection pause.
   27.29    //  All other operations are done using the _next hash table.
    28.1 --- a/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Thu Feb 12 14:00:38 2009 -0800
    28.2 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Wed Feb 18 18:14:18 2009 -0800
    28.3 @@ -29,23 +29,14 @@
    28.4                               const char* name,
    28.5                               size_t summary_surv_rates_len) :
    28.6      _g1p(g1p), _name(name),
    28.7 -    _all_regions_allocated(0),
    28.8 -    _curr_length(0), _scan_only_prefix(0), _setup_seq_num(0),
    28.9 -    _array_length(0), _surv_rate(NULL), _accum_surv_rate_pred(NULL),
   28.10 -    _accum_surv_rate(0.0), _surv_rate_pred(NULL), _last_pred(0.0),
   28.11      _summary_surv_rates_len(summary_surv_rates_len),
   28.12      _summary_surv_rates_max_len(0),
   28.13 -    _summary_surv_rates(NULL) {
   28.14 -
   28.15 -  // the following will set up the arrays with length 1
   28.16 -  _curr_length = 1;
   28.17 -  stop_adding_regions();
   28.18 -  guarantee( _array_length == 1, "invariant" );
   28.19 -  guarantee( _surv_rate_pred[0] != NULL, "invariant" );
   28.20 -  _surv_rate_pred[0]->add(0.4);
   28.21 -  all_surviving_words_recorded(false);
   28.22 -  _curr_length = 0;
   28.23 -
   28.24 +    _summary_surv_rates(NULL),
   28.25 +    _surv_rate(NULL),
   28.26 +    _accum_surv_rate_pred(NULL),
   28.27 +    _surv_rate_pred(NULL)
   28.28 +{
   28.29 +  reset();
   28.30    if (summary_surv_rates_len > 0) {
   28.31      size_t length = summary_surv_rates_len;
   28.32        _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length);
   28.33 @@ -60,61 +51,80 @@
   28.34    start_adding_regions();
   28.35  }
   28.36  
   28.37 +
   28.38 +void SurvRateGroup::reset()
   28.39 +{
   28.40 +  _all_regions_allocated = 0;
   28.41 +  _scan_only_prefix      = 0;
   28.42 +  _setup_seq_num         = 0;
   28.43 +  _stats_arrays_length   = 0;
   28.44 +  _accum_surv_rate       = 0.0;
   28.45 +  _last_pred             = 0.0;
   28.46 +  // the following will set up the arrays with length 1
   28.47 +  _region_num            = 1;
   28.48 +  stop_adding_regions();
   28.49 +  guarantee( _stats_arrays_length == 1, "invariant" );
   28.50 +  guarantee( _surv_rate_pred[0] != NULL, "invariant" );
   28.51 +  _surv_rate_pred[0]->add(0.4);
   28.52 +  all_surviving_words_recorded(false);
   28.53 +  _region_num = 0;
   28.54 +}
   28.55 +
   28.56 +
   28.57  void
   28.58  SurvRateGroup::start_adding_regions() {
   28.59 -  _setup_seq_num   = _array_length;
   28.60 -  _curr_length     = _scan_only_prefix;
   28.61 +  _setup_seq_num   = _stats_arrays_length;
   28.62 +  _region_num      = _scan_only_prefix;
   28.63    _accum_surv_rate = 0.0;
   28.64  
   28.65  #if 0
   28.66 -  gclog_or_tty->print_cr("start adding regions, seq num %d, length %d",
   28.67 -                         _setup_seq_num, _curr_length);
   28.68 +  gclog_or_tty->print_cr("[%s] start adding regions, seq num %d, length %d",
   28.69 +                         _name, _setup_seq_num, _region_num);
   28.70  #endif // 0
   28.71  }
   28.72  
   28.73  void
   28.74  SurvRateGroup::stop_adding_regions() {
   28.75 -  size_t length = _curr_length;
   28.76  
   28.77  #if 0
   28.78 -  gclog_or_tty->print_cr("stop adding regions, length %d", length);
   28.79 +  gclog_or_tty->print_cr("[%s] stop adding regions, length %d", _name, _region_num);
   28.80  #endif // 0
   28.81  
   28.82 -  if (length > _array_length) {
   28.83 +  if (_region_num > _stats_arrays_length) {
   28.84      double* old_surv_rate = _surv_rate;
   28.85      double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
   28.86      TruncatedSeq** old_surv_rate_pred = _surv_rate_pred;
   28.87  
   28.88 -    _surv_rate = NEW_C_HEAP_ARRAY(double, length);
   28.89 +    _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num);
   28.90      if (_surv_rate == NULL) {
   28.91 -      vm_exit_out_of_memory(sizeof(double) * length,
   28.92 +      vm_exit_out_of_memory(sizeof(double) * _region_num,
   28.93                              "Not enough space for surv rate array.");
   28.94      }
   28.95 -    _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, length);
   28.96 +    _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num);
   28.97      if (_accum_surv_rate_pred == NULL) {
   28.98 -      vm_exit_out_of_memory(sizeof(double) * length,
   28.99 +      vm_exit_out_of_memory(sizeof(double) * _region_num,
  28.100                           "Not enough space for accum surv rate pred array.");
  28.101      }
  28.102 -    _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, length);
  28.103 +    _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num);
  28.104      if (_surv_rate == NULL) {
  28.105 -      vm_exit_out_of_memory(sizeof(TruncatedSeq*) * length,
  28.106 +      vm_exit_out_of_memory(sizeof(TruncatedSeq*) * _region_num,
  28.107                              "Not enough space for surv rate pred array.");
  28.108      }
  28.109  
  28.110 -    for (size_t i = 0; i < _array_length; ++i)
  28.111 +    for (size_t i = 0; i < _stats_arrays_length; ++i)
  28.112        _surv_rate_pred[i] = old_surv_rate_pred[i];
  28.113  
  28.114  #if 0
  28.115 -    gclog_or_tty->print_cr("stop adding regions, new seqs %d to %d",
  28.116 -                  _array_length, length - 1);
  28.117 +    gclog_or_tty->print_cr("[%s] stop adding regions, new seqs %d to %d",
  28.118 +                  _name, _array_length, _region_num - 1);
  28.119  #endif // 0
  28.120  
  28.121 -    for (size_t i = _array_length; i < length; ++i) {
  28.122 +    for (size_t i = _stats_arrays_length; i < _region_num; ++i) {
  28.123        _surv_rate_pred[i] = new TruncatedSeq(10);
  28.124        // _surv_rate_pred[i]->add(last_pred);
  28.125      }
  28.126  
  28.127 -    _array_length = length;
  28.128 +    _stats_arrays_length = _region_num;
  28.129  
  28.130      if (old_surv_rate != NULL)
  28.131        FREE_C_HEAP_ARRAY(double, old_surv_rate);
  28.132 @@ -124,7 +134,7 @@
  28.133        FREE_C_HEAP_ARRAY(NumberSeq*, old_surv_rate_pred);
  28.134    }
  28.135  
  28.136 -  for (size_t i = 0; i < _array_length; ++i)
  28.137 +  for (size_t i = 0; i < _stats_arrays_length; ++i)
  28.138      _surv_rate[i] = 0.0;
  28.139  }
  28.140  
  28.141 @@ -135,7 +145,7 @@
  28.142  
  28.143    double ret = _accum_surv_rate;
  28.144    if (adjustment > 0) {
  28.145 -    TruncatedSeq* seq = get_seq(_curr_length+1);
  28.146 +    TruncatedSeq* seq = get_seq(_region_num+1);
  28.147      double surv_rate = _g1p->get_new_prediction(seq);
  28.148      ret += surv_rate;
  28.149    }
  28.150 @@ -145,23 +155,23 @@
  28.151  
  28.152  int
  28.153  SurvRateGroup::next_age_index() {
  28.154 -  TruncatedSeq* seq = get_seq(_curr_length);
  28.155 +  TruncatedSeq* seq = get_seq(_region_num);
  28.156    double surv_rate = _g1p->get_new_prediction(seq);
  28.157    _accum_surv_rate += surv_rate;
  28.158  
  28.159 -  ++_curr_length;
  28.160 +  ++_region_num;
  28.161    return (int) ++_all_regions_allocated;
  28.162  }
  28.163  
  28.164  void
  28.165  SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
  28.166 -  guarantee( scan_only_prefix <= _curr_length, "pre-condition" );
  28.167 +  guarantee( scan_only_prefix <= _region_num, "pre-condition" );
  28.168    _scan_only_prefix = scan_only_prefix;
  28.169  }
  28.170  
  28.171  void
  28.172  SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
  28.173 -  guarantee( 0 <= age_in_group && (size_t) age_in_group < _curr_length,
  28.174 +  guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
  28.175               "pre-condition" );
  28.176    guarantee( _surv_rate[age_in_group] <= 0.00001,
  28.177               "should only update each slot once" );
  28.178 @@ -178,15 +188,15 @@
  28.179  
  28.180  void
  28.181  SurvRateGroup::all_surviving_words_recorded(bool propagate) {
  28.182 -  if (propagate && _curr_length > 0) { // conservative
  28.183 -    double surv_rate = _surv_rate_pred[_curr_length-1]->last();
  28.184 +  if (propagate && _region_num > 0) { // conservative
  28.185 +    double surv_rate = _surv_rate_pred[_region_num-1]->last();
  28.186  
  28.187  #if 0
  28.188      gclog_or_tty->print_cr("propagating %1.2lf from %d to %d",
  28.189                    surv_rate, _curr_length, _array_length - 1);
  28.190  #endif // 0
  28.191  
  28.192 -    for (size_t i = _curr_length; i < _array_length; ++i) {
  28.193 +    for (size_t i = _region_num; i < _stats_arrays_length; ++i) {
  28.194        guarantee( _surv_rate[i] <= 0.00001,
  28.195                   "the slot should not have been updated" );
  28.196        _surv_rate_pred[i]->add(surv_rate);
  28.197 @@ -195,7 +205,7 @@
  28.198  
  28.199    double accum = 0.0;
  28.200    double pred = 0.0;
  28.201 -  for (size_t i = 0; i < _array_length; ++i) {
  28.202 +  for (size_t i = 0; i < _stats_arrays_length; ++i) {
  28.203      pred = _g1p->get_new_prediction(_surv_rate_pred[i]);
  28.204      if (pred > 1.0) pred = 1.0;
  28.205      accum += pred;
  28.206 @@ -209,8 +219,8 @@
  28.207  void
  28.208  SurvRateGroup::print() {
  28.209    gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
  28.210 -                _name, _curr_length, _scan_only_prefix);
  28.211 -  for (size_t i = 0; i < _curr_length; ++i) {
  28.212 +                _name, _region_num, _scan_only_prefix);
  28.213 +  for (size_t i = 0; i < _region_num; ++i) {
  28.214      gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%%s",
  28.215                    i, _surv_rate[i] * 100.0,
  28.216                    _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,
    29.1 --- a/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Thu Feb 12 14:00:38 2009 -0800
    29.2 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Wed Feb 18 18:14:18 2009 -0800
    29.3 @@ -29,7 +29,7 @@
    29.4    G1CollectorPolicy* _g1p;
    29.5    const char* _name;
    29.6  
    29.7 -  size_t  _array_length;
    29.8 +  size_t  _stats_arrays_length;
    29.9    double* _surv_rate;
   29.10    double* _accum_surv_rate_pred;
   29.11    double  _last_pred;
   29.12 @@ -40,7 +40,7 @@
   29.13    size_t         _summary_surv_rates_max_len;
   29.14  
   29.15    int _all_regions_allocated;
   29.16 -  size_t _curr_length;
   29.17 +  size_t _region_num;
   29.18    size_t _scan_only_prefix;
   29.19    size_t _setup_seq_num;
   29.20  
   29.21 @@ -48,6 +48,7 @@
   29.22    SurvRateGroup(G1CollectorPolicy* g1p,
   29.23                  const char* name,
   29.24                  size_t summary_surv_rates_len);
   29.25 +  void reset();
   29.26    void start_adding_regions();
   29.27    void stop_adding_regions();
   29.28    void record_scan_only_prefix(size_t scan_only_prefix);
   29.29 @@ -55,22 +56,21 @@
   29.30    void all_surviving_words_recorded(bool propagate);
   29.31    const char* name() { return _name; }
   29.32  
   29.33 -  size_t region_num() { return _curr_length; }
   29.34 +  size_t region_num() { return _region_num; }
   29.35    size_t scan_only_length() { return _scan_only_prefix; }
   29.36    double accum_surv_rate_pred(int age) {
   29.37      assert(age >= 0, "must be");
   29.38 -    if ((size_t)age < _array_length)
   29.39 +    if ((size_t)age < _stats_arrays_length)
   29.40        return _accum_surv_rate_pred[age];
   29.41      else {
   29.42 -      double diff = (double) (age - _array_length + 1);
   29.43 -      return _accum_surv_rate_pred[_array_length-1] + diff * _last_pred;
   29.44 +      double diff = (double) (age - _stats_arrays_length + 1);
   29.45 +      return _accum_surv_rate_pred[_stats_arrays_length-1] + diff * _last_pred;
   29.46      }
   29.47    }
   29.48  
   29.49    double accum_surv_rate(size_t adjustment);
   29.50  
   29.51    TruncatedSeq* get_seq(size_t age) {
   29.52 -    guarantee( 0 <= age, "pre-condition" );
   29.53      if (age >= _setup_seq_num) {
   29.54        guarantee( _setup_seq_num > 0, "invariant" );
   29.55        age = _setup_seq_num-1;
    30.1 --- a/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Feb 12 14:00:38 2009 -0800
    30.2 +++ b/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Feb 18 18:14:18 2009 -0800
    30.3 @@ -48,6 +48,7 @@
    30.4  concurrentG1Refine.cpp			space.inline.hpp
    30.5  
    30.6  concurrentG1Refine.hpp			globalDefinitions.hpp
    30.7 +concurrentG1Refine.hpp			allocation.hpp
    30.8  
    30.9  concurrentG1RefineThread.cpp		concurrentG1Refine.hpp
   30.10  concurrentG1RefineThread.cpp		concurrentG1RefineThread.hpp
   30.11 @@ -172,6 +173,7 @@
   30.12  g1CollectorPolicy.cpp                   g1CollectorPolicy.hpp
   30.13  g1CollectorPolicy.cpp                   heapRegionRemSet.hpp
   30.14  g1CollectorPolicy.cpp			mutexLocker.hpp
   30.15 +g1CollectorPolicy.cpp			gcPolicyCounters.hpp
   30.16  
   30.17  g1CollectorPolicy.hpp                   collectorPolicy.hpp
   30.18  g1CollectorPolicy.hpp                   collectionSetChooser.hpp
   30.19 @@ -228,7 +230,7 @@
   30.20  g1MMUTracker.cpp			mutexLocker.hpp
   30.21  
   30.22  g1MMUTracker.hpp			debug.hpp
   30.23 -
   30.24 +g1MMUTracker.hpp			allocation.hpp
   30.25  g1RemSet.cpp				bufferingOopClosure.hpp
   30.26  g1RemSet.cpp				concurrentG1Refine.hpp
   30.27  g1RemSet.cpp				concurrentG1RefineThread.hpp
   30.28 @@ -272,6 +274,7 @@
   30.29  heapRegion.hpp                          watermark.hpp
   30.30  heapRegion.hpp				g1_specialized_oop_closures.hpp
   30.31  heapRegion.hpp				survRateGroup.hpp
   30.32 +heapRegion.hpp				ageTable.hpp
   30.33  
   30.34  heapRegionRemSet.hpp			sparsePRT.hpp
   30.35  
    31.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Feb 12 14:00:38 2009 -0800
    31.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Feb 18 18:14:18 2009 -0800
    31.3 @@ -362,6 +362,10 @@
    31.4    if (PrintHeapAtGC) {
    31.5      Universe::print_heap_after_gc();
    31.6    }
    31.7 +
    31.8 +#ifdef TRACESPINNING
    31.9 +  ParallelTaskTerminator::print_termination_counts();
   31.10 +#endif
   31.11  }
   31.12  
   31.13  bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
    32.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Feb 12 14:00:38 2009 -0800
    32.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Feb 18 18:14:18 2009 -0800
    32.3 @@ -2203,6 +2203,10 @@
    32.4                             collection_exit.ticks());
    32.5      gc_task_manager()->print_task_time_stamps();
    32.6    }
    32.7 +
    32.8 +#ifdef TRACESPINNING
    32.9 +  ParallelTaskTerminator::print_termination_counts();
   32.10 +#endif
   32.11  }
   32.12  
   32.13  bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
    33.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Feb 12 14:00:38 2009 -0800
    33.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Feb 18 18:14:18 2009 -0800
    33.3 @@ -615,6 +615,10 @@
    33.4      gc_task_manager()->print_task_time_stamps();
    33.5    }
    33.6  
    33.7 +#ifdef TRACESPINNING
    33.8 +  ParallelTaskTerminator::print_termination_counts();
    33.9 +#endif
   33.10 +
   33.11    return !promotion_failure_occurred;
   33.12  }
   33.13  
    34.1 --- a/src/share/vm/gc_implementation/shared/ageTable.cpp	Thu Feb 12 14:00:38 2009 -0800
    34.2 +++ b/src/share/vm/gc_implementation/shared/ageTable.cpp	Wed Feb 18 18:14:18 2009 -0800
    34.3 @@ -67,6 +67,12 @@
    34.4    }
    34.5  }
    34.6  
    34.7 +void ageTable::merge_par(ageTable* subTable) {
    34.8 +  for (int i = 0; i < table_size; i++) {
    34.9 +    Atomic::add_ptr(subTable->sizes[i], &sizes[i]);
   34.10 +  }
   34.11 +}
   34.12 +
   34.13  int ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
   34.14    size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
   34.15    size_t total = 0;
    35.1 --- a/src/share/vm/gc_implementation/shared/ageTable.hpp	Thu Feb 12 14:00:38 2009 -0800
    35.2 +++ b/src/share/vm/gc_implementation/shared/ageTable.hpp	Wed Feb 18 18:14:18 2009 -0800
    35.3 @@ -56,6 +56,7 @@
    35.4    // Merge another age table with the current one.  Used
    35.5    // for parallel young generation gc.
    35.6    void merge(ageTable* subTable);
    35.7 +  void merge_par(ageTable* subTable);
    35.8  
    35.9    // calculate new tenuring threshold based on age information
   35.10    int compute_tenuring_threshold(size_t survivor_capacity);
    36.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Thu Feb 12 14:00:38 2009 -0800
    36.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Feb 18 18:14:18 2009 -0800
    36.3 @@ -42,6 +42,7 @@
    36.4  class CollectedHeap : public CHeapObj {
    36.5    friend class VMStructs;
    36.6    friend class IsGCActiveMark; // Block structured external access to _is_gc_active
    36.7 +  friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe
    36.8  
    36.9  #ifdef ASSERT
   36.10    static int       _fire_out_of_memory_count;
   36.11 @@ -82,8 +83,6 @@
   36.12    // Reinitialize tlabs before resuming mutators.
   36.13    virtual void resize_all_tlabs();
   36.14  
   36.15 -  debug_only(static void check_for_valid_allocation_state();)
   36.16 -
   36.17   protected:
   36.18    // Allocate from the current thread's TLAB, with broken-out slow path.
   36.19    inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
   36.20 @@ -142,6 +141,7 @@
   36.21      PRODUCT_RETURN;
   36.22    virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
   36.23      PRODUCT_RETURN;
   36.24 +  debug_only(static void check_for_valid_allocation_state();)
   36.25  
   36.26   public:
   36.27    enum Name {
    37.1 --- a/src/share/vm/interpreter/rewriter.cpp	Thu Feb 12 14:00:38 2009 -0800
    37.2 +++ b/src/share/vm/interpreter/rewriter.cpp	Wed Feb 18 18:14:18 2009 -0800
    37.3 @@ -48,9 +48,14 @@
    37.4  
    37.5  
    37.6  // Creates a constant pool cache given an inverse_index_map
    37.7 +// This creates the constant pool cache initially in a state
    37.8 +// that is unsafe for concurrent GC processing but sets it to
    37.9 +// a safe mode before the constant pool cache is returned.
   37.10  constantPoolCacheHandle Rewriter::new_constant_pool_cache(intArray& inverse_index_map, TRAPS) {
   37.11    const int length = inverse_index_map.length();
   37.12 -  constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length, CHECK_(constantPoolCacheHandle()));
   37.13 +  constantPoolCacheOop cache = oopFactory::new_constantPoolCache(length,
   37.14 +                                             methodOopDesc::IsUnsafeConc,
   37.15 +                                             CHECK_(constantPoolCacheHandle()));
   37.16    cache->initialize(inverse_index_map);
   37.17    return constantPoolCacheHandle(THREAD, cache);
   37.18  }
    38.1 --- a/src/share/vm/libadt/dict.cpp	Thu Feb 12 14:00:38 2009 -0800
    38.2 +++ b/src/share/vm/libadt/dict.cpp	Wed Feb 18 18:14:18 2009 -0800
    38.3 @@ -346,9 +346,12 @@
    38.4    return strcmp((const char *)k1,(const char *)k2);
    38.5  }
    38.6  
    38.7 -// Slimey cheap key comparator.
    38.8 +// Cheap key comparator.
    38.9  int32 cmpkey(const void *key1, const void *key2) {
   38.10 -  return (int32)((intptr_t)key1 - (intptr_t)key2);
   38.11 +  if (key1 == key2) return 0;
   38.12 +  intptr_t delta = (intptr_t)key1 - (intptr_t)key2;
   38.13 +  if (delta > 0) return 1;
   38.14 +  return -1;
   38.15  }
   38.16  
   38.17  //=============================================================================
    39.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Thu Feb 12 14:00:38 2009 -0800
    39.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Feb 18 18:14:18 2009 -0800
    39.3 @@ -610,6 +610,10 @@
    39.4      Universe::print_heap_after_gc();
    39.5    }
    39.6  
    39.7 +#ifdef TRACESPINNING
    39.8 +  ParallelTaskTerminator::print_termination_counts();
    39.9 +#endif
   39.10 +
   39.11    if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
   39.12      tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
   39.13      vm_exit(-1);
    40.1 --- a/src/share/vm/memory/oopFactory.cpp	Thu Feb 12 14:00:38 2009 -0800
    40.2 +++ b/src/share/vm/memory/oopFactory.cpp	Wed Feb 18 18:14:18 2009 -0800
    40.3 @@ -90,9 +90,11 @@
    40.4  }
    40.5  
    40.6  
    40.7 -constantPoolCacheOop oopFactory::new_constantPoolCache(int length, TRAPS) {
    40.8 +constantPoolCacheOop oopFactory::new_constantPoolCache(int length,
    40.9 +                                                       bool is_conc_safe,
   40.10 +                                                       TRAPS) {
   40.11    constantPoolCacheKlass* ck = constantPoolCacheKlass::cast(Universe::constantPoolCacheKlassObj());
   40.12 -  return ck->allocate(length, CHECK_NULL);
   40.13 +  return ck->allocate(length, is_conc_safe, CHECK_NULL);
   40.14  }
   40.15  
   40.16  
    41.1 --- a/src/share/vm/memory/oopFactory.hpp	Thu Feb 12 14:00:38 2009 -0800
    41.2 +++ b/src/share/vm/memory/oopFactory.hpp	Wed Feb 18 18:14:18 2009 -0800
    41.3 @@ -84,7 +84,9 @@
    41.4    static constantPoolOop      new_constantPool     (int length,
    41.5                                                      bool is_conc_safe,
    41.6                                                      TRAPS);
    41.7 -  static constantPoolCacheOop new_constantPoolCache(int length, TRAPS);
    41.8 +  static constantPoolCacheOop new_constantPoolCache(int length,
    41.9 +                                                    bool is_conc_safe,
   41.10 +                                                    TRAPS);
   41.11  
   41.12    // Instance classes
   41.13    static klassOop        new_instanceKlass(int vtable_len, int itable_len, int static_field_size,
    42.1 --- a/src/share/vm/oops/cpCacheKlass.cpp	Thu Feb 12 14:00:38 2009 -0800
    42.2 +++ b/src/share/vm/oops/cpCacheKlass.cpp	Wed Feb 18 18:14:18 2009 -0800
    42.3 @@ -32,13 +32,43 @@
    42.4  }
    42.5  
    42.6  
    42.7 -constantPoolCacheOop constantPoolCacheKlass::allocate(int length, TRAPS) {
    42.8 +constantPoolCacheOop constantPoolCacheKlass::allocate(int length,
    42.9 +                                                      bool is_conc_safe,
   42.10 +                                                      TRAPS) {
   42.11    // allocate memory
   42.12    int size = constantPoolCacheOopDesc::object_size(length);
   42.13 +
   42.14    KlassHandle klass (THREAD, as_klassOop());
   42.15 -  constantPoolCacheOop cache = (constantPoolCacheOop)
   42.16 -    CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
   42.17 +
   42.18 +  // This is the original code.  The code from permanent_obj_allocate()
   42.19 +  // was in-lined to allow the setting of is_conc_safe before the klass
   42.20 +  // is installed.
   42.21 +  // constantPoolCacheOop cache = (constantPoolCacheOop)
   42.22 +  //   CollectedHeap::permanent_obj_allocate(klass, size, CHECK_NULL);
   42.23 +
   42.24 +  oop obj = CollectedHeap::permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
   42.25 +  constantPoolCacheOop cache = (constantPoolCacheOop) obj;
   42.26 +  cache->set_is_conc_safe(is_conc_safe);
   42.27 +  // The store to is_conc_safe must be visible before the klass
   42.28 +  // is set.  This should be done safely because _is_conc_safe has
   42.29 +  // been declared volatile.  If there are any problems, consider adding
   42.30 +  // OrderAccess::storestore();
   42.31 +  CollectedHeap::post_allocation_install_obj_klass(klass, obj, size);
   42.32 +  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
   42.33 +                                                              size));
   42.34 +
   42.35 +  // The length field affects the size of the object.  The allocation
   42.36 +  // above allocates the correct size (see calculation of "size") but
   42.37 +  // the size() method of the constant pool cache oop will not reflect
   42.38 +  // that size until the correct length is set.
   42.39    cache->set_length(length);
   42.40 +
   42.41 +  // The store of the length must be visible before is_conc_safe is
   42.42 +  // set to a safe state.
   42.43 +  // This should be done safely because _is_conc_safe has
   42.44 +  // been declared volatile.  If there are any problems, consider adding
   42.45 +  // OrderAccess::storestore();
   42.46 +  cache->set_is_conc_safe(methodOopDesc::IsSafeConc);
   42.47    cache->set_constant_pool(NULL);
   42.48    return cache;
   42.49  }
   42.50 @@ -114,7 +144,6 @@
   42.51    return size;
   42.52  }
   42.53  
   42.54 -
   42.55  int constantPoolCacheKlass::oop_adjust_pointers(oop obj) {
   42.56    assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
   42.57    constantPoolCacheOop cache = (constantPoolCacheOop)obj;
   42.58 @@ -131,6 +160,11 @@
   42.59    return size;
   42.60  }
   42.61  
   42.62 +bool constantPoolCacheKlass::oop_is_conc_safe(oop obj) const {
   42.63 +  assert(obj->is_constantPoolCache(), "should be constant pool");
   42.64 +  return constantPoolCacheOop(obj)->is_conc_safe();
   42.65 +}
   42.66 +
   42.67  #ifndef SERIALGC
   42.68  void constantPoolCacheKlass::oop_copy_contents(PSPromotionManager* pm,
   42.69                                                 oop obj) {
    43.1 --- a/src/share/vm/oops/cpCacheKlass.hpp	Thu Feb 12 14:00:38 2009 -0800
    43.2 +++ b/src/share/vm/oops/cpCacheKlass.hpp	Wed Feb 18 18:14:18 2009 -0800
    43.3 @@ -32,7 +32,7 @@
    43.4  
    43.5    // Allocation
    43.6    DEFINE_ALLOCATE_PERMANENT(constantPoolCacheKlass);
    43.7 -  constantPoolCacheOop allocate(int length, TRAPS);
    43.8 +  constantPoolCacheOop allocate(int length, bool is_conc_safe, TRAPS);
    43.9    static klassOop create_klass(TRAPS);
   43.10  
   43.11    // Casting from klassOop
   43.12 @@ -48,6 +48,7 @@
   43.13    // Garbage collection
   43.14    void oop_follow_contents(oop obj);
   43.15    int oop_adjust_pointers(oop obj);
   43.16 +  virtual bool oop_is_conc_safe(oop obj) const;
   43.17  
   43.18    // Parallel Scavenge and Parallel Old
   43.19    PARALLEL_GC_DECLS
    44.1 --- a/src/share/vm/oops/cpCacheOop.hpp	Thu Feb 12 14:00:38 2009 -0800
    44.2 +++ b/src/share/vm/oops/cpCacheOop.hpp	Wed Feb 18 18:14:18 2009 -0800
    44.3 @@ -291,6 +291,9 @@
    44.4   private:
    44.5    int             _length;
    44.6    constantPoolOop _constant_pool;                // the corresponding constant pool
    44.7 +  // If true, safe for concurrent GC processing,
    44.8 +  // Set unconditionally in constantPoolCacheKlass::allocate()
    44.9 +  volatile bool        _is_conc_safe;
   44.10  
   44.11    // Sizing
   44.12    debug_only(friend class ClassVerifier;)
   44.13 @@ -316,6 +319,12 @@
   44.14    constantPoolOop constant_pool() const          { return _constant_pool; }
   44.15    ConstantPoolCacheEntry* entry_at(int i) const  { assert(0 <= i && i < length(), "index out of bounds"); return base() + i; }
   44.16  
   44.17 +  // GC support
   44.18 +  // If the _length field has not been set, the size of the
   44.19 +  // constantPoolCache cannot be correctly calculated.
   44.20 +  bool is_conc_safe()                            { return _is_conc_safe; }
   44.21 +  void set_is_conc_safe(bool v)                  { _is_conc_safe = v; }
   44.22 +
   44.23    // Code generation
   44.24    static ByteSize base_offset()                  { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); }
   44.25  
    45.1 --- a/src/share/vm/opto/block.cpp	Thu Feb 12 14:00:38 2009 -0800
    45.2 +++ b/src/share/vm/opto/block.cpp	Wed Feb 18 18:14:18 2009 -0800
    45.3 @@ -880,6 +880,7 @@
    45.4  }
    45.5  
    45.6  void PhaseCFG::verify( ) const {
    45.7 +#ifdef ASSERT
    45.8    // Verify sane CFG
    45.9    for( uint i = 0; i < _num_blocks; i++ ) {
   45.10      Block *b = _blocks[i];
   45.11 @@ -894,10 +895,20 @@
   45.12                  "CreateEx must be first instruction in block" );
   45.13        }
   45.14        for( uint k = 0; k < n->req(); k++ ) {
   45.15 -        Node *use = n->in(k);
   45.16 -        if( use && use != n ) {
   45.17 -          assert( _bbs[use->_idx] || use->is_Con(),
   45.18 +        Node *def = n->in(k);
   45.19 +        if( def && def != n ) {
   45.20 +          assert( _bbs[def->_idx] || def->is_Con(),
   45.21                    "must have block; constants for debug info ok" );
   45.22 +          // Verify that instructions in the block are in correct order.
   45.23 +          // Uses must follow their definition if they are in the same block.
   45.24 +          // Mostly done to check that MachSpillCopy nodes are placed correctly
   45.25 +          // when CreateEx node is moved in build_ifg_physical().
   45.26 +          if( _bbs[def->_idx] == b &&
   45.27 +              !(b->head()->is_Loop() && n->is_Phi()) &&
   45.28 +              // See (+++) comment in reg_split.cpp
   45.29 +              !(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) {
   45.30 +            assert( b->find_node(def) < j, "uses must follow definitions" );
   45.31 +          }
   45.32          }
   45.33        }
   45.34      }
   45.35 @@ -914,6 +925,7 @@
   45.36        assert( b->_num_succs == 2, "Conditional branch must have two targets");
   45.37      }
   45.38    }
   45.39 +#endif
   45.40  }
   45.41  #endif
   45.42  
    46.1 --- a/src/share/vm/opto/c2_globals.hpp	Thu Feb 12 14:00:38 2009 -0800
    46.2 +++ b/src/share/vm/opto/c2_globals.hpp	Wed Feb 18 18:14:18 2009 -0800
    46.3 @@ -191,6 +191,9 @@
    46.4    notproduct(bool, VerifyHashTableKeys, true,                               \
    46.5            "Verify the immutability of keys in the VN hash tables")          \
    46.6                                                                              \
    46.7 +  notproduct(bool, VerifyRegisterAllocator , false,                         \
    46.8 +          "Verify Register Allocator")                                      \
    46.9 +                                                                            \
   46.10    develop_pd(intx, FLOATPRESSURE,                                           \
   46.11            "Number of float LRG's that constitute high register pressure")   \
   46.12                                                                              \
    47.1 --- a/src/share/vm/opto/cfgnode.cpp	Thu Feb 12 14:00:38 2009 -0800
    47.2 +++ b/src/share/vm/opto/cfgnode.cpp	Wed Feb 18 18:14:18 2009 -0800
    47.3 @@ -858,12 +858,18 @@
    47.4    // convert the one to the other.
    47.5    const TypePtr* ttp = _type->make_ptr();
    47.6    const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
    47.7 +  const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_klassptr() : NULL;
    47.8    bool is_intf = false;
    47.9    if (ttip != NULL) {
   47.10      ciKlass* k = ttip->klass();
   47.11      if (k->is_loaded() && k->is_interface())
   47.12        is_intf = true;
   47.13    }
   47.14 +  if (ttkp != NULL) {
   47.15 +    ciKlass* k = ttkp->klass();
   47.16 +    if (k->is_loaded() && k->is_interface())
   47.17 +      is_intf = true;
   47.18 +  }
   47.19  
   47.20    // Default case: merge all inputs
   47.21    const Type *t = Type::TOP;        // Merged type starting value
   47.22 @@ -921,6 +927,8 @@
   47.23      // uplift the type.
   47.24      if( !t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface() )
   47.25        { assert(ft == _type, ""); } // Uplift to interface
   47.26 +    else if( !t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface() )
   47.27 +      { assert(ft == _type, ""); } // Uplift to interface
   47.28      // Otherwise it's something stupid like non-overlapping int ranges
   47.29      // found on dying counted loops.
   47.30      else
   47.31 @@ -936,6 +944,7 @@
   47.32      // because the type system doesn't interact well with interfaces.
   47.33      const TypePtr *jtp = jt->make_ptr();
   47.34      const TypeInstPtr *jtip = (jtp != NULL) ? jtp->isa_instptr() : NULL;
   47.35 +    const TypeKlassPtr *jtkp = (jtp != NULL) ? jtp->isa_klassptr() : NULL;
   47.36      if( jtip && ttip ) {
   47.37        if( jtip->is_loaded() &&  jtip->klass()->is_interface() &&
   47.38            ttip->is_loaded() && !ttip->klass()->is_interface() ) {
   47.39 @@ -945,6 +954,14 @@
   47.40          jt = ft;
   47.41        }
   47.42      }
   47.43 +    if( jtkp && ttkp ) {
   47.44 +      if( jtkp->is_loaded() &&  jtkp->klass()->is_interface() &&
   47.45 +          ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
   47.46 +        assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
   47.47 +               ft->isa_narrowoop() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
   47.48 +        jt = ft;
   47.49 +      }
   47.50 +    }
   47.51      if (jt != ft && jt->base() == ft->base()) {
   47.52        if (jt->isa_int() &&
   47.53            jt->is_int()->_lo == ft->is_int()->_lo &&
    48.1 --- a/src/share/vm/opto/chaitin.cpp	Thu Feb 12 14:00:38 2009 -0800
    48.2 +++ b/src/share/vm/opto/chaitin.cpp	Wed Feb 18 18:14:18 2009 -0800
    48.3 @@ -228,6 +228,11 @@
    48.4    // them for real.
    48.5    de_ssa();
    48.6  
    48.7 +#ifdef ASSERT
    48.8 +  // Verify the graph before RA.
    48.9 +  verify(&live_arena);
   48.10 +#endif
   48.11 +
   48.12    {
   48.13      NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
   48.14      _live = NULL;                 // Mark live as being not available
   48.15 @@ -306,12 +311,6 @@
   48.16      C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
   48.17      if (C->failing())  return;
   48.18  
   48.19 -#ifdef ASSERT
   48.20 -    if( VerifyOpto ) {
   48.21 -      _cfg.verify();
   48.22 -      verify_base_ptrs(&live_arena);
   48.23 -    }
   48.24 -#endif
   48.25      NOT_PRODUCT( C->verify_graph_edges(); )
   48.26  
   48.27      compact();                  // Compact LRGs; return new lower max lrg
   48.28 @@ -340,7 +339,7 @@
   48.29      compress_uf_map_for_nodes();
   48.30  
   48.31  #ifdef ASSERT
   48.32 -    if( VerifyOpto ) _ifg->verify(this);
   48.33 +    verify(&live_arena, true);
   48.34  #endif
   48.35    } else {
   48.36      ifg.SquareUp();
   48.37 @@ -376,12 +375,6 @@
   48.38      // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
   48.39      C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split");
   48.40      if (C->failing())  return;
   48.41 -#ifdef ASSERT
   48.42 -    if( VerifyOpto ) {
   48.43 -      _cfg.verify();
   48.44 -      verify_base_ptrs(&live_arena);
   48.45 -    }
   48.46 -#endif
   48.47  
   48.48      compact();                  // Compact LRGs; return new lower max lrg
   48.49  
   48.50 @@ -412,7 +405,7 @@
   48.51      }
   48.52      compress_uf_map_for_nodes();
   48.53  #ifdef ASSERT
   48.54 -    if( VerifyOpto ) _ifg->verify(this);
   48.55 +    verify(&live_arena, true);
   48.56  #endif
   48.57      cache_lrg_info();           // Count degree of LRGs
   48.58  
   48.59 @@ -432,6 +425,11 @@
   48.60    // Peephole remove copies
   48.61    post_allocate_copy_removal();
   48.62  
   48.63 +#ifdef ASSERT
   48.64 +  // Verify the graph after RA.
   48.65 +  verify(&live_arena);
   48.66 +#endif
   48.67 +
   48.68    // max_reg is past the largest *register* used.
   48.69    // Convert that to a frame_slot number.
   48.70    if( _max_reg <= _matcher._new_SP )
   48.71 @@ -956,7 +954,7 @@
   48.72        while ((neighbor = elements.next()) != 0) {
   48.73          LRG *n = &lrgs(neighbor);
   48.74  #ifdef ASSERT
   48.75 -        if( VerifyOpto ) {
   48.76 +        if( VerifyOpto || VerifyRegisterAllocator ) {
   48.77            assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
   48.78          }
   48.79  #endif
    49.1 --- a/src/share/vm/opto/chaitin.hpp	Thu Feb 12 14:00:38 2009 -0800
    49.2 +++ b/src/share/vm/opto/chaitin.hpp	Wed Feb 18 18:14:18 2009 -0800
    49.3 @@ -491,6 +491,8 @@
    49.4    // Verify that base pointers and derived pointers are still sane
    49.5    void verify_base_ptrs( ResourceArea *a ) const;
    49.6  
    49.7 +  void verify( ResourceArea *a, bool verify_ifg = false ) const;
    49.8 +
    49.9    void dump_for_spill_split_recycle() const;
   49.10  
   49.11  public:
    50.1 --- a/src/share/vm/opto/classes.hpp	Thu Feb 12 14:00:38 2009 -0800
    50.2 +++ b/src/share/vm/opto/classes.hpp	Wed Feb 18 18:14:18 2009 -0800
    50.3 @@ -129,7 +129,7 @@
    50.4  macro(LShiftI)
    50.5  macro(LShiftL)
    50.6  macro(LoadB)
    50.7 -macro(LoadC)
    50.8 +macro(LoadUS)
    50.9  macro(LoadD)
   50.10  macro(LoadD_unaligned)
   50.11  macro(LoadF)
    51.1 --- a/src/share/vm/opto/compile.cpp	Thu Feb 12 14:00:38 2009 -0800
    51.2 +++ b/src/share/vm/opto/compile.cpp	Wed Feb 18 18:14:18 2009 -0800
    51.3 @@ -2005,7 +2005,7 @@
    51.4    case Op_StoreP:
    51.5    case Op_StoreN:
    51.6    case Op_LoadB:
    51.7 -  case Op_LoadC:
    51.8 +  case Op_LoadUS:
    51.9    case Op_LoadI:
   51.10    case Op_LoadKlass:
   51.11    case Op_LoadNKlass:
    52.1 --- a/src/share/vm/opto/divnode.cpp	Thu Feb 12 14:00:38 2009 -0800
    52.2 +++ b/src/share/vm/opto/divnode.cpp	Wed Feb 18 18:14:18 2009 -0800
    52.3 @@ -1,5 +1,5 @@
    52.4  /*
    52.5 - * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
    52.6 + * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    52.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    52.8   *
    52.9   * This code is free software; you can redistribute it and/or modify it
   52.10 @@ -244,42 +244,73 @@
   52.11  
   52.12  //---------------------long_by_long_mulhi--------------------------------------
   52.13  // Generate ideal node graph for upper half of a 64 bit x 64 bit multiplication
   52.14 -static Node *long_by_long_mulhi( PhaseGVN *phase, Node *dividend, jlong magic_const) {
   52.15 +static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_const) {
   52.16    // If the architecture supports a 64x64 mulhi, there is
   52.17    // no need to synthesize it in ideal nodes.
   52.18    if (Matcher::has_match_rule(Op_MulHiL)) {
   52.19 -    Node *v = phase->longcon(magic_const);
   52.20 +    Node* v = phase->longcon(magic_const);
   52.21      return new (phase->C, 3) MulHiLNode(dividend, v);
   52.22    }
   52.23  
   52.24 +  // Taken from Hacker's Delight, Fig. 8-2. Multiply high signed.
   52.25 +  // (http://www.hackersdelight.org/HDcode/mulhs.c)
   52.26 +  //
   52.27 +  // int mulhs(int u, int v) {
   52.28 +  //    unsigned u0, v0, w0;
   52.29 +  //    int u1, v1, w1, w2, t;
   52.30 +  //
   52.31 +  //    u0 = u & 0xFFFF;  u1 = u >> 16;
   52.32 +  //    v0 = v & 0xFFFF;  v1 = v >> 16;
   52.33 +  //    w0 = u0*v0;
   52.34 +  //    t  = u1*v0 + (w0 >> 16);
   52.35 +  //    w1 = t & 0xFFFF;
   52.36 +  //    w2 = t >> 16;
   52.37 +  //    w1 = u0*v1 + w1;
   52.38 +  //    return u1*v1 + w2 + (w1 >> 16);
   52.39 +  // }
   52.40 +  //
   52.41 +  // Note: The version above is for 32x32 multiplications, while the
   52.42 +  // following inline comments are adapted to 64x64.
   52.43 +
   52.44    const int N = 64;
   52.45  
   52.46 -  Node *u_hi = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N / 2)));
   52.47 -  Node *u_lo = phase->transform(new (phase->C, 3) AndLNode(dividend, phase->longcon(0xFFFFFFFF)));
   52.48 +  // u0 = u & 0xFFFFFFFF;  u1 = u >> 32;
   52.49 +  Node* u0 = phase->transform(new (phase->C, 3) AndLNode(dividend, phase->longcon(0xFFFFFFFF)));
   52.50 +  Node* u1 = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N / 2)));
   52.51  
   52.52 -  Node *v_hi = phase->longcon(magic_const >> N/2);
   52.53 -  Node *v_lo = phase->longcon(magic_const & 0XFFFFFFFF);
   52.54 +  // v0 = v & 0xFFFFFFFF;  v1 = v >> 32;
   52.55 +  Node* v0 = phase->longcon(magic_const & 0xFFFFFFFF);
   52.56 +  Node* v1 = phase->longcon(magic_const >> (N / 2));
   52.57  
   52.58 -  Node *hihi_product = phase->transform(new (phase->C, 3) MulLNode(u_hi, v_hi));
   52.59 -  Node *hilo_product = phase->transform(new (phase->C, 3) MulLNode(u_hi, v_lo));
   52.60 -  Node *lohi_product = phase->transform(new (phase->C, 3) MulLNode(u_lo, v_hi));
   52.61 -  Node *lolo_product = phase->transform(new (phase->C, 3) MulLNode(u_lo, v_lo));
   52.62 +  // w0 = u0*v0;
   52.63 +  Node* w0 = phase->transform(new (phase->C, 3) MulLNode(u0, v0));
   52.64  
   52.65 -  Node *t1 = phase->transform(new (phase->C, 3) URShiftLNode(lolo_product, phase->intcon(N / 2)));
   52.66 -  Node *t2 = phase->transform(new (phase->C, 3) AddLNode(hilo_product, t1));
   52.67 +  // t = u1*v0 + (w0 >> 32);
   52.68 +  Node* u1v0 = phase->transform(new (phase->C, 3) MulLNode(u1, v0));
   52.69 +  Node* temp = phase->transform(new (phase->C, 3) URShiftLNode(w0, phase->intcon(N / 2)));
   52.70 +  Node* t    = phase->transform(new (phase->C, 3) AddLNode(u1v0, temp));
   52.71  
   52.72 -  // Construct both t3 and t4 before transforming so t2 doesn't go dead
   52.73 -  // prematurely.
   52.74 -  Node *t3 = new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2));
   52.75 -  Node *t4 = new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF));
   52.76 -  t3 = phase->transform(t3);
   52.77 -  t4 = phase->transform(t4);
   52.78 +  // w1 = t & 0xFFFFFFFF;
   52.79 +  Node* w1 = new (phase->C, 3) AndLNode(t, phase->longcon(0xFFFFFFFF));
   52.80  
   52.81 -  Node *t5 = phase->transform(new (phase->C, 3) AddLNode(t4, lohi_product));
   52.82 -  Node *t6 = phase->transform(new (phase->C, 3) RShiftLNode(t5, phase->intcon(N / 2)));
   52.83 -  Node *t7 = phase->transform(new (phase->C, 3) AddLNode(t3, hihi_product));
   52.84 +  // w2 = t >> 32;
   52.85 +  Node* w2 = new (phase->C, 3) RShiftLNode(t, phase->intcon(N / 2));
   52.86  
   52.87 -  return new (phase->C, 3) AddLNode(t7, t6);
   52.88 +  // 6732154: Construct both w1 and w2 before transforming, so t
   52.89 +  // doesn't go dead prematurely.
   52.90 +  w1 = phase->transform(w1);
   52.91 +  w2 = phase->transform(w2);
   52.92 +
   52.93 +  // w1 = u0*v1 + w1;
   52.94 +  Node* u0v1 = phase->transform(new (phase->C, 3) MulLNode(u0, v1));
   52.95 +  w1         = phase->transform(new (phase->C, 3) AddLNode(u0v1, w1));
   52.96 +
   52.97 +  // return u1*v1 + w2 + (w1 >> 32);
   52.98 +  Node* u1v1  = phase->transform(new (phase->C, 3) MulLNode(u1, v1));
   52.99 +  Node* temp1 = phase->transform(new (phase->C, 3) AddLNode(u1v1, w2));
  52.100 +  Node* temp2 = phase->transform(new (phase->C, 3) RShiftLNode(w1, phase->intcon(N / 2)));
  52.101 +
  52.102 +  return new (phase->C, 3) AddLNode(temp1, temp2);
  52.103  }
  52.104  
  52.105  
  52.106 @@ -976,7 +1007,7 @@
  52.107  
  52.108    // Expand mod
  52.109    if( con >= 0 && con < max_jlong && is_power_of_2_long(con+1) ) {
  52.110 -    uint k = log2_long(con);       // Extract k
  52.111 +    uint k = exact_log2_long(con+1);  // Extract k
  52.112  
  52.113      // Basic algorithm by David Detlefs.  See fastmod_long.java for gory details.
  52.114      // Used to help a popular random number generator which does a long-mod
    53.1 --- a/src/share/vm/opto/gcm.cpp	Thu Feb 12 14:00:38 2009 -0800
    53.2 +++ b/src/share/vm/opto/gcm.cpp	Wed Feb 18 18:14:18 2009 -0800
    53.3 @@ -29,6 +29,9 @@
    53.4  #include "incls/_precompiled.incl"
    53.5  #include "incls/_gcm.cpp.incl"
    53.6  
    53.7 +// To avoid float value underflow
    53.8 +#define MIN_BLOCK_FREQUENCY 1.e-35f
    53.9 +
   53.10  //----------------------------schedule_node_into_block-------------------------
   53.11  // Insert node n into block b. Look for projections of n and make sure they
   53.12  // are in b also.
   53.13 @@ -1380,6 +1383,13 @@
   53.14      }
   53.15    }
   53.16  
   53.17 +#ifdef ASSERT
   53.18 +  for (uint i = 0; i < _num_blocks; i++ ) {
   53.19 +    Block *b = _blocks[i];
   53.20 +    assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requiers meaningful block frequency");
   53.21 +  }
   53.22 +#endif
   53.23 +
   53.24  #ifndef PRODUCT
   53.25    if (PrintCFGBlockFreq) {
   53.26      tty->print_cr("CFG Block Frequencies");
   53.27 @@ -1877,7 +1887,9 @@
   53.28    float loop_freq = _freq * trip_count();
   53.29    for (int i = 0; i < _members.length(); i++) {
   53.30      CFGElement* s = _members.at(i);
   53.31 -    s->_freq *= loop_freq;
   53.32 +    float block_freq = s->_freq * loop_freq;
   53.33 +    if (block_freq < MIN_BLOCK_FREQUENCY) block_freq = MIN_BLOCK_FREQUENCY;
   53.34 +    s->_freq = block_freq;
   53.35    }
   53.36    CFGLoop* ch = _child;
   53.37    while (ch != NULL) {
    54.1 --- a/src/share/vm/opto/graphKit.cpp	Thu Feb 12 14:00:38 2009 -0800
    54.2 +++ b/src/share/vm/opto/graphKit.cpp	Wed Feb 18 18:14:18 2009 -0800
    54.3 @@ -1836,10 +1836,7 @@
    54.4      (CardTableModRefBS*)(Universe::heap()->barrier_set());
    54.5    Node *b = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) ));
    54.6    // We store into a byte array, so do not bother to left-shift by zero
    54.7 -  // Get base of card map
    54.8 -  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte),
    54.9 -         "adjust this code");
   54.10 -  Node *c = makecon(TypeRawPtr::make((address)ct->byte_map_base));
   54.11 +  Node *c = byte_map_base_node();
   54.12    // Combine
   54.13    Node *sb_ctl = control();
   54.14    Node *sb_adr = _gvn.transform(new (C, 4) AddPNode( top()/*no base ptr*/, c, b ));
   54.15 @@ -2945,16 +2942,10 @@
   54.16  
   54.17    // Now generate allocation code
   54.18  
   54.19 -  // With escape analysis, the entire memory state is needed to be able to
   54.20 -  // eliminate the allocation.  If the allocations cannot be eliminated, this
   54.21 -  // will be optimized to the raw slice when the allocation is expanded.
   54.22 -  Node *mem;
   54.23 -  if (C->do_escape_analysis()) {
   54.24 -    mem = reset_memory();
   54.25 -    set_all_memory(mem);
   54.26 -  } else {
   54.27 -    mem = memory(Compile::AliasIdxRaw);
   54.28 -  }
   54.29 +  // The entire memory state is needed for slow path of the allocation
   54.30 +  // since GC and deoptimization can happened.
   54.31 +  Node *mem = reset_memory();
   54.32 +  set_all_memory(mem); // Create new memory state
   54.33  
   54.34    AllocateNode* alloc
   54.35      = new (C, AllocateNode::ParmLimit)
   54.36 @@ -3091,16 +3082,10 @@
   54.37  
   54.38    // Now generate allocation code
   54.39  
   54.40 -  // With escape analysis, the entire memory state is needed to be able to
   54.41 -  // eliminate the allocation.  If the allocations cannot be eliminated, this
   54.42 -  // will be optimized to the raw slice when the allocation is expanded.
   54.43 -  Node *mem;
   54.44 -  if (C->do_escape_analysis()) {
   54.45 -    mem = reset_memory();
   54.46 -    set_all_memory(mem);
   54.47 -  } else {
   54.48 -    mem = memory(Compile::AliasIdxRaw);
   54.49 -  }
   54.50 +  // The entire memory state is needed for slow path of the allocation
   54.51 +  // since GC and deoptimization can happened.
   54.52 +  Node *mem = reset_memory();
   54.53 +  set_all_memory(mem); // Create new memory state
   54.54  
   54.55    // Create the AllocateArrayNode and its result projections
   54.56    AllocateArrayNode* alloc
   54.57 @@ -3233,12 +3218,11 @@
   54.58  
   54.59    // Now some of the values
   54.60  
   54.61 -  Node* marking = __ load(no_ctrl, marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
   54.62 -  Node* index   = __ load(no_ctrl, index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
   54.63 -  Node* buffer  = __ load(no_ctrl, buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
   54.64 +  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
   54.65  
   54.66    // if (!marking)
   54.67    __ if_then(marking, BoolTest::ne, zero); {
   54.68 +    Node* index   = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
   54.69  
   54.70      const Type* t1 = adr->bottom_type();
   54.71      const Type* t2 = val->bottom_type();
   54.72 @@ -3246,6 +3230,7 @@
   54.73      Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx);
   54.74      // if (orig != NULL)
   54.75      __ if_then(orig, BoolTest::ne, null()); {
   54.76 +      Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
   54.77  
   54.78        // load original value
   54.79        // alias_idx correct??
   54.80 @@ -3365,14 +3350,6 @@
   54.81  
   54.82    const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
   54.83  
   54.84 -  // Get the address of the card table
   54.85 -  CardTableModRefBS* ct =
   54.86 -    (CardTableModRefBS*)(Universe::heap()->barrier_set());
   54.87 -  Node *card_table = __ makecon(TypeRawPtr::make((address)ct->byte_map_base));
   54.88 -  // Get base of card map
   54.89 -  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
   54.90 -
   54.91 -
   54.92    // Offsets into the thread
   54.93    const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
   54.94                                       PtrQueue::byte_offset_of_index());
   54.95 @@ -3402,7 +3379,7 @@
   54.96    Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
   54.97  
   54.98    // Combine card table base and card offset
   54.99 -  Node *card_adr = __ AddP(no_base, card_table, card_offset );
  54.100 +  Node *card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
  54.101  
  54.102    // If we know the value being stored does it cross regions?
  54.103  
    55.1 --- a/src/share/vm/opto/graphKit.hpp	Thu Feb 12 14:00:38 2009 -0800
    55.2 +++ b/src/share/vm/opto/graphKit.hpp	Wed Feb 18 18:14:18 2009 -0800
    55.3 @@ -83,6 +83,18 @@
    55.4    Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
    55.5    // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
    55.6  
    55.7 +  // Helper for byte_map_base
    55.8 +  Node* byte_map_base_node() {
    55.9 +    // Get base of card map
   55.10 +    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
   55.11 +    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
   55.12 +    if (ct->byte_map_base != NULL) {
   55.13 +      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
   55.14 +    } else {
   55.15 +      return null();
   55.16 +    }
   55.17 +  }
   55.18 +
   55.19    jint  find_int_con(Node* n, jint value_if_unknown) {
   55.20      return _gvn.find_int_con(n, value_if_unknown);
   55.21    }
    56.1 --- a/src/share/vm/opto/ifg.cpp	Thu Feb 12 14:00:38 2009 -0800
    56.2 +++ b/src/share/vm/opto/ifg.cpp	Wed Feb 18 18:14:18 2009 -0800
    56.3 @@ -471,12 +471,28 @@
    56.4      // for the "collect_gc_info" phase later.
    56.5      IndexSet liveout(_live->live(b));
    56.6      uint last_inst = b->end_idx();
    56.7 -    // Compute last phi index
    56.8 -    uint last_phi;
    56.9 -    for( last_phi = 1; last_phi < last_inst; last_phi++ )
   56.10 -      if( !b->_nodes[last_phi]->is_Phi() )
   56.11 +    // Compute first nonphi node index
   56.12 +    uint first_inst;
   56.13 +    for( first_inst = 1; first_inst < last_inst; first_inst++ )
   56.14 +      if( !b->_nodes[first_inst]->is_Phi() )
   56.15          break;
   56.16  
   56.17 +    // Spills could be inserted before CreateEx node which should be
   56.18 +    // first instruction in block after Phis. Move CreateEx up.
   56.19 +    for( uint insidx = first_inst; insidx < last_inst; insidx++ ) {
   56.20 +      Node *ex = b->_nodes[insidx];
   56.21 +      if( ex->is_SpillCopy() ) continue;
   56.22 +      if( insidx > first_inst && ex->is_Mach() &&
   56.23 +          ex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
   56.24 +        // If the CreateEx isn't above all the MachSpillCopies
   56.25 +        // then move it to the top.
   56.26 +        b->_nodes.remove(insidx);
   56.27 +        b->_nodes.insert(first_inst, ex);
   56.28 +      }
   56.29 +      // Stop once a CreateEx or any other node is found
   56.30 +      break;
   56.31 +    }
   56.32 +
   56.33      // Reset block's register pressure values for each ifg construction
   56.34      uint pressure[2], hrp_index[2];
   56.35      pressure[0] = pressure[1] = 0;
   56.36 @@ -485,7 +501,7 @@
   56.37      // Liveout things are presumed live for the whole block.  We accumulate
   56.38      // 'area' accordingly.  If they get killed in the block, we'll subtract
   56.39      // the unused part of the block from the area.
   56.40 -    int inst_count = last_inst - last_phi;
   56.41 +    int inst_count = last_inst - first_inst;
   56.42      double cost = (inst_count <= 0) ? 0.0 : b->_freq * double(inst_count);
   56.43      assert(!(cost < 0.0), "negative spill cost" );
   56.44      IndexSetIterator elements(&liveout);
    57.1 --- a/src/share/vm/opto/lcm.cpp	Thu Feb 12 14:00:38 2009 -0800
    57.2 +++ b/src/share/vm/opto/lcm.cpp	Wed Feb 18 18:14:18 2009 -0800
    57.3 @@ -107,7 +107,7 @@
    57.4      was_store = false;
    57.5      switch( mach->ideal_Opcode() ) {
    57.6      case Op_LoadB:
    57.7 -    case Op_LoadC:
    57.8 +    case Op_LoadUS:
    57.9      case Op_LoadD:
   57.10      case Op_LoadF:
   57.11      case Op_LoadI:
    58.1 --- a/src/share/vm/opto/live.cpp	Thu Feb 12 14:00:38 2009 -0800
    58.2 +++ b/src/share/vm/opto/live.cpp	Wed Feb 18 18:14:18 2009 -0800
    58.3 @@ -271,9 +271,9 @@
    58.4  
    58.5  //------------------------------verify_base_ptrs-------------------------------
    58.6  // Verify that base pointers and derived pointers are still sane.
    58.7 -// Basically, if a derived pointer is live at a safepoint, then its
    58.8 -// base pointer must be live also.
    58.9  void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
   58.10 +#ifdef ASSERT
   58.11 +  Unique_Node_List worklist(a);
   58.12    for( uint i = 0; i < _cfg._num_blocks; i++ ) {
   58.13      Block *b = _cfg._blocks[i];
   58.14      for( uint j = b->end_idx() + 1; j > 1; j-- ) {
   58.15 @@ -287,28 +287,81 @@
   58.16            // Now scan for a live derived pointer
   58.17            if (jvms->oopoff() < sfpt->req()) {
   58.18              // Check each derived/base pair
   58.19 -            for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx += 2) {
   58.20 +            for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
   58.21                Node *check = sfpt->in(idx);
   58.22 -              uint j = 0;
   58.23 +              bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
   58.24                // search upwards through spills and spill phis for AddP
   58.25 -              while(true) {
   58.26 -                if( !check ) break;
   58.27 -                int idx = check->is_Copy();
   58.28 -                if( idx ) {
   58.29 -                  check = check->in(idx);
   58.30 -                } else if( check->is_Phi() && check->_idx >= _oldphi ) {
   58.31 -                  check = check->in(1);
   58.32 -                } else
   58.33 -                  break;
   58.34 -                j++;
   58.35 -                assert(j < 100000,"Derived pointer checking in infinite loop");
   58.36 +              worklist.clear();
   58.37 +              worklist.push(check);
   58.38 +              uint k = 0;
   58.39 +              while( k < worklist.size() ) {
   58.40 +                check = worklist.at(k);
   58.41 +                assert(check,"Bad base or derived pointer");
   58.42 +                // See PhaseChaitin::find_base_for_derived() for all cases.
   58.43 +                int isc = check->is_Copy();
   58.44 +                if( isc ) {
   58.45 +                  worklist.push(check->in(isc));
   58.46 +                } else if( check->is_Phi() ) {
   58.47 +                  for (uint m = 1; m < check->req(); m++)
   58.48 +                    worklist.push(check->in(m));
   58.49 +                } else if( check->is_Con() ) {
   58.50 +                  if (is_derived) {
   58.51 +                    // Derived is NULL+offset
   58.52 +                    assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer");
   58.53 +                  } else {
   58.54 +                    assert(check->bottom_type()->is_ptr()->_offset == 0,"Bad base pointer");
   58.55 +                    // Base either ConP(NULL) or loadConP
   58.56 +                    if (check->is_Mach()) {
   58.57 +                      assert(check->as_Mach()->ideal_Opcode() == Op_ConP,"Bad base pointer");
   58.58 +                    } else {
   58.59 +                      assert(check->Opcode() == Op_ConP &&
   58.60 +                             check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer");
   58.61 +                    }
   58.62 +                  }
   58.63 +                } else if( check->bottom_type()->is_ptr()->_offset == 0 ) {
   58.64 +                  if(check->is_Proj() || check->is_Mach() &&
   58.65 +                     (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
   58.66 +                      check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
   58.67 +                      check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
   58.68 +                      check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
   58.69 +#ifdef _LP64
   58.70 +                      UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
   58.71 +                      UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
   58.72 +#endif
   58.73 +                      check->as_Mach()->ideal_Opcode() == Op_LoadP ||
   58.74 +                      check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
   58.75 +                    // Valid nodes
   58.76 +                  } else {
   58.77 +                    check->dump();
   58.78 +                    assert(false,"Bad base or derived pointer");
   58.79 +                  }
   58.80 +                } else {
   58.81 +                  assert(is_derived,"Bad base pointer");
   58.82 +                  assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP,"Bad derived pointer");
   58.83 +                }
   58.84 +                k++;
   58.85 +                assert(k < 100000,"Derived pointer checking in infinite loop");
   58.86                } // End while
   58.87 -              assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP,"Bad derived pointer")
   58.88              }
   58.89            } // End of check for derived pointers
   58.90          } // End of Kcheck for debug info
   58.91        } // End of if found a safepoint
   58.92      } // End of forall instructions in block
   58.93    } // End of forall blocks
   58.94 +#endif
   58.95  }
   58.96 +
   58.97 +//------------------------------verify-------------------------------------
   58.98 +// Verify that graphs and base pointers are still sane.
   58.99 +void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
  58.100 +#ifdef ASSERT
  58.101 +  if( VerifyOpto || VerifyRegisterAllocator ) {
  58.102 +    _cfg.verify();
  58.103 +    verify_base_ptrs(a);
  58.104 +    if(verify_ifg)
  58.105 +      _ifg->verify(this);
  58.106 +  }
  58.107  #endif
  58.108 +}
  58.109 +
  58.110 +#endif
    59.1 --- a/src/share/vm/opto/loopnode.cpp	Thu Feb 12 14:00:38 2009 -0800
    59.2 +++ b/src/share/vm/opto/loopnode.cpp	Wed Feb 18 18:14:18 2009 -0800
    59.3 @@ -2654,7 +2654,7 @@
    59.4      case Op_ModF:
    59.5      case Op_ModD:
    59.6      case Op_LoadB:              // Same with Loads; they can sink
    59.7 -    case Op_LoadC:              // during loop optimizations.
    59.8 +    case Op_LoadUS:             // during loop optimizations.
    59.9      case Op_LoadD:
   59.10      case Op_LoadF:
   59.11      case Op_LoadI:
    60.1 --- a/src/share/vm/opto/macro.cpp	Thu Feb 12 14:00:38 2009 -0800
    60.2 +++ b/src/share/vm/opto/macro.cpp	Wed Feb 18 18:14:18 2009 -0800
    60.3 @@ -952,13 +952,6 @@
    60.4    Node* klass_node        = alloc->in(AllocateNode::KlassNode);
    60.5    Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
    60.6  
    60.7 -  // With escape analysis, the entire memory state was needed to be able to
    60.8 -  // eliminate the allocation.  Since the allocations cannot be eliminated,
    60.9 -  // optimize it to the raw slice.
   60.10 -  if (mem->is_MergeMem()) {
   60.11 -    mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
   60.12 -  }
   60.13 -
   60.14    assert(ctrl != NULL, "must have control");
   60.15    // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
   60.16    // they will not be used if "always_slow" is set
   60.17 @@ -1016,6 +1009,11 @@
   60.18    Node *slow_mem = mem;  // save the current memory state for slow path
   60.19    // generate the fast allocation code unless we know that the initial test will always go slow
   60.20    if (!always_slow) {
   60.21 +    // Fast path modifies only raw memory.
   60.22 +    if (mem->is_MergeMem()) {
   60.23 +      mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
   60.24 +    }
   60.25 +
   60.26      Node* eden_top_adr;
   60.27      Node* eden_end_adr;
   60.28  
   60.29 @@ -1239,8 +1237,6 @@
   60.30      }
   60.31    }
   60.32  
   60.33 -  mem = result_phi_rawmem;
   60.34 -
   60.35    // An allocate node has separate i_o projections for the uses on the control and i_o paths
   60.36    // Replace uses of the control i_o projection with result_phi_i_o (unless we are only generating a slow call)
   60.37    if (_ioproj_fallthrough == NULL) {
    61.1 --- a/src/share/vm/opto/matcher.cpp	Thu Feb 12 14:00:38 2009 -0800
    61.2 +++ b/src/share/vm/opto/matcher.cpp	Wed Feb 18 18:14:18 2009 -0800
    61.3 @@ -1824,7 +1824,7 @@
    61.4          mem_op = true;
    61.5          break;
    61.6        case Op_LoadB:
    61.7 -      case Op_LoadC:
    61.8 +      case Op_LoadUS:
    61.9        case Op_LoadD:
   61.10        case Op_LoadF:
   61.11        case Op_LoadI:
    62.1 --- a/src/share/vm/opto/memnode.cpp	Thu Feb 12 14:00:38 2009 -0800
    62.2 +++ b/src/share/vm/opto/memnode.cpp	Wed Feb 18 18:14:18 2009 -0800
    62.3 @@ -779,14 +779,14 @@
    62.4           "use LoadRangeNode instead");
    62.5    switch (bt) {
    62.6    case T_BOOLEAN:
    62.7 -  case T_BYTE:    return new (C, 3) LoadBNode(ctl, mem, adr, adr_type, rt->is_int()    );
    62.8 -  case T_INT:     return new (C, 3) LoadINode(ctl, mem, adr, adr_type, rt->is_int()    );
    62.9 -  case T_CHAR:    return new (C, 3) LoadCNode(ctl, mem, adr, adr_type, rt->is_int()    );
   62.10 -  case T_SHORT:   return new (C, 3) LoadSNode(ctl, mem, adr, adr_type, rt->is_int()    );
   62.11 -  case T_LONG:    return new (C, 3) LoadLNode(ctl, mem, adr, adr_type, rt->is_long()   );
   62.12 -  case T_FLOAT:   return new (C, 3) LoadFNode(ctl, mem, adr, adr_type, rt              );
   62.13 -  case T_DOUBLE:  return new (C, 3) LoadDNode(ctl, mem, adr, adr_type, rt              );
   62.14 -  case T_ADDRESS: return new (C, 3) LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr()    );
   62.15 +  case T_BYTE:    return new (C, 3) LoadBNode (ctl, mem, adr, adr_type, rt->is_int()    );
   62.16 +  case T_INT:     return new (C, 3) LoadINode (ctl, mem, adr, adr_type, rt->is_int()    );
   62.17 +  case T_CHAR:    return new (C, 3) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int()    );
   62.18 +  case T_SHORT:   return new (C, 3) LoadSNode (ctl, mem, adr, adr_type, rt->is_int()    );
   62.19 +  case T_LONG:    return new (C, 3) LoadLNode (ctl, mem, adr, adr_type, rt->is_long()   );
   62.20 +  case T_FLOAT:   return new (C, 3) LoadFNode (ctl, mem, adr, adr_type, rt              );
   62.21 +  case T_DOUBLE:  return new (C, 3) LoadDNode (ctl, mem, adr, adr_type, rt              );
   62.22 +  case T_ADDRESS: return new (C, 3) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr()    );
   62.23    case T_OBJECT:
   62.24  #ifdef _LP64
   62.25      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
   62.26 @@ -1076,13 +1076,14 @@
   62.27        // of the original value.
   62.28        Node* mem_phi = in(Memory);
   62.29        Node* offset = in(Address)->in(AddPNode::Offset);
   62.30 +      Node* region = base->in(0);
   62.31  
   62.32        Node* in1 = clone();
   62.33        Node* in1_addr = in1->in(Address)->clone();
   62.34        in1_addr->set_req(AddPNode::Base, base->in(allocation_index));
   62.35        in1_addr->set_req(AddPNode::Address, base->in(allocation_index));
   62.36        in1_addr->set_req(AddPNode::Offset, offset);
   62.37 -      in1->set_req(0, base->in(allocation_index));
   62.38 +      in1->set_req(0, region->in(allocation_index));
   62.39        in1->set_req(Address, in1_addr);
   62.40        in1->set_req(Memory, mem_phi->in(allocation_index));
   62.41  
   62.42 @@ -1091,7 +1092,7 @@
   62.43        in2_addr->set_req(AddPNode::Base, base->in(load_index));
   62.44        in2_addr->set_req(AddPNode::Address, base->in(load_index));
   62.45        in2_addr->set_req(AddPNode::Offset, offset);
   62.46 -      in2->set_req(0, base->in(load_index));
   62.47 +      in2->set_req(0, region->in(load_index));
   62.48        in2->set_req(Address, in2_addr);
   62.49        in2->set_req(Memory, mem_phi->in(load_index));
   62.50  
   62.51 @@ -1100,7 +1101,7 @@
   62.52        in2_addr = phase->transform(in2_addr);
   62.53        in2 =      phase->transform(in2);
   62.54  
   62.55 -      PhiNode* result = PhiNode::make_blank(base->in(0), this);
   62.56 +      PhiNode* result = PhiNode::make_blank(region, this);
   62.57        result->set_req(allocation_index, in1);
   62.58        result->set_req(load_index, in2);
   62.59        return result;
   62.60 @@ -1303,6 +1304,7 @@
   62.61      Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
   62.62      if (base != NULL
   62.63          && phase->type(base)->higher_equal(TypePtr::NOTNULL)
   62.64 +        && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw
   62.65          && all_controls_dominate(base, phase->C->start())) {
   62.66        // A method-invariant, non-null address (constant or 'this' argument).
   62.67        set_req(MemNode::Control, NULL);
   62.68 @@ -1356,7 +1358,7 @@
   62.69    // Steps (a), (b):  Walk past independent stores to find an exact match.
   62.70    if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
   62.71      // (c) See if we can fold up on the spot, but don't fold up here.
   62.72 -    // Fold-up might require truncation (for LoadB/LoadS/LoadC) or
   62.73 +    // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
   62.74      // just return a prior value, which is done by Identity calls.
   62.75      if (can_see_stored_value(prev_mem, phase)) {
   62.76        // Make ready for step (d):
   62.77 @@ -1605,14 +1607,14 @@
   62.78    return LoadNode::Ideal(phase, can_reshape);
   62.79  }
   62.80  
   62.81 -//--------------------------LoadCNode::Ideal--------------------------------------
   62.82 +//--------------------------LoadUSNode::Ideal-------------------------------------
   62.83  //
   62.84  //  If the previous store is to the same address as this load,
   62.85  //  and the value stored was larger than a char, replace this load
   62.86  //  with the value stored truncated to a char.  If no truncation is
   62.87  //  needed, the replacement is done in LoadNode::Identity().
   62.88  //
   62.89 -Node *LoadCNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   62.90 +Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   62.91    Node* mem = in(MemNode::Memory);
   62.92    Node* value = can_see_stored_value(mem,phase);
   62.93    if( value && !phase->type(value)->higher_equal( _type ) )
    63.1 --- a/src/share/vm/opto/memnode.hpp	Thu Feb 12 14:00:38 2009 -0800
    63.2 +++ b/src/share/vm/opto/memnode.hpp	Wed Feb 18 18:14:18 2009 -0800
    63.3 @@ -207,11 +207,11 @@
    63.4    virtual BasicType memory_type() const { return T_BYTE; }
    63.5  };
    63.6  
    63.7 -//------------------------------LoadCNode--------------------------------------
    63.8 -// Load a char (16bits unsigned) from memory
    63.9 -class LoadCNode : public LoadNode {
   63.10 +//------------------------------LoadUSNode-------------------------------------
   63.11 +// Load an unsigned short/char (16bits unsigned) from memory
   63.12 +class LoadUSNode : public LoadNode {
   63.13  public:
   63.14 -  LoadCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
   63.15 +  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
   63.16      : LoadNode(c,mem,adr,at,ti) {}
   63.17    virtual int Opcode() const;
   63.18    virtual uint ideal_reg() const { return Op_RegI; }
    64.1 --- a/src/share/vm/opto/mulnode.cpp	Thu Feb 12 14:00:38 2009 -0800
    64.2 +++ b/src/share/vm/opto/mulnode.cpp	Wed Feb 18 18:14:18 2009 -0800
    64.3 @@ -1,5 +1,5 @@
    64.4  /*
    64.5 - * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
    64.6 + * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    64.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    64.8   *
    64.9   * This code is free software; you can redistribute it and/or modify it
   64.10 @@ -442,16 +442,17 @@
   64.11          return load;
   64.12      }
   64.13      uint lop = load->Opcode();
   64.14 -    if( lop == Op_LoadC &&
   64.15 +    if( lop == Op_LoadUS &&
   64.16          con == 0x0000FFFF )     // Already zero-extended
   64.17        return load;
   64.18      // Masking off the high bits of a unsigned-shift-right is not
   64.19      // needed either.
   64.20      if( lop == Op_URShiftI ) {
   64.21        const TypeInt *t12 = phase->type( load->in(2) )->isa_int();
   64.22 -      if( t12 && t12->is_con() ) {
   64.23 -        int shift_con = t12->get_con();
   64.24 -        int mask = max_juint >> shift_con;
   64.25 +      if( t12 && t12->is_con() ) {  // Shift is by a constant
   64.26 +        int shift = t12->get_con();
   64.27 +        shift &= BitsPerJavaInteger - 1;  // semantics of Java shifts
   64.28 +        int mask = max_juint >> shift;
   64.29          if( (mask&con) == mask )  // If AND is useless, skip it
   64.30            return load;
   64.31        }
   64.32 @@ -470,19 +471,19 @@
   64.33    uint lop = load->Opcode();
   64.34  
   64.35    // Masking bits off of a Character?  Hi bits are already zero.
   64.36 -  if( lop == Op_LoadC &&
   64.37 +  if( lop == Op_LoadUS &&
   64.38        (mask & 0xFFFF0000) )     // Can we make a smaller mask?
   64.39      return new (phase->C, 3) AndINode(load,phase->intcon(mask&0xFFFF));
   64.40  
   64.41    // Masking bits off of a Short?  Loading a Character does some masking
   64.42    if( lop == Op_LoadS &&
   64.43        (mask & 0xFFFF0000) == 0 ) {
   64.44 -    Node *ldc = new (phase->C, 3) LoadCNode(load->in(MemNode::Control),
   64.45 +    Node *ldus = new (phase->C, 3) LoadUSNode(load->in(MemNode::Control),
   64.46                                    load->in(MemNode::Memory),
   64.47                                    load->in(MemNode::Address),
   64.48                                    load->adr_type());
   64.49 -    ldc = phase->transform(ldc);
   64.50 -    return new (phase->C, 3) AndINode(ldc,phase->intcon(mask&0xFFFF));
   64.51 +    ldus = phase->transform(ldus);
   64.52 +    return new (phase->C, 3) AndINode(ldus, phase->intcon(mask&0xFFFF));
   64.53    }
   64.54  
   64.55    // Masking sign bits off of a Byte?  Let the matcher use an unsigned load
   64.56 @@ -579,9 +580,10 @@
   64.57      // needed either.
   64.58      if( lop == Op_URShiftL ) {
   64.59        const TypeInt *t12 = phase->type( usr->in(2) )->isa_int();
   64.60 -      if( t12 && t12->is_con() ) {
   64.61 -        int shift_con = t12->get_con();
   64.62 -        jlong mask = max_julong >> shift_con;
   64.63 +      if( t12 && t12->is_con() ) {  // Shift is by a constant
   64.64 +        int shift = t12->get_con();
   64.65 +        shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
   64.66 +        jlong mask = max_julong >> shift;
   64.67          if( (mask&con) == mask )  // If AND is useless, skip it
   64.68            return usr;
   64.69        }
   64.70 @@ -605,8 +607,8 @@
   64.71      const TypeInt *t12 = phase->type(rsh->in(2))->isa_int();
   64.72      if( t12 && t12->is_con() ) { // Shift is by a constant
   64.73        int shift = t12->get_con();
   64.74 -      shift &= (BitsPerJavaInteger*2)-1;  // semantics of Java shifts
   64.75 -      const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaInteger*2 - shift)) -1);
   64.76 +      shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
   64.77 +      const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - shift)) -1);
   64.78        // If the AND'ing of the 2 masks has no bits, then only original shifted
   64.79        // bits survive.  NO sign-extension bits survive the maskings.
   64.80        if( (sign_bits_mask & mask) == 0 ) {
   64.81 @@ -786,7 +788,7 @@
   64.82  
   64.83    // Check for ((x & ((CONST64(1)<<(64-c0))-1)) << c0) which ANDs off high bits
   64.84    // before shifting them away.
   64.85 -  const jlong bits_mask = ((jlong)CONST64(1) << (jlong)(BitsPerJavaInteger*2 - con)) - CONST64(1);
   64.86 +  const jlong bits_mask = ((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - con)) - CONST64(1);
   64.87    if( add1_op == Op_AndL &&
   64.88        phase->type(add1->in(2)) == TypeLong::make( bits_mask ) )
   64.89      return new (phase->C, 3) LShiftLNode( add1->in(1), in(2) );
   64.90 @@ -820,7 +822,7 @@
   64.91      return TypeLong::LONG;
   64.92  
   64.93    uint shift = r2->get_con();
   64.94 -  shift &= (BitsPerJavaInteger*2)-1;  // semantics of Java shifts
   64.95 +  shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
   64.96    // Shift by a multiple of 64 does nothing:
   64.97    if (shift == 0)  return t1;
   64.98  
   64.99 @@ -913,7 +915,7 @@
  64.100        set_req(2, phase->intcon(0));
  64.101        return this;
  64.102      }
  64.103 -    else if( ld->Opcode() == Op_LoadC )
  64.104 +    else if( ld->Opcode() == Op_LoadUS )
  64.105        // Replace zero-extension-load with sign-extension-load
  64.106        return new (phase->C, 3) LoadSNode( ld->in(MemNode::Control),
  64.107                                  ld->in(MemNode::Memory),
  64.108 @@ -1235,7 +1237,7 @@
  64.109    if ( con == 0 ) return NULL;  // let Identity() handle a 0 shift count
  64.110                                // note: mask computation below does not work for 0 shift count
  64.111    // We'll be wanting the right-shift amount as a mask of that many bits
  64.112 -  const jlong mask = (((jlong)CONST64(1) << (jlong)(BitsPerJavaInteger*2 - con)) -1);
  64.113 +  const jlong mask = (((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - con)) -1);
  64.114  
  64.115    // Check for ((x << z) + Y) >>> z.  Replace with x + con>>>z
  64.116    // The idiom for rounding to a power of 2 is "(Q+(2^z-1)) >>> z".
  64.117 @@ -1302,7 +1304,7 @@
  64.118  
  64.119    if (r2->is_con()) {
  64.120      uint shift = r2->get_con();
  64.121 -    shift &= (2*BitsPerJavaInteger)-1;  // semantics of Java shifts
  64.122 +    shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
  64.123      // Shift by a multiple of 64 does nothing:
  64.124      if (shift == 0)  return t1;
  64.125      // Calculate reasonably aggressive bounds for the result.
  64.126 @@ -1325,7 +1327,7 @@
  64.127      const TypeLong* tl = TypeLong::make(lo, hi, MAX2(r1->_widen,r2->_widen));
  64.128      #ifdef ASSERT
  64.129      // Make sure we get the sign-capture idiom correct.
  64.130 -    if (shift == (2*BitsPerJavaInteger)-1) {
  64.131 +    if (shift == BitsPerJavaLong - 1) {
  64.132        if (r1->_lo >= 0) assert(tl == TypeLong::ZERO, ">>>63 of + is 0");
  64.133        if (r1->_hi < 0)  assert(tl == TypeLong::ONE,  ">>>63 of - is +1");
  64.134      }
    65.1 --- a/src/share/vm/opto/superword.cpp	Thu Feb 12 14:00:38 2009 -0800
    65.2 +++ b/src/share/vm/opto/superword.cpp	Wed Feb 18 18:14:18 2009 -0800
    65.3 @@ -1444,7 +1444,7 @@
    65.4  // (Start, end] half-open range defining which operands are vector
    65.5  void SuperWord::vector_opd_range(Node* n, uint* start, uint* end) {
    65.6    switch (n->Opcode()) {
    65.7 -  case Op_LoadB:   case Op_LoadC:
    65.8 +  case Op_LoadB:   case Op_LoadUS:
    65.9    case Op_LoadI:   case Op_LoadL:
   65.10    case Op_LoadF:   case Op_LoadD:
   65.11    case Op_LoadP:
    66.1 --- a/src/share/vm/opto/type.cpp	Thu Feb 12 14:00:38 2009 -0800
    66.2 +++ b/src/share/vm/opto/type.cpp	Wed Feb 18 18:14:18 2009 -0800
    66.3 @@ -2471,6 +2471,8 @@
    66.4    const Type* ft = join(kills);
    66.5    const TypeInstPtr* ftip = ft->isa_instptr();
    66.6    const TypeInstPtr* ktip = kills->isa_instptr();
    66.7 +  const TypeKlassPtr* ftkp = ft->isa_klassptr();
    66.8 +  const TypeKlassPtr* ktkp = kills->isa_klassptr();
    66.9  
   66.10    if (ft->empty()) {
   66.11      // Check for evil case of 'this' being a class and 'kills' expecting an
   66.12 @@ -2484,6 +2486,8 @@
   66.13      // uplift the type.
   66.14      if (!empty() && ktip != NULL && ktip->is_loaded() && ktip->klass()->is_interface())
   66.15        return kills;             // Uplift to interface
   66.16 +    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
   66.17 +      return kills;             // Uplift to interface
   66.18  
   66.19      return Type::TOP;           // Canonical empty value
   66.20    }
   66.21 @@ -2499,6 +2503,12 @@
   66.22      // Happens in a CTW of rt.jar, 320-341, no extra flags
   66.23      return ktip->cast_to_ptr_type(ftip->ptr());
   66.24    }
   66.25 +  if (ftkp != NULL && ktkp != NULL &&
   66.26 +      ftkp->is_loaded() &&  ftkp->klass()->is_interface() &&
   66.27 +      ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
   66.28 +    // Happens in a CTW of rt.jar, 320-341, no extra flags
   66.29 +    return ktkp->cast_to_ptr_type(ftkp->ptr());
   66.30 +  }
   66.31  
   66.32    return ft;
   66.33  }
   66.34 @@ -3657,7 +3667,7 @@
   66.35  
   66.36  //------------------------------cast_to_ptr_type-------------------------------
   66.37  const Type *TypeKlassPtr::cast_to_ptr_type(PTR ptr) const {
   66.38 -  assert(_base == OopPtr, "subclass must override cast_to_ptr_type");
   66.39 +  assert(_base == KlassPtr, "subclass must override cast_to_ptr_type");
   66.40    if( ptr == _ptr ) return this;
   66.41    return make(ptr, _klass, _offset);
   66.42  }
    67.1 --- a/src/share/vm/opto/type.hpp	Thu Feb 12 14:00:38 2009 -0800
    67.2 +++ b/src/share/vm/opto/type.hpp	Wed Feb 18 18:14:18 2009 -0800
    67.3 @@ -882,6 +882,8 @@
    67.4  public:
    67.5    ciSymbol* name()  const { return _klass->name(); }
    67.6  
    67.7 +  bool  is_loaded() const { return _klass->is_loaded(); }
    67.8 +
    67.9    // ptr to klass 'k'
   67.10    static const TypeKlassPtr *make( ciKlass* k ) { return make( TypePtr::Constant, k, 0); }
   67.11    // ptr to klass 'k' with offset
    68.1 --- a/src/share/vm/opto/vectornode.cpp	Thu Feb 12 14:00:38 2009 -0800
    68.2 +++ b/src/share/vm/opto/vectornode.cpp	Wed Feb 18 18:14:18 2009 -0800
    68.3 @@ -239,7 +239,7 @@
    68.4      return Op_XorV;
    68.5  
    68.6    case Op_LoadB:
    68.7 -  case Op_LoadC:
    68.8 +  case Op_LoadUS:
    68.9    case Op_LoadS:
   68.10    case Op_LoadI:
   68.11    case Op_LoadL:
   68.12 @@ -269,7 +269,7 @@
   68.13      case 16:       return Op_Load16B;
   68.14      }
   68.15      break;
   68.16 -  case Op_LoadC:
   68.17 +  case Op_LoadUS:
   68.18      switch (vlen) {
   68.19      case  2:       return Op_Load2C;
   68.20      case  4:       return Op_Load4C;
    69.1 --- a/src/share/vm/runtime/arguments.cpp	Thu Feb 12 14:00:38 2009 -0800
    69.2 +++ b/src/share/vm/runtime/arguments.cpp	Wed Feb 18 18:14:18 2009 -0800
    69.3 @@ -2489,7 +2489,7 @@
    69.4      vm_args.version = JNI_VERSION_1_2;
    69.5      vm_args.options = options;
    69.6      vm_args.nOptions = i;
    69.7 -    vm_args.ignoreUnrecognized = false;
    69.8 +    vm_args.ignoreUnrecognized = IgnoreUnrecognizedVMOptions;
    69.9  
   69.10      if (PrintVMOptions) {
   69.11        const char* tail;
   69.12 @@ -2536,13 +2536,12 @@
   69.13  
   69.14    // If flag "-XX:Flags=flags-file" is used it will be the first option to be processed.
   69.15    bool settings_file_specified = false;
   69.16 +  const char* flags_file;
   69.17    int index;
   69.18    for (index = 0; index < args->nOptions; index++) {
   69.19      const JavaVMOption *option = args->options + index;
   69.20      if (match_option(option, "-XX:Flags=", &tail)) {
   69.21 -      if (!process_settings_file(tail, true, args->ignoreUnrecognized)) {
   69.22 -        return JNI_EINVAL;
   69.23 -      }
   69.24 +      flags_file = tail;
   69.25        settings_file_specified = true;
   69.26      }
   69.27      if (match_option(option, "-XX:+PrintVMOptions", &tail)) {
   69.28 @@ -2551,6 +2550,24 @@
   69.29      if (match_option(option, "-XX:-PrintVMOptions", &tail)) {
   69.30        PrintVMOptions = false;
   69.31      }
   69.32 +    if (match_option(option, "-XX:+IgnoreUnrecognizedVMOptions", &tail)) {
   69.33 +      IgnoreUnrecognizedVMOptions = true;
   69.34 +    }
   69.35 +    if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions", &tail)) {
   69.36 +      IgnoreUnrecognizedVMOptions = false;
   69.37 +    }
   69.38 +  }
   69.39 +
   69.40 +  if (IgnoreUnrecognizedVMOptions) {
   69.41 +    // uncast const to modify the flag args->ignoreUnrecognized
   69.42 +    *(jboolean*)(&args->ignoreUnrecognized) = true;
   69.43 +  }
   69.44 +
   69.45 +  // Parse specified settings file
   69.46 +  if (settings_file_specified) {
   69.47 +    if (!process_settings_file(flags_file, true, args->ignoreUnrecognized)) {
   69.48 +      return JNI_EINVAL;
   69.49 +    }
   69.50    }
   69.51  
   69.52    // Parse default .hotspotrc settings file
    70.1 --- a/src/share/vm/runtime/globals.hpp	Thu Feb 12 14:00:38 2009 -0800
    70.2 +++ b/src/share/vm/runtime/globals.hpp	Wed Feb 18 18:14:18 2009 -0800
    70.3 @@ -1426,10 +1426,10 @@
    70.4    develop(bool, CMSOverflowEarlyRestoration, false,                         \
    70.5            "Whether preserved marks should be restored early")               \
    70.6                                                                              \
    70.7 -  product(uintx, CMSMarkStackSize, 32*K,                                    \
    70.8 +  product(uintx, CMSMarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),           \
    70.9            "Size of CMS marking stack")                                      \
   70.10                                                                              \
   70.11 -  product(uintx, CMSMarkStackSizeMax, 4*M,                                  \
   70.12 +  product(uintx, CMSMarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),       \
   70.13            "Max size of CMS marking stack")                                  \
   70.14                                                                              \
   70.15    notproduct(bool, CMSMarkStackOverflowALot, false,                         \
   70.16 @@ -1655,6 +1655,13 @@
   70.17    develop(uintx, WorkStealingYieldsBeforeSleep, 1000,                       \
   70.18            "Number of yields before a sleep is done during workstealing")    \
   70.19                                                                              \
   70.20 +  develop(uintx, WorkStealingHardSpins, 4096,                               \
   70.21 +          "Number of iterations in a spin loop between checks on "          \
   70.22 +          "time out of hard spin")                                          \
   70.23 +                                                                            \
   70.24 +  develop(uintx, WorkStealingSpinToYieldRatio, 10,                          \
   70.25 +          "Ratio of hard spins to calls to yield")                          \
   70.26 +                                                                            \
   70.27    product(uintx, PreserveMarkStackSize, 1024,                               \
   70.28             "Size for stack used in promotion failure handling")             \
   70.29                                                                              \
   70.30 @@ -2187,6 +2194,9 @@
   70.31    product(bool, PrintVMOptions, trueInDebug,                                \
   70.32           "print VM flag settings")                                          \
   70.33                                                                              \
   70.34 +  product(bool, IgnoreUnrecognizedVMOptions, false,                         \
   70.35 +         "Ignore unrecognized VM options")                                  \
   70.36 +                                                                            \
   70.37    diagnostic(bool, SerializeVMOutput, true,                                 \
   70.38           "Use a mutex to serialize output to tty and hotspot.log")          \
   70.39                                                                              \
    71.1 --- a/src/share/vm/runtime/os.cpp	Thu Feb 12 14:00:38 2009 -0800
    71.2 +++ b/src/share/vm/runtime/os.cpp	Wed Feb 18 18:14:18 2009 -0800
    71.3 @@ -74,13 +74,11 @@
    71.4    const int milliseconds_after_second =
    71.5      milliseconds_since_19700101 % milliseconds_per_microsecond;
    71.6    // Convert the time value to a tm and timezone variable
    71.7 -  const struct tm *time_struct_temp = localtime(&seconds_since_19700101);
    71.8 -  if (time_struct_temp == NULL) {
    71.9 -    assert(false, "Failed localtime");
   71.10 +  struct tm time_struct;
   71.11 +  if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
   71.12 +    assert(false, "Failed localtime_pd");
   71.13      return NULL;
   71.14    }
   71.15 -  // Save the results of localtime
   71.16 -  const struct tm time_struct = *time_struct_temp;
   71.17    const time_t zone = timezone;
   71.18  
   71.19    // If daylight savings time is in effect,
   71.20 @@ -93,10 +91,10 @@
   71.21      UTC_to_local = UTC_to_local - seconds_per_hour;
   71.22    }
   71.23    // Compute the time zone offset.
   71.24 -  //    localtime(3C) sets timezone to the difference (in seconds)
   71.25 +  //    localtime_pd() sets timezone to the difference (in seconds)
   71.26    //    between UTC and and local time.
   71.27    //    ISO 8601 says we need the difference between local time and UTC,
   71.28 -  //    we change the sign of the localtime(3C) result.
   71.29 +  //    we change the sign of the localtime_pd() result.
   71.30    const time_t local_to_UTC = -(UTC_to_local);
   71.31    // Then we have to figure out if if we are ahead (+) or behind (-) UTC.
   71.32    char sign_local_to_UTC = '+';
    72.1 --- a/src/share/vm/runtime/os.hpp	Thu Feb 12 14:00:38 2009 -0800
    72.2 +++ b/src/share/vm/runtime/os.hpp	Wed Feb 18 18:14:18 2009 -0800
    72.3 @@ -120,7 +120,8 @@
    72.4    // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
    72.5    // It is MT safe, but not async-safe, as reading time zone
    72.6    // information may require a lock on some platforms.
    72.7 -  static char* local_time_string(char *buf, size_t buflen);
    72.8 +  static char*      local_time_string(char *buf, size_t buflen);
    72.9 +  static struct tm* localtime_pd     (const time_t* clock, struct tm*  res);
   72.10    // Fill in buffer with current local time as an ISO-8601 string.
   72.11    // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
   72.12    // Returns buffer, or NULL if it failed.
    73.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Thu Feb 12 14:00:38 2009 -0800
    73.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Wed Feb 18 18:14:18 2009 -0800
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
    73.6 + * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -74,6 +74,7 @@
   73.11  extern int BitsPerHeapOop;
   73.12  
   73.13  const int BitsPerJavaInteger = 32;
   73.14 +const int BitsPerJavaLong    = 64;
   73.15  const int BitsPerSize_t      = size_tSize * BitsPerByte;
   73.16  
   73.17  // Size of a char[] needed to represent a jint as a string in decimal.
   73.18 @@ -906,6 +907,14 @@
   73.19    return log2_intptr(x);
   73.20  }
   73.21  
   73.22 +//* the argument must be exactly a power of 2
   73.23 +inline int exact_log2_long(jlong x) {
   73.24 +  #ifdef ASSERT
   73.25 +    if (!is_power_of_2_long(x)) basic_fatal("x must be a power of 2");
   73.26 +  #endif
   73.27 +  return log2_long(x);
   73.28 +}
   73.29 +
   73.30  
   73.31  // returns integer round-up to the nearest multiple of s (s must be a power of two)
   73.32  inline intptr_t round_to(intptr_t x, uintx s) {
    74.1 --- a/src/share/vm/utilities/taskqueue.cpp	Thu Feb 12 14:00:38 2009 -0800
    74.2 +++ b/src/share/vm/utilities/taskqueue.cpp	Wed Feb 18 18:14:18 2009 -0800
    74.3 @@ -25,6 +25,12 @@
    74.4  # include "incls/_precompiled.incl"
    74.5  # include "incls/_taskqueue.cpp.incl"
    74.6  
    74.7 +#ifdef TRACESPINNING
    74.8 +uint ParallelTaskTerminator::_total_yields = 0;
    74.9 +uint ParallelTaskTerminator::_total_spins = 0;
   74.10 +uint ParallelTaskTerminator::_total_peeks = 0;
   74.11 +#endif
   74.12 +
   74.13  bool TaskQueueSuper::peek() {
   74.14    return _bottom != _age.top();
   74.15  }
   74.16 @@ -69,15 +75,62 @@
   74.17  ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   74.18    Atomic::inc(&_offered_termination);
   74.19  
   74.20 -  juint yield_count = 0;
   74.21 +  uint yield_count = 0;
   74.22 +  // Number of hard spin loops done since last yield
   74.23 +  uint hard_spin_count = 0;
   74.24 +  // Number of iterations in the hard spin loop.
   74.25 +  uint hard_spin_limit = WorkStealingHardSpins;
   74.26 +
   74.27 +  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
   74.28 +  // If it is greater than 0, then start with a small number
   74.29 +  // of spins and increase number with each turn at spinning until
   74.30 +  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
   74.31 +  // Then do a yield() call and start spinning afresh.
   74.32 +  if (WorkStealingSpinToYieldRatio > 0) {
   74.33 +    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
   74.34 +    hard_spin_limit = MAX2(hard_spin_limit, 1U);
   74.35 +  }
   74.36 +  // Remember the initial spin limit.
   74.37 +  uint hard_spin_start = hard_spin_limit;
   74.38 +
   74.39 +  // Loop waiting for all threads to offer termination or
   74.40 +  // more work.
   74.41    while (true) {
   74.42 +    // Are all threads offering termination?
   74.43      if (_offered_termination == _n_threads) {
   74.44 -      //inner_termination_loop();
   74.45        return true;
   74.46      } else {
   74.47 +      // Look for more work.
   74.48 +      // Periodically sleep() instead of yield() to give threads
   74.49 +      // waiting on the cores the chance to grab this code
   74.50        if (yield_count <= WorkStealingYieldsBeforeSleep) {
   74.51 +        // Do a yield or hardspin.  For purposes of deciding whether
   74.52 +        // to sleep, count this as a yield.
   74.53          yield_count++;
   74.54 -        yield();
   74.55 +
   74.56 +        // Periodically call yield() instead spinning
   74.57 +        // After WorkStealingSpinToYieldRatio spins, do a yield() call
   74.58 +        // and reset the counts and starting limit.
   74.59 +        if (hard_spin_count > WorkStealingSpinToYieldRatio) {
   74.60 +          yield();
   74.61 +          hard_spin_count = 0;
   74.62 +          hard_spin_limit = hard_spin_start;
   74.63 +#ifdef TRACESPINNING
   74.64 +          _total_yields++;
   74.65 +#endif
   74.66 +        } else {
   74.67 +          // Hard spin this time
   74.68 +          // Increase the hard spinning period but only up to a limit.
   74.69 +          hard_spin_limit = MIN2(2*hard_spin_limit,
   74.70 +                                 (uint) WorkStealingHardSpins);
   74.71 +          for (uint j = 0; j < hard_spin_limit; j++) {
   74.72 +            SpinPause();
   74.73 +          }
   74.74 +          hard_spin_count++;
   74.75 +#ifdef TRACESPINNING
   74.76 +          _total_spins++;
   74.77 +#endif
   74.78 +        }
   74.79        } else {
   74.80          if (PrintGCDetails && Verbose) {
   74.81           gclog_or_tty->print_cr("ParallelTaskTerminator::offer_termination() "
   74.82 @@ -92,6 +145,9 @@
   74.83          sleep(WorkStealingSleepMillis);
   74.84        }
   74.85  
   74.86 +#ifdef TRACESPINNING
   74.87 +      _total_peeks++;
   74.88 +#endif
   74.89        if (peek_in_queue_set() ||
   74.90            (terminator != NULL && terminator->should_exit_termination())) {
   74.91          Atomic::dec(&_offered_termination);
   74.92 @@ -101,6 +157,16 @@
   74.93    }
   74.94  }
   74.95  
   74.96 +#ifdef TRACESPINNING
   74.97 +void ParallelTaskTerminator::print_termination_counts() {
   74.98 +  gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: %lld  "
   74.99 +    "Total spins: %lld  Total peeks: %lld",
  74.100 +    total_yields(),
  74.101 +    total_spins(),
  74.102 +    total_peeks());
  74.103 +}
  74.104 +#endif
  74.105 +
  74.106  void ParallelTaskTerminator::reset_for_reuse() {
  74.107    if (_offered_termination != 0) {
  74.108      assert(_offered_termination == _n_threads,
    75.1 --- a/src/share/vm/utilities/taskqueue.hpp	Thu Feb 12 14:00:38 2009 -0800
    75.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Wed Feb 18 18:14:18 2009 -0800
    75.3 @@ -22,67 +22,76 @@
    75.4   *
    75.5   */
    75.6  
    75.7 +#ifdef LP64
    75.8 +typedef juint TAG_TYPE;
    75.9 +// for a taskqueue size of 4M
   75.10 +#define LOG_TASKQ_SIZE 22
   75.11 +#else
   75.12 +typedef jushort TAG_TYPE;
   75.13 +// for a taskqueue size of 16K
   75.14 +#define LOG_TASKQ_SIZE 14
   75.15 +#endif
   75.16 +
   75.17  class TaskQueueSuper: public CHeapObj {
   75.18  protected:
   75.19    // The first free element after the last one pushed (mod _n).
   75.20 -  // (For now we'll assume only 32-bit CAS).
   75.21 -  volatile juint _bottom;
   75.22 +  volatile uint _bottom;
   75.23  
   75.24    // log2 of the size of the queue.
   75.25    enum SomeProtectedConstants {
   75.26 -    Log_n = 14
   75.27 +    Log_n = LOG_TASKQ_SIZE
   75.28    };
   75.29 +#undef LOG_TASKQ_SIZE
   75.30  
   75.31    // Size of the queue.
   75.32 -  juint n() { return (1 << Log_n); }
   75.33 +  uint n() { return (1 << Log_n); }
   75.34    // For computing "x mod n" efficiently.
   75.35 -  juint n_mod_mask() { return n() - 1; }
   75.36 +  uint n_mod_mask() { return n() - 1; }
   75.37  
   75.38    struct Age {
   75.39 -    jushort _top;
   75.40 -    jushort _tag;
   75.41 +    TAG_TYPE _top;
   75.42 +    TAG_TYPE _tag;
   75.43  
   75.44 -    jushort tag() const { return _tag; }
   75.45 -    jushort top() const { return _top; }
   75.46 +    TAG_TYPE tag() const { return _tag; }
   75.47 +    TAG_TYPE top() const { return _top; }
   75.48  
   75.49      Age() { _tag = 0; _top = 0; }
   75.50  
   75.51      friend bool operator ==(const Age& a1, const Age& a2) {
   75.52        return a1.tag() == a2.tag() && a1.top() == a2.top();
   75.53      }
   75.54 -
   75.55    };
   75.56    Age _age;
   75.57    // These make sure we do single atomic reads and writes.
   75.58    Age get_age() {
   75.59 -    jint res = *(volatile jint*)(&_age);
   75.60 +    uint res = *(volatile uint*)(&_age);
   75.61      return *(Age*)(&res);
   75.62    }
   75.63    void set_age(Age a) {
   75.64 -    *(volatile jint*)(&_age) = *(int*)(&a);
   75.65 +    *(volatile uint*)(&_age) = *(uint*)(&a);
   75.66    }
   75.67  
   75.68 -  jushort get_top() {
   75.69 +  TAG_TYPE get_top() {
   75.70      return get_age().top();
   75.71    }
   75.72  
   75.73    // These both operate mod _n.
   75.74 -  juint increment_index(juint ind) {
   75.75 +  uint increment_index(uint ind) {
   75.76      return (ind + 1) & n_mod_mask();
   75.77    }
   75.78 -  juint decrement_index(juint ind) {
   75.79 +  uint decrement_index(uint ind) {
   75.80      return (ind - 1) & n_mod_mask();
   75.81    }
   75.82  
   75.83    // Returns a number in the range [0.._n).  If the result is "n-1", it
   75.84    // should be interpreted as 0.
   75.85 -  juint dirty_size(juint bot, juint top) {
   75.86 -    return ((jint)bot - (jint)top) & n_mod_mask();
   75.87 +  uint dirty_size(uint bot, uint top) {
   75.88 +    return ((int)bot - (int)top) & n_mod_mask();
   75.89    }
   75.90  
   75.91    // Returns the size corresponding to the given "bot" and "top".
   75.92 -  juint size(juint bot, juint top) {
   75.93 -    juint sz = dirty_size(bot, top);
   75.94 +  uint size(uint bot, uint top) {
   75.95 +    uint sz = dirty_size(bot, top);
   75.96      // Has the queue "wrapped", so that bottom is less than top?
   75.97      // There's a complicated special case here.  A pair of threads could
   75.98      // perform pop_local and pop_global operations concurrently, starting
   75.99 @@ -94,7 +103,7 @@
  75.100      // owner performs pop_local's, and several concurrent threads
  75.101      // attempting to perform the pop_global will all perform the same CAS,
  75.102      // and only one can succeed.  Any stealing thread that reads after
  75.103 -    // either the increment or decrement will seen an empty queue, and will
  75.104 +    // either the increment or decrement will see an empty queue, and will
  75.105      // not join the competitors.  The "sz == -1 || sz == _n-1" state will
  75.106      // not be modified  by concurrent queues, so the owner thread can reset
  75.107      // the state to _bottom == top so subsequent pushes will be performed
  75.108 @@ -112,11 +121,11 @@
  75.109    // Return an estimate of the number of elements in the queue.
  75.110    // The "careful" version admits the possibility of pop_local/pop_global
  75.111    // races.
  75.112 -  juint size() {
  75.113 +  uint size() {
  75.114      return size(_bottom, get_top());
  75.115    }
  75.116  
  75.117 -  juint dirty_size() {
  75.118 +  uint dirty_size() {
  75.119      return dirty_size(_bottom, get_top());
  75.120    }
  75.121  
  75.122 @@ -127,15 +136,15 @@
  75.123  
  75.124    // Maximum number of elements allowed in the queue.  This is two less
  75.125    // than the actual queue size, for somewhat complicated reasons.
  75.126 -  juint max_elems() { return n() - 2; }
  75.127 +  uint max_elems() { return n() - 2; }
  75.128  
  75.129  };
  75.130  
  75.131  template<class E> class GenericTaskQueue: public TaskQueueSuper {
  75.132  private:
  75.133    // Slow paths for push, pop_local.  (pop_global has no fast path.)
  75.134 -  bool push_slow(E t, juint dirty_n_elems);
  75.135 -  bool pop_local_slow(juint localBot, Age oldAge);
  75.136 +  bool push_slow(E t, uint dirty_n_elems);
  75.137 +  bool pop_local_slow(uint localBot, Age oldAge);
  75.138  
  75.139  public:
  75.140    // Initializes the queue to empty.
  75.141 @@ -170,7 +179,7 @@
  75.142  
  75.143  template<class E>
  75.144  GenericTaskQueue<E>::GenericTaskQueue():TaskQueueSuper() {
  75.145 -  assert(sizeof(Age) == sizeof(jint), "Depends on this.");
  75.146 +  assert(sizeof(Age) == sizeof(int), "Depends on this.");
  75.147  }
  75.148  
  75.149  template<class E>
  75.150 @@ -182,9 +191,9 @@
  75.151  template<class E>
  75.152  void GenericTaskQueue<E>::oops_do(OopClosure* f) {
  75.153    // tty->print_cr("START OopTaskQueue::oops_do");
  75.154 -  int iters = size();
  75.155 -  juint index = _bottom;
  75.156 -  for (int i = 0; i < iters; ++i) {
  75.157 +  uint iters = size();
  75.158 +  uint index = _bottom;
  75.159 +  for (uint i = 0; i < iters; ++i) {
  75.160      index = decrement_index(index);
  75.161      // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
  75.162      //            index, &_elems[index], _elems[index]);
  75.163 @@ -198,10 +207,10 @@
  75.164  
  75.165  
  75.166  template<class E>
  75.167 -bool GenericTaskQueue<E>::push_slow(E t, juint dirty_n_elems) {
  75.168 +bool GenericTaskQueue<E>::push_slow(E t, uint dirty_n_elems) {
  75.169    if (dirty_n_elems == n() - 1) {
  75.170      // Actually means 0, so do the push.
  75.171 -    juint localBot = _bottom;
  75.172 +    uint localBot = _bottom;
  75.173      _elems[localBot] = t;
  75.174      _bottom = increment_index(localBot);
  75.175      return true;
  75.176 @@ -211,7 +220,7 @@
  75.177  
  75.178  template<class E>
  75.179  bool GenericTaskQueue<E>::
  75.180 -pop_local_slow(juint localBot, Age oldAge) {
  75.181 +pop_local_slow(uint localBot, Age oldAge) {
  75.182    // This queue was observed to contain exactly one element; either this
  75.183    // thread will claim it, or a competing "pop_global".  In either case,
  75.184    // the queue will be logically empty afterwards.  Create a new Age value
  75.185 @@ -230,9 +239,8 @@
  75.186      Age tempAge;
  75.187      // No competing pop_global has yet incremented "top"; we'll try to
  75.188      // install new_age, thus claiming the element.
  75.189 -    assert(sizeof(Age) == sizeof(jint) && sizeof(jint) == sizeof(juint),
  75.190 -           "Assumption about CAS unit.");
  75.191 -    *(jint*)&tempAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
  75.192 +    assert(sizeof(Age) == sizeof(int), "Assumption about CAS unit.");
  75.193 +    *(uint*)&tempAge = Atomic::cmpxchg(*(uint*)&newAge, (volatile uint*)&_age, *(uint*)&oldAge);
  75.194      if (tempAge == oldAge) {
  75.195        // We win.
  75.196        assert(dirty_size(localBot, get_top()) != n() - 1,
  75.197 @@ -253,8 +261,8 @@
  75.198  bool GenericTaskQueue<E>::pop_global(E& t) {
  75.199    Age newAge;
  75.200    Age oldAge = get_age();
  75.201 -  juint localBot = _bottom;
  75.202 -  juint n_elems = size(localBot, oldAge.top());
  75.203 +  uint localBot = _bottom;
  75.204 +  uint n_elems = size(localBot, oldAge.top());
  75.205    if (n_elems == 0) {
  75.206      return false;
  75.207    }
  75.208 @@ -263,7 +271,7 @@
  75.209    newAge._top = increment_index(newAge.top());
  75.210    if ( newAge._top == 0 ) newAge._tag++;
  75.211    Age resAge;
  75.212 -  *(jint*)&resAge = Atomic::cmpxchg(*(jint*)&newAge, (volatile jint*)&_age, *(jint*)&oldAge);
  75.213 +  *(uint*)&resAge = Atomic::cmpxchg(*(uint*)&newAge, (volatile uint*)&_age, *(uint*)&oldAge);
  75.214    // Note that using "_bottom" here might fail, since a pop_local might
  75.215    // have decremented it.
  75.216    assert(dirty_size(localBot, newAge._top) != n() - 1,
  75.217 @@ -287,7 +295,7 @@
  75.218  
  75.219  template<class E> class GenericTaskQueueSet: public TaskQueueSetSuper {
  75.220  private:
  75.221 -  int _n;
  75.222 +  uint _n;
  75.223    GenericTaskQueue<E>** _queues;
  75.224  
  75.225  public:
  75.226 @@ -300,51 +308,51 @@
  75.227      }
  75.228    }
  75.229  
  75.230 -  bool steal_1_random(int queue_num, int* seed, E& t);
  75.231 -  bool steal_best_of_2(int queue_num, int* seed, E& t);
  75.232 -  bool steal_best_of_all(int queue_num, int* seed, E& t);
  75.233 +  bool steal_1_random(uint queue_num, int* seed, E& t);
  75.234 +  bool steal_best_of_2(uint queue_num, int* seed, E& t);
  75.235 +  bool steal_best_of_all(uint queue_num, int* seed, E& t);
  75.236  
  75.237 -  void register_queue(int i, GenericTaskQueue<E>* q);
  75.238 +  void register_queue(uint i, GenericTaskQueue<E>* q);
  75.239  
  75.240 -  GenericTaskQueue<E>* queue(int n);
  75.241 +  GenericTaskQueue<E>* queue(uint n);
  75.242  
  75.243    // The thread with queue number "queue_num" (and whose random number seed
  75.244    // is at "seed") is trying to steal a task from some other queue.  (It
  75.245    // may try several queues, according to some configuration parameter.)
  75.246    // If some steal succeeds, returns "true" and sets "t" the stolen task,
  75.247    // otherwise returns false.
  75.248 -  bool steal(int queue_num, int* seed, E& t);
  75.249 +  bool steal(uint queue_num, int* seed, E& t);
  75.250  
  75.251    bool peek();
  75.252  };
  75.253  
  75.254  template<class E>
  75.255 -void GenericTaskQueueSet<E>::register_queue(int i, GenericTaskQueue<E>* q) {
  75.256 -  assert(0 <= i && i < _n, "index out of range.");
  75.257 +void GenericTaskQueueSet<E>::register_queue(uint i, GenericTaskQueue<E>* q) {
  75.258 +  assert(i < _n, "index out of range.");
  75.259    _queues[i] = q;
  75.260  }
  75.261  
  75.262  template<class E>
  75.263 -GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(int i) {
  75.264 +GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(uint i) {
  75.265    return _queues[i];
  75.266  }
  75.267  
  75.268  template<class E>
  75.269 -bool GenericTaskQueueSet<E>::steal(int queue_num, int* seed, E& t) {
  75.270 -  for (int i = 0; i < 2 * _n; i++)
  75.271 +bool GenericTaskQueueSet<E>::steal(uint queue_num, int* seed, E& t) {
  75.272 +  for (uint i = 0; i < 2 * _n; i++)
  75.273      if (steal_best_of_2(queue_num, seed, t))
  75.274        return true;
  75.275    return false;
  75.276  }
  75.277  
  75.278  template<class E>
  75.279 -bool GenericTaskQueueSet<E>::steal_best_of_all(int queue_num, int* seed, E& t) {
  75.280 +bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t) {
  75.281    if (_n > 2) {
  75.282      int best_k;
  75.283 -    jint best_sz = 0;
  75.284 -    for (int k = 0; k < _n; k++) {
  75.285 +    uint best_sz = 0;
  75.286 +    for (uint k = 0; k < _n; k++) {
  75.287        if (k == queue_num) continue;
  75.288 -      jint sz = _queues[k]->size();
  75.289 +      uint sz = _queues[k]->size();
  75.290        if (sz > best_sz) {
  75.291          best_sz = sz;
  75.292          best_k = k;
  75.293 @@ -362,9 +370,9 @@
  75.294  }
  75.295  
  75.296  template<class E>
  75.297 -bool GenericTaskQueueSet<E>::steal_1_random(int queue_num, int* seed, E& t) {
  75.298 +bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
  75.299    if (_n > 2) {
  75.300 -    int k = queue_num;
  75.301 +    uint k = queue_num;
  75.302      while (k == queue_num) k = randomParkAndMiller(seed) % _n;
  75.303      return _queues[2]->pop_global(t);
  75.304    } else if (_n == 2) {
  75.305 @@ -378,20 +386,20 @@
  75.306  }
  75.307  
  75.308  template<class E>
  75.309 -bool GenericTaskQueueSet<E>::steal_best_of_2(int queue_num, int* seed, E& t) {
  75.310 +bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  75.311    if (_n > 2) {
  75.312 -    int k1 = queue_num;
  75.313 +    uint k1 = queue_num;
  75.314      while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
  75.315 -    int k2 = queue_num;
  75.316 +    uint k2 = queue_num;
  75.317      while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
  75.318      // Sample both and try the larger.
  75.319 -    juint sz1 = _queues[k1]->size();
  75.320 -    juint sz2 = _queues[k2]->size();
  75.321 +    uint sz1 = _queues[k1]->size();
  75.322 +    uint sz2 = _queues[k2]->size();
  75.323      if (sz2 > sz1) return _queues[k2]->pop_global(t);
  75.324      else return _queues[k1]->pop_global(t);
  75.325    } else if (_n == 2) {
  75.326      // Just try the other one.
  75.327 -    int k = (queue_num + 1) % 2;
  75.328 +    uint k = (queue_num + 1) % 2;
  75.329      return _queues[k]->pop_global(t);
  75.330    } else {
  75.331      assert(_n == 1, "can't be zero.");
  75.332 @@ -402,7 +410,7 @@
  75.333  template<class E>
  75.334  bool GenericTaskQueueSet<E>::peek() {
  75.335    // Try all the queues.
  75.336 -  for (int j = 0; j < _n; j++) {
  75.337 +  for (uint j = 0; j < _n; j++) {
  75.338      if (_queues[j]->peek())
  75.339        return true;
  75.340    }
  75.341 @@ -418,11 +426,19 @@
  75.342  // A class to aid in the termination of a set of parallel tasks using
  75.343  // TaskQueueSet's for work stealing.
  75.344  
  75.345 +#undef TRACESPINNING
  75.346 +
  75.347  class ParallelTaskTerminator: public StackObj {
  75.348  private:
  75.349    int _n_threads;
  75.350    TaskQueueSetSuper* _queue_set;
  75.351 -  jint _offered_termination;
  75.352 +  int _offered_termination;
  75.353 +
  75.354 +#ifdef TRACESPINNING
  75.355 +  static uint _total_yields;
  75.356 +  static uint _total_spins;
  75.357 +  static uint _total_peeks;
  75.358 +#endif
  75.359  
  75.360    bool peek_in_queue_set();
  75.361  protected:
  75.362 @@ -454,13 +470,19 @@
  75.363    // the terminator is finished.
  75.364    void reset_for_reuse();
  75.365  
  75.366 +#ifdef TRACESPINNING
  75.367 +  static uint total_yields() { return _total_yields; }
  75.368 +  static uint total_spins() { return _total_spins; }
  75.369 +  static uint total_peeks() { return _total_peeks; }
  75.370 +  static void print_termination_counts();
  75.371 +#endif
  75.372  };
  75.373  
  75.374  #define SIMPLE_STACK 0
  75.375  
  75.376  template<class E> inline bool GenericTaskQueue<E>::push(E t) {
  75.377  #if SIMPLE_STACK
  75.378 -  juint localBot = _bottom;
  75.379 +  uint localBot = _bottom;
  75.380    if (_bottom < max_elems()) {
  75.381      _elems[localBot] = t;
  75.382      _bottom = localBot + 1;
  75.383 @@ -469,10 +491,10 @@
  75.384      return false;
  75.385    }
  75.386  #else
  75.387 -  juint localBot = _bottom;
  75.388 +  uint localBot = _bottom;
  75.389    assert((localBot >= 0) && (localBot < n()), "_bottom out of range.");
  75.390 -  jushort top = get_top();
  75.391 -  juint dirty_n_elems = dirty_size(localBot, top);
  75.392 +  TAG_TYPE top = get_top();
  75.393 +  uint dirty_n_elems = dirty_size(localBot, top);
  75.394    assert((dirty_n_elems >= 0) && (dirty_n_elems < n()),
  75.395           "n_elems out of range.");
  75.396    if (dirty_n_elems < max_elems()) {
  75.397 @@ -487,19 +509,19 @@
  75.398  
  75.399  template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
  75.400  #if SIMPLE_STACK
  75.401 -  juint localBot = _bottom;
  75.402 +  uint localBot = _bottom;
  75.403    assert(localBot > 0, "precondition.");
  75.404    localBot--;
  75.405    t = _elems[localBot];
  75.406    _bottom = localBot;
  75.407    return true;
  75.408  #else
  75.409 -  juint localBot = _bottom;
  75.410 +  uint localBot = _bottom;
  75.411    // This value cannot be n-1.  That can only occur as a result of
  75.412    // the assignment to bottom in this method.  If it does, this method
  75.413    // resets the size( to 0 before the next call (which is sequential,
  75.414    // since this is pop_local.)
  75.415 -  juint dirty_n_elems = dirty_size(localBot, get_top());
  75.416 +  uint dirty_n_elems = dirty_size(localBot, get_top());
  75.417    assert(dirty_n_elems != n() - 1, "Shouldn't be possible...");
  75.418    if (dirty_n_elems == 0) return false;
  75.419    localBot = decrement_index(localBot);
  75.420 @@ -512,7 +534,7 @@
  75.421    // If there's still at least one element in the queue, based on the
  75.422    // "_bottom" and "age" we've read, then there can be no interference with
  75.423    // a "pop_global" operation, and we're done.
  75.424 -  juint tp = get_top();
  75.425 +  TAG_TYPE tp = get_top();    // XXX
  75.426    if (size(localBot, tp) > 0) {
  75.427      assert(dirty_size(localBot, tp) != n() - 1,
  75.428             "Shouldn't be possible...");
  75.429 @@ -581,7 +603,7 @@
  75.430    bool is_empty();
  75.431    bool stealable_is_empty();
  75.432    bool overflow_is_empty();
  75.433 -  juint stealable_size() { return _region_queue.size(); }
  75.434 +  uint stealable_size() { return _region_queue.size(); }
  75.435    RegionTaskQueue* task_queue() { return &_region_queue; }
  75.436  };
  75.437  
    76.1 --- a/src/share/vm/utilities/workgroup.hpp	Thu Feb 12 14:00:38 2009 -0800
    76.2 +++ b/src/share/vm/utilities/workgroup.hpp	Wed Feb 18 18:14:18 2009 -0800
    76.3 @@ -32,7 +32,7 @@
    76.4  
    76.5  // An abstract task to be worked on by a gang.
    76.6  // You subclass this to supply your own work() method
    76.7 -class AbstractGangTask: public CHeapObj {
    76.8 +class AbstractGangTask VALUE_OBJ_CLASS_SPEC {
    76.9  public:
   76.10    // The abstract work method.
   76.11    // The argument tells you which member of the gang you are.
    77.1 --- a/test/Makefile	Thu Feb 12 14:00:38 2009 -0800
    77.2 +++ b/test/Makefile	Wed Feb 18 18:14:18 2009 -0800
    77.3 @@ -28,9 +28,9 @@
    77.4  
    77.5  # Get OS/ARCH specifics
    77.6  OSNAME = $(shell uname -s)
    77.7 -SLASH_JAVA = /java
    77.8  ifeq ($(OSNAME), SunOS)
    77.9    PLATFORM = solaris
   77.10 +  SLASH_JAVA = /java
   77.11    ARCH = $(shell uname -p)
   77.12    ifeq ($(ARCH), i386)
   77.13      ARCH=i586
   77.14 @@ -38,6 +38,7 @@
   77.15  endif
   77.16  ifeq ($(OSNAME), Linux)
   77.17    PLATFORM = linux
   77.18 +  SLASH_JAVA = /java
   77.19    ARCH = $(shell uname -m)
   77.20    ifeq ($(ARCH), i386)
   77.21      ARCH = i586
   77.22 @@ -62,6 +63,10 @@
   77.23    EXESUFFIX = .exe
   77.24  endif
   77.25  
   77.26 +ifdef ALT_SLASH_JAVA
   77.27 +  SLASH_JAVA = $(ALT_SLASH_JAVA)
   77.28 +endif
   77.29 +
   77.30  # Utilities used
   77.31  CD    = cd
   77.32  CP    = cp
    78.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    78.2 +++ b/test/compiler/6603011/Test.java	Wed Feb 18 18:14:18 2009 -0800
    78.3 @@ -0,0 +1,220 @@
    78.4 +/*
    78.5 + * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
    78.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    78.7 + *
    78.8 + * This code is free software; you can redistribute it and/or modify it
    78.9 + * under the terms of the GNU General Public License version 2 only, as
   78.10 + * published by the Free Software Foundation.
   78.11 + *
   78.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   78.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   78.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   78.15 + * version 2 for more details (a copy is included in the LICENSE file that
   78.16 + * accompanied this code).
   78.17 + *
   78.18 + * You should have received a copy of the GNU General Public License version
   78.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   78.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   78.21 + *
   78.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
   78.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
   78.24 + * have any questions.
   78.25 + */
   78.26 +
   78.27 +/**
   78.28 + * @test
   78.29 + * @bug 6603011
   78.30 + * @summary long/int division by constant
   78.31 + *
   78.32 + * @run main/othervm -Xcomp -Xbatch -XX:-Inline Test
   78.33 + */
   78.34 +
   78.35 +//
   78.36 +// -XX:-Inline is essential to this test so that verification functions
   78.37 +//   divi, modi, divl and modl generate "plain" divides.
   78.38 +// -Xcomp -Xbatch are also useful to ensure the full range of
   78.39 +//   dividend and divisor combinations are tested
   78.40 +//
   78.41 +
   78.42 +import java.net.*;
   78.43 +
   78.44 +class s {
   78.45 +  static int  divi(int  dividend, int  divisor) { return dividend / divisor; }
   78.46 +  static int  modi(int  dividend, int  divisor) { return dividend % divisor; }
   78.47 +  static long divl(long dividend, long divisor) { return dividend / divisor; }
   78.48 +  static long modl(long dividend, long divisor) { return dividend % divisor; }
   78.49 +}
   78.50 +
   78.51 +public class Test implements Runnable {
   78.52 +  // Report verbose messages on failure; turn off to suppress
   78.53 +  // too much output with gross numbers of failures.
   78.54 +  static final boolean VERBOSE = true;
   78.55 +
   78.56 +  // Initialize DIVISOR so that it is final in this class.
   78.57 +  static final int DIVISOR;
   78.58 +  static {
   78.59 +    int value = 0;
   78.60 +    try {
   78.61 +      value = Integer.decode(System.getProperty("divisor"));
   78.62 +    } catch (Throwable e) {
   78.63 +    }
   78.64 +    DIVISOR = value;
   78.65 +  }
   78.66 +
   78.67 +  // The methods of interest. We want the JIT to compile these
   78.68 +  // and convert the divide into a multiply.
   78.69 +  public int divbyI (int dividend)   { return dividend / DIVISOR; }
   78.70 +  public int modbyI (int dividend)   { return dividend % DIVISOR; }
   78.71 +  public long divbyL (long dividend) { return dividend / DIVISOR; }
   78.72 +  public long modbyL (long dividend) { return dividend % DIVISOR; }
   78.73 +
   78.74 +  public int divisor() { return DIVISOR; }
   78.75 +
   78.76 +  public boolean checkI (int dividend) {
   78.77 +    int quo = divbyI(dividend);
   78.78 +    int rem = modbyI(dividend);
   78.79 +    int quo0 = s.divi(dividend, divisor());
   78.80 +    int rem0 = s.modi(dividend, divisor());
   78.81 +
   78.82 +    if (quo != quo0 || rem != rem0) {
   78.83 +      if (VERBOSE) {
   78.84 +        System.out.println("Computed: " + dividend + " / " + divisor() + " = " +
   78.85 +                           quo  + ", " + dividend + " % " + divisor() + " = " + rem );
   78.86 +        System.out.println("expected: " + dividend + " / " + divisor() + " = " +
   78.87 +                           quo0 + ", " + dividend + " % " + divisor() + " = " + rem0);
   78.88 +        // Report sign of rem failure
   78.89 +        if (rem != 0 && (rem ^ dividend) < 0) {
   78.90 +          System.out.println("  rem & dividend have different signs");
   78.91 +        }
   78.92 +        // Report range of rem failure
   78.93 +        if (java.lang.Math.abs(rem) >= java.lang.Math.abs(divisor())) {
   78.94 +          System.out.println("  remainder out of range");
   78.95 +        }
   78.96 +        // Report quo/rem identity relationship failure
   78.97 +        if ((quo * divisor()) + rem != dividend) {
   78.98 +          System.out.println("  quotient/remainder invariant broken");
   78.99 +        }
  78.100 +      }
  78.101 +      return false;
  78.102 +    }
  78.103 +    return true;
  78.104 +  }
  78.105 +
  78.106 +  public boolean checkL (long dividend) {
  78.107 +    long quo = divbyL(dividend);
  78.108 +    long rem = modbyL(dividend);
  78.109 +    long quo0 = s.divl(dividend, divisor());
  78.110 +    long rem0 = s.modl(dividend, divisor());
  78.111 +
  78.112 +    if (quo != quo0 || rem != rem0) {
  78.113 +      if (VERBOSE) {
  78.114 +        System.out.println("  " + dividend + " / " + divisor() + " = " +
  78.115 +                           quo + ", " + dividend + " % " + divisor() + " = " + rem);
  78.116 +        // Report sign of rem failure
  78.117 +        if (rem != 0 && (rem ^ dividend) < 0) {
  78.118 +          System.out.println("  rem & dividend have different signs");
  78.119 +        }
  78.120 +        // Report range of rem failure
  78.121 +        if (java.lang.Math.abs(rem) >= java.lang.Math.abs(divisor())) {
  78.122 +          System.out.println("  remainder out of range");
  78.123 +        }
  78.124 +        // Report quo/rem identity relationship failure
  78.125 +        if ((quo * divisor()) + rem != dividend) {
  78.126 +          System.out.println(" (" + quo + " * " + divisor() + ") + " + rem + " != "
  78.127 +                             + dividend);
  78.128 +        }
  78.129 +      }
  78.130 +      return false;
  78.131 +    }
  78.132 +    return true;
  78.133 +  }
  78.134 +
  78.135 +  public void run() {
  78.136 +    // Don't try to divide by zero
  78.137 +    if (divisor() == 0) return;
  78.138 +
  78.139 +    // Range of dividends to check. Try dividends from start to end
  78.140 +    // inclusive, as well as variations on those values as shifted
  78.141 +    // left.
  78.142 +    int start = -1024;
  78.143 +    int end = 1024;
  78.144 +
  78.145 +    // Test int division using a variety of dividends.
  78.146 +    int wrong = 0;
  78.147 +    int total = 0;
  78.148 +
  78.149 +    outerloop:
  78.150 +    for (int i = start; i <= end; i++) {
  78.151 +      for (int s = 0; s < 32; s += 4) {
  78.152 +        total++;
  78.153 +        int dividend = i << s;
  78.154 +        if (!checkI(dividend)) {
  78.155 +          wrong++;
  78.156 +          // Stop on the first failure
  78.157 +          // break outerloop;
  78.158 +        }
  78.159 +      }
  78.160 +    }
  78.161 +    if (wrong > 0) {
  78.162 +      System.out.println("divisor " + divisor() + ": " +
  78.163 +                         wrong + "/" + total + " wrong int divisions");
  78.164 +    }
  78.165 +
  78.166 +    // Test long division using a variety of dividends.
  78.167 +    wrong = 0;
  78.168 +    total = 0;
  78.169 +
  78.170 +    outerloop:
  78.171 +    for (int i = start; i <= end; i++) {
  78.172 +      for (int s = 0; s < 64; s += 4) {
  78.173 +        total++;
  78.174 +        long dividend = i << s;
  78.175 +        if (!checkL(dividend)) {
  78.176 +          wrong++;
  78.177 +          // Stop on the first failure
  78.178 +          // break outerloop;
  78.179 +        }
  78.180 +      }
  78.181 +    }
  78.182 +    if (wrong > 0) {
  78.183 +      System.out.println("divisor " + divisor() + ": " +
  78.184 +                         wrong + "/" + total + " wrong long divisions");
  78.185 +    }
  78.186 +
  78.187 +  }
  78.188 +
  78.189 +  // Reload this class with the "divisor" property set to the input parameter.
  78.190 +  // This allows the JIT to see q.DIVISOR as a final constant, and change
  78.191 +  // any divisions or mod operations into multiplies.
  78.192 +  public static void test_divisor(int divisor,
  78.193 +                                  URLClassLoader apploader) throws Exception {
  78.194 +    System.setProperty("divisor", "" + divisor);
  78.195 +    ClassLoader loader = new URLClassLoader(apploader.getURLs(),
  78.196 +                                            apploader.getParent());
  78.197 +    Class c = loader.loadClass("Test");
  78.198 +    Runnable r = (Runnable)c.newInstance();
  78.199 +    r.run();
  78.200 +  }
  78.201 +
  78.202 +  public static void main(String[] args) throws Exception {
  78.203 +    Class cl = Class.forName("Test");
  78.204 +    URLClassLoader apploader = (URLClassLoader)cl.getClassLoader();
  78.205 +
  78.206 +
  78.207 +    // Test every divisor between -100 and 100.
  78.208 +    for (int i = -100; i <= 100; i++) {
  78.209 +      test_divisor(i, apploader);
  78.210 +    }
  78.211 +
  78.212 +    // Try a few divisors outside the typical range.
  78.213 +    // The values below have been observed in rt.jar.
  78.214 +    test_divisor(101, apploader);
  78.215 +    test_divisor(400, apploader);
  78.216 +    test_divisor(1000, apploader);
  78.217 +    test_divisor(3600, apploader);
  78.218 +    test_divisor(9973, apploader);
  78.219 +    test_divisor(86400, apploader);
  78.220 +    test_divisor(1000000, apploader);
  78.221 +  }
  78.222 +
  78.223 +}
    79.1 --- a/test/compiler/6775880/Test.java	Thu Feb 12 14:00:38 2009 -0800
    79.2 +++ b/test/compiler/6775880/Test.java	Wed Feb 18 18:14:18 2009 -0800
    79.3 @@ -27,7 +27,7 @@
    79.4   * @bug 6775880
    79.5   * @summary EA +DeoptimizeALot: assert(mon_info->owner()->is_locked(),"object must be locked now")
    79.6   * @compile -source 1.4 -target 1.4 Test.java
    79.7 - * @run main/othervm -server -Xbatch -XX:+DoEscapeAnalysis -XX:+DeoptimizeALot -XX:CompileCommand=exclude,java.lang.AbstractStringBuilder::append Test
    79.8 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+DoEscapeAnalysis -XX:+DeoptimizeALot -XX:CompileCommand=exclude,java.lang.AbstractStringBuilder::append Test
    79.9   */
   79.10  
   79.11  public class Test {
    80.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    80.2 +++ b/test/compiler/6795161/Test.java	Wed Feb 18 18:14:18 2009 -0800
    80.3 @@ -0,0 +1,60 @@
    80.4 +/*
    80.5 + * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
    80.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.7 + *
    80.8 + * This code is free software; you can redistribute it and/or modify it
    80.9 + * under the terms of the GNU General Public License version 2 only, as
   80.10 + * published by the Free Software Foundation.
   80.11 + *
   80.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   80.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   80.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   80.15 + * version 2 for more details (a copy is included in the LICENSE file that
   80.16 + * accompanied this code).
   80.17 + *
   80.18 + * You should have received a copy of the GNU General Public License version
   80.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   80.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   80.21 + *
   80.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
   80.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
   80.24 + * have any questions.
   80.25 + *
   80.26 + */
   80.27 +
   80.28 +/*
   80.29 + * @test
   80.30 + * @bug 6795161
   80.31 + * @summary Escape analysis leads to data corruption
   80.32 + * @run main/othervm -server -Xcomp -XX:CompileOnly=Test -XX:+DoEscapeAnalysis Test
   80.33 + */
   80.34 +
   80.35 +class Test_Class_1 {
   80.36 +    static String var_1;
   80.37 +
   80.38 +    static void badFunc(int size)
   80.39 +    {
   80.40 +        try {
   80.41 +          for (int i = 0; i < 1; (new byte[size-i])[0] = 0, i++) {}
   80.42 +        } catch (Exception e) {
   80.43 +          // don't comment it out, it will lead to correct results ;)
   80.44 +          //System.out.println("Got exception: " + e);
   80.45 +        }
   80.46 +    }
   80.47 +}
   80.48 +
   80.49 +public class Test {
   80.50 +    static String var_1_copy = Test_Class_1.var_1;
   80.51 +
   80.52 +    static byte var_check;
   80.53 +
   80.54 +    public static void main(String[] args)
   80.55 +    {
   80.56 +        var_check = 1;
   80.57 +
   80.58 +        Test_Class_1.badFunc(-1);
   80.59 +
   80.60 +        System.out.println("EATester.var_check = " + Test.var_check + " (expected 1)\n");
   80.61 +    }
   80.62 +}
   80.63 +
    81.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    81.2 +++ b/test/compiler/6795362/Test6795362.java	Wed Feb 18 18:14:18 2009 -0800
    81.3 @@ -0,0 +1,48 @@
    81.4 +/*
    81.5 + * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
    81.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    81.7 + *
    81.8 + * This code is free software; you can redistribute it and/or modify it
    81.9 + * under the terms of the GNU General Public License version 2 only, as
   81.10 + * published by the Free Software Foundation.
   81.11 + *
   81.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   81.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   81.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   81.15 + * version 2 for more details (a copy is included in the LICENSE file that
   81.16 + * accompanied this code).
   81.17 + *
   81.18 + * You should have received a copy of the GNU General Public License version
   81.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   81.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   81.21 + *
   81.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
   81.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
   81.24 + * have any questions.
   81.25 + */
   81.26 +
   81.27 +/**
   81.28 + * @test
   81.29 + * @bug 6795362
   81.30 + * @summary 32bit server compiler leads to wrong results on solaris-x86
   81.31 + *
   81.32 + * @run main/othervm -Xcomp -XX:CompileOnly=Test6795362.sub Test6795362
   81.33 + */
   81.34 +
   81.35 +public class Test6795362 {
   81.36 +    public static void main(String[] args)
   81.37 +    {
   81.38 +        sub();
   81.39 +
   81.40 +        if (var_bad != 0)
   81.41 +            throw new InternalError(var_bad + " != 0");
   81.42 +    }
   81.43 +
   81.44 +    static long var_bad = -1L;
   81.45 +
   81.46 +    static void sub()
   81.47 +    {
   81.48 +        var_bad >>= 65;
   81.49 +        var_bad /= 65;
   81.50 +    }
   81.51 +}
    82.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    82.2 +++ b/test/compiler/6799693/Test.java	Wed Feb 18 18:14:18 2009 -0800
    82.3 @@ -0,0 +1,47 @@
    82.4 +/*
    82.5 + * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
    82.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.7 + *
    82.8 + * This code is free software; you can redistribute it and/or modify it
    82.9 + * under the terms of the GNU General Public License version 2 only, as
   82.10 + * published by the Free Software Foundation.
   82.11 + *
   82.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   82.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   82.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   82.15 + * version 2 for more details (a copy is included in the LICENSE file that
   82.16 + * accompanied this code).
   82.17 + *
   82.18 + * You should have received a copy of the GNU General Public License version
   82.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   82.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   82.21 + *
   82.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
   82.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
   82.24 + * have any questions.
   82.25 + *
   82.26 + */
   82.27 +
   82.28 +/*
   82.29 + * @test
   82.30 + * @bug 6799693
   82.31 + * @summary Server compiler leads to data corruption when expression throws an Exception
   82.32 + * @run main/othervm -Xcomp -XX:CompileOnly=Test Test
   82.33 + */
   82.34 +
   82.35 +public class Test {
   82.36 +   static int var_bad = 1;
   82.37 +
   82.38 +   public static void main(String[] args)
   82.39 +   {
   82.40 +      var_bad++;
   82.41 +
   82.42 +      try {
   82.43 +         for (int i = 0; i < 10; i++) (new byte[((byte)-1 << i)])[0]  = 0;
   82.44 +      }
   82.45 +      catch (Exception e) { System.out.println("Got " + e); }
   82.46 +
   82.47 +      System.out.println("Test.var_bad = " +  var_bad + " (expected 2)\n");
   82.48 +   }
   82.49 +}
   82.50 +
    83.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    83.2 +++ b/test/compiler/6800154/Test6800154.java	Wed Feb 18 18:14:18 2009 -0800
    83.3 @@ -0,0 +1,109 @@
    83.4 +/*
    83.5 + * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
    83.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    83.7 + *
    83.8 + * This code is free software; you can redistribute it and/or modify it
    83.9 + * under the terms of the GNU General Public License version 2 only, as
   83.10 + * published by the Free Software Foundation.
   83.11 + *
   83.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   83.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   83.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   83.15 + * version 2 for more details (a copy is included in the LICENSE file that
   83.16 + * accompanied this code).
   83.17 + *
   83.18 + * You should have received a copy of the GNU General Public License version
   83.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   83.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   83.21 + *
   83.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
   83.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
   83.24 + * have any questions.
   83.25 + */
   83.26 +
   83.27 +/**
   83.28 + * @test
   83.29 + * @bug 6800154
   83.30 + * @summary Add comments to long_by_long_mulhi() for better understandability
   83.31 + *
   83.32 + * @run main/othervm -Xcomp -XX:CompileOnly=Test6800154.divcomp Test6800154
   83.33 + */
   83.34 +
   83.35 +import java.net.URLClassLoader;
   83.36 +
   83.37 +public class Test6800154 implements Runnable {
   83.38 +    static final long[] DIVIDENDS = {
   83.39 +        0,
   83.40 +        1,
   83.41 +        2,
   83.42 +        1423487,
   83.43 +        4444441,
   83.44 +        4918923241323L,
   83.45 +        -1,
   83.46 +        -24351,
   83.47 +        0x3333,
   83.48 +        0x0000000080000000L,
   83.49 +        0x7fffffffffffffffL,
   83.50 +        0x8000000000000000L
   83.51 +    };
   83.52 +
   83.53 +    static final long[] DIVISORS = {
   83.54 +        1,
   83.55 +        2,
   83.56 +        17,
   83.57 +        12342,
   83.58 +        24123,
   83.59 +        143444,
   83.60 +        123444442344L,
   83.61 +        -1,
   83.62 +        -2,
   83.63 +        -4423423234231423L,
   83.64 +        0x0000000080000000L,
   83.65 +        0x7fffffffffffffffL,
   83.66 +        0x8000000000000000L
   83.67 +    };
   83.68 +
   83.69 +    // Initialize DIVISOR so that it is final in this class.
   83.70 +    static final long DIVISOR;
   83.71 +
   83.72 +    static {
   83.73 +        long value = 0;
   83.74 +        try {
   83.75 +            value = Long.decode(System.getProperty("divisor"));
   83.76 +        } catch (Throwable e) {
   83.77 +        }
   83.78 +        DIVISOR = value;
   83.79 +    }
   83.80 +
   83.81 +    public static void main(String[] args) throws Exception
   83.82 +    {
   83.83 +        Class cl = Class.forName("Test6800154");
   83.84 +        URLClassLoader apploader = (URLClassLoader) cl.getClassLoader();
   83.85 +
   83.86 +        // Iterate over all divisors.
   83.87 +        for (int i = 0; i < DIVISORS.length; i++) {
   83.88 +            System.setProperty("divisor", "" + DIVISORS[i]);
   83.89 +            ClassLoader loader = new URLClassLoader(apploader.getURLs(), apploader.getParent());
   83.90 +            Class c = loader.loadClass("Test6800154");
   83.91 +            Runnable r = (Runnable) c.newInstance();
   83.92 +            r.run();
   83.93 +        }
   83.94 +    }
   83.95 +
   83.96 +    public void run()
   83.97 +    {
   83.98 +        // Iterate over all dividends.
   83.99 +        for (int i = 0; i < DIVIDENDS.length; i++) {
  83.100 +            long dividend = DIVIDENDS[i];
  83.101 +
  83.102 +            long expected = divint(dividend);
  83.103 +            long result = divcomp(dividend);
  83.104 +
  83.105 +            if (result != expected)
  83.106 +                throw new InternalError(dividend + " / " + DIVISOR + " failed: " + result + " != " + expected);
  83.107 +        }
  83.108 +    }
  83.109 +
  83.110 +    static long divint(long a)  { return a / DIVISOR; }
  83.111 +    static long divcomp(long a) { return a / DIVISOR; }
  83.112 +}
    84.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    84.2 +++ b/test/compiler/6805724/Test6805724.java	Wed Feb 18 18:14:18 2009 -0800
    84.3 @@ -0,0 +1,80 @@
    84.4 +/*
    84.5 + * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
    84.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    84.7 + *
    84.8 + * This code is free software; you can redistribute it and/or modify it
    84.9 + * under the terms of the GNU General Public License version 2 only, as
   84.10 + * published by the Free Software Foundation.
   84.11 + *
   84.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   84.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   84.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   84.15 + * version 2 for more details (a copy is included in the LICENSE file that
   84.16 + * accompanied this code).
   84.17 + *
   84.18 + * You should have received a copy of the GNU General Public License version
   84.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   84.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   84.21 + *
   84.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
   84.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
   84.24 + * have any questions.
   84.25 + */
   84.26 +
   84.27 +/**
   84.28 + * @test
   84.29 + * @bug 6805724
   84.30 + * @summary ModLNode::Ideal() generates functionally incorrect graph when divisor is any (2^k-1) constant.
   84.31 + *
   84.32 + * @run main/othervm -Xcomp -XX:CompileOnly=Test6805724.fcomp Test6805724
   84.33 + */
   84.34 +
   84.35 +import java.net.URLClassLoader;
   84.36 +
   84.37 +public class Test6805724 implements Runnable {
   84.38 +    // Initialize DIVISOR so that it is final in this class.
   84.39 +    static final long DIVISOR;  // 2^k-1 constant
   84.40 +
   84.41 +    static {
   84.42 +        long value = 0;
   84.43 +        try {
   84.44 +            value = Long.decode(System.getProperty("divisor"));
   84.45 +        } catch (Throwable t) {
   84.46 +            // This one is required for the Class.forName() in main.
   84.47 +        }
   84.48 +        DIVISOR = value;
   84.49 +    }
   84.50 +
   84.51 +    static long fint(long x) {
   84.52 +        return x % DIVISOR;
   84.53 +    }
   84.54 +
   84.55 +    static long fcomp(long x) {
   84.56 +        return x % DIVISOR;
   84.57 +    }
   84.58 +
   84.59 +    public void run() {
   84.60 +        long a = 0x617981E1L;
   84.61 +
   84.62 +        long expected = fint(a);
   84.63 +        long result = fcomp(a);
   84.64 +
   84.65 +        if (result != expected)
   84.66 +            throw new InternalError(result + " != " + expected);
   84.67 +    }
   84.68 +
   84.69 +    public static void main(String args[]) throws Exception {
   84.70 +        Class cl = Class.forName("Test6805724");
   84.71 +        URLClassLoader apploader = (URLClassLoader) cl.getClassLoader();
   84.72 +
   84.73 +        // Iterate over all 2^k-1 divisors.
   84.74 +        for (int k = 1; k < Long.SIZE; k++) {
   84.75 +            long divisor = (1L << k) - 1;
   84.76 +            System.setProperty("divisor", "" + divisor);
   84.77 +            ClassLoader loader = new URLClassLoader(apploader.getURLs(), apploader.getParent());
   84.78 +            Class c = loader.loadClass("Test6805724");
   84.79 +            Runnable r = (Runnable) c.newInstance();
   84.80 +            r.run();
   84.81 +        }
   84.82 +    }
   84.83 +}

mercurial