Merge hs24-b08

Fri, 20 Apr 2012 16:23:48 -0700

author
amurillo
date
Fri, 20 Apr 2012 16:23:48 -0700
changeset 3716
dff6e3459210
parent 3696
dce0525b7ee5
parent 3715
f3f101a5e59b
child 3717
50b4400ca1ec

Merge

     1.1 --- a/agent/src/os/linux/ps_core.c	Thu Apr 19 12:18:46 2012 -0700
     1.2 +++ b/agent/src/os/linux/ps_core.c	Fri Apr 20 16:23:48 2012 -0700
     1.3 @@ -440,7 +440,7 @@
     1.4        int j = 0;
     1.5        print_debug("---- sorted virtual address map ----\n");
     1.6        for (j = 0; j < ph->core->num_maps; j++) {
     1.7 -        print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr,
     1.8 +        print_debug("base = 0x%lx\tsize = %zd\n", ph->core->map_array[j]->vaddr,
     1.9                                           ph->core->map_array[j]->memsz);
    1.10        }
    1.11     }
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Thu Apr 19 12:18:46 2012 -0700
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Fri Apr 20 16:23:48 2012 -0700
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     2.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -42,7 +42,7 @@
    2.11  public class HeapRegionSeq extends VMObject {
    2.12      // HeapRegion** _regions;
    2.13      static private AddressField regionsField;
    2.14 -    // size_t _length;
    2.15 +    // uint _length;
    2.16      static private CIntegerField lengthField;
    2.17  
    2.18      static {
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java	Thu Apr 19 12:18:46 2012 -0700
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java	Fri Apr 20 16:23:48 2012 -0700
     3.3 @@ -40,9 +40,9 @@
     3.4  // Mirror class for HeapRegionSetBase. Represents a group of regions.
     3.5  
     3.6  public class HeapRegionSetBase extends VMObject {
     3.7 -    // size_t _length;
     3.8 +    // uint _length;
     3.9      static private CIntegerField lengthField;
    3.10 -    // size_t _region_num;
    3.11 +    // uint _region_num;
    3.12      static private CIntegerField regionNumField;
    3.13      // size_t _total_used_bytes;
    3.14      static private CIntegerField totalUsedBytesField;
     4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java	Thu Apr 19 12:18:46 2012 -0700
     4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java	Fri Apr 20 16:23:48 2012 -0700
     4.3 @@ -1,5 +1,5 @@
     4.4  /*
     4.5 - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
     4.6 + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
     4.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.8   *
     4.9   * This code is free software; you can redistribute it and/or modify it
    4.10 @@ -889,15 +889,9 @@
    4.11          Klass kls = ((ReferenceTypeImpl)type).ref();
    4.12          if (kls instanceof InstanceKlass) {
    4.13              InstanceKlass ik = (InstanceKlass) kls;
    4.14 -            if (ik.isInterface()) {
    4.15 -                if (ik.nofImplementors() == 0L) {
    4.16 -                    return new ArrayList(0);
    4.17 -                }
    4.18 -            } else {
    4.19 -                // if the Klass is final or if there are no subklasses loaded yet
    4.20 -                if (ik.getAccessFlagsObj().isFinal() || ik.getSubklassKlass() == null) {
    4.21 -                    includeSubtypes = false;
    4.22 -                }
    4.23 +            // if the Klass is final or if there are no subklasses loaded yet
    4.24 +            if (ik.getAccessFlagsObj().isFinal() || ik.getSubklassKlass() == null) {
    4.25 +                includeSubtypes = false;
    4.26              }
    4.27          } else {
    4.28              // no subtypes for primitive array types
     5.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Thu Apr 19 12:18:46 2012 -0700
     5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Fri Apr 20 16:23:48 2012 -0700
     5.3 @@ -1,5 +1,5 @@
     5.4  /*
     5.5 - * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
     5.6 + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
     5.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.8   *
     5.9   * This code is free software; you can redistribute it and/or modify it
    5.10 @@ -52,7 +52,6 @@
    5.11    private static int HIGH_OFFSET;
    5.12    private static int GENERIC_SIGNATURE_INDEX_OFFSET;
    5.13    private static int FIELD_SLOTS;
    5.14 -  public static int IMPLEMENTORS_LIMIT;
    5.15  
    5.16    // ClassState constants
    5.17    private static int CLASS_STATE_UNPARSABLE_BY_GC;
    5.18 @@ -70,13 +69,6 @@
    5.19      methodOrdering       = new OopField(type.getOopField("_method_ordering"), Oop.getHeaderSize());
    5.20      localInterfaces      = new OopField(type.getOopField("_local_interfaces"), Oop.getHeaderSize());
    5.21      transitiveInterfaces = new OopField(type.getOopField("_transitive_interfaces"), Oop.getHeaderSize());
    5.22 -    nofImplementors      = new CIntField(type.getCIntegerField("_nof_implementors"), Oop.getHeaderSize());
    5.23 -    IMPLEMENTORS_LIMIT   = db.lookupIntConstant("instanceKlass::implementors_limit").intValue();
    5.24 -    implementors         = new OopField[IMPLEMENTORS_LIMIT];
    5.25 -    for (int i = 0; i < IMPLEMENTORS_LIMIT; i++) {
    5.26 -      long arrayOffset = Oop.getHeaderSize() + (i * db.getAddressSize());
    5.27 -      implementors[i]    = new OopField(type.getOopField("_implementors[0]"), arrayOffset);
    5.28 -    }
    5.29      fields               = new OopField(type.getOopField("_fields"), Oop.getHeaderSize());
    5.30      javaFieldsCount      = new CIntField(type.getCIntegerField("_java_fields_count"), Oop.getHeaderSize());
    5.31      constants            = new OopField(type.getOopField("_constants"), Oop.getHeaderSize());
    5.32 @@ -136,8 +128,6 @@
    5.33    private static OopField  methodOrdering;
    5.34    private static OopField  localInterfaces;
    5.35    private static OopField  transitiveInterfaces;
    5.36 -  private static CIntField nofImplementors;
    5.37 -  private static OopField[] implementors;
    5.38    private static OopField  fields;
    5.39    private static CIntField javaFieldsCount;
    5.40    private static OopField  constants;
    5.41 @@ -317,9 +307,6 @@
    5.42    public TypeArray getMethodOrdering()      { return (TypeArray)    methodOrdering.getValue(this); }
    5.43    public ObjArray  getLocalInterfaces()     { return (ObjArray)     localInterfaces.getValue(this); }
    5.44    public ObjArray  getTransitiveInterfaces() { return (ObjArray)     transitiveInterfaces.getValue(this); }
    5.45 -  public long      nofImplementors()        { return                nofImplementors.getValue(this); }
    5.46 -  public Klass     getImplementor()         { return (Klass)        implementors[0].getValue(this); }
    5.47 -  public Klass     getImplementor(int i)    { return (Klass)        implementors[i].getValue(this); }
    5.48    public TypeArray getFields()              { return (TypeArray)    fields.getValue(this); }
    5.49    public int       getJavaFieldsCount()     { return                (int) javaFieldsCount.getValue(this); }
    5.50    public int       getAllFieldsCount()      { return                (int)getFields().getLength() / FIELD_SLOTS; }
    5.51 @@ -527,9 +514,6 @@
    5.52        visitor.doOop(methodOrdering, true);
    5.53        visitor.doOop(localInterfaces, true);
    5.54        visitor.doOop(transitiveInterfaces, true);
    5.55 -      visitor.doCInt(nofImplementors, true);
    5.56 -      for (int i = 0; i < IMPLEMENTORS_LIMIT; i++)
    5.57 -        visitor.doOop(implementors[i], true);
    5.58        visitor.doOop(fields, true);
    5.59        visitor.doOop(constants, true);
    5.60        visitor.doOop(classLoader, true);
     6.1 --- a/make/hotspot_version	Thu Apr 19 12:18:46 2012 -0700
     6.2 +++ b/make/hotspot_version	Fri Apr 20 16:23:48 2012 -0700
     6.3 @@ -35,7 +35,7 @@
     6.4  
     6.5  HS_MAJOR_VER=24
     6.6  HS_MINOR_VER=0
     6.7 -HS_BUILD_NUMBER=07
     6.8 +HS_BUILD_NUMBER=08
     6.9  
    6.10  JDK_MAJOR_VER=1
    6.11  JDK_MINOR_VER=8
     7.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Apr 19 12:18:46 2012 -0700
     7.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Apr 20 16:23:48 2012 -0700
     7.3 @@ -2651,56 +2651,49 @@
     7.4      // Check to see if a field modification watch has been set before we take
     7.5      // the time to call into the VM.
     7.6      Label L2;
     7.7 -    __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
     7.8 -    __ testl(rcx,rcx);
     7.9 -    __ jcc(Assembler::zero, L2);
    7.10 -    __ pop_ptr(rbx);               // copy the object pointer from tos
    7.11 -    __ verify_oop(rbx);
    7.12 -    __ push_ptr(rbx);              // put the object pointer back on tos
    7.13 -    __ subptr(rsp, sizeof(jvalue));  // add space for a jvalue object
    7.14 -    __ mov(rcx, rsp);
    7.15 -    __ push_ptr(rbx);                 // save object pointer so we can steal rbx,
    7.16 -    __ xorptr(rbx, rbx);
    7.17 -    const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
    7.18 -    const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
    7.19 -    switch (bytecode()) {          // load values into the jvalue object
    7.20 -    case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
    7.21 -    case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
    7.22 -    case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
    7.23 -    case Bytecodes::_fast_iputfield: __ movl(lo_value, rax);                         break;
    7.24 -    case Bytecodes::_fast_lputfield:
    7.25 -      NOT_LP64(__ movptr(hi_value, rdx));
    7.26 -      __ movptr(lo_value, rax);
    7.27 -      break;
    7.28 -
    7.29 -    // need to call fld_s() after fstp_s() to restore the value for below
    7.30 -    case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value);        break;
    7.31 -
    7.32 -    // need to call fld_d() after fstp_d() to restore the value for below
    7.33 -    case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value);        break;
    7.34 -
    7.35 -    // since rcx is not an object we don't call store_check() here
    7.36 -    case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax);                       break;
    7.37 -
    7.38 -    default:  ShouldNotReachHere();
    7.39 -    }
    7.40 -    __ pop_ptr(rbx);  // restore copy of object pointer
    7.41 -
    7.42 -    // Save rax, and sometimes rdx because call_VM() will clobber them,
    7.43 -    // then use them for JVM/DI purposes
    7.44 -    __ push(rax);
    7.45 -    if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
    7.46 -    // access constant pool cache entry
    7.47 -    __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
    7.48 -    __ verify_oop(rbx);
    7.49 -    // rbx,: object pointer copied above
    7.50 -    // rax,: cache entry pointer
    7.51 -    // rcx: jvalue object on the stack
    7.52 -    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
    7.53 -    if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);  // restore high value
    7.54 -    __ pop(rax);     // restore lower value
    7.55 -    __ addptr(rsp, sizeof(jvalue));  // release jvalue object space
    7.56 -    __ bind(L2);
    7.57 +     __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    7.58 +     __ testl(rcx,rcx);
    7.59 +     __ jcc(Assembler::zero, L2);
    7.60 +     __ pop_ptr(rbx);               // copy the object pointer from tos
    7.61 +     __ verify_oop(rbx);
    7.62 +     __ push_ptr(rbx);              // put the object pointer back on tos
    7.63 +
    7.64 +     // Save tos values before call_VM() clobbers them. Since we have
    7.65 +     // to do it for every data type, we use the saved values as the
    7.66 +     // jvalue object.
    7.67 +     switch (bytecode()) {          // load values into the jvalue object
    7.68 +     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
    7.69 +     case Bytecodes::_fast_bputfield: // fall through
    7.70 +     case Bytecodes::_fast_sputfield: // fall through
    7.71 +     case Bytecodes::_fast_cputfield: // fall through
    7.72 +     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
    7.73 +     case Bytecodes::_fast_dputfield: __ push_d(); break;
    7.74 +     case Bytecodes::_fast_fputfield: __ push_f(); break;
    7.75 +     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
    7.76 +
    7.77 +     default:
    7.78 +       ShouldNotReachHere();
    7.79 +     }
    7.80 +     __ mov(rcx, rsp);              // points to jvalue on the stack
    7.81 +     // access constant pool cache entry
    7.82 +     __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
    7.83 +     __ verify_oop(rbx);
    7.84 +     // rbx,: object pointer copied above
    7.85 +     // rax,: cache entry pointer
    7.86 +     // rcx: jvalue object on the stack
    7.87 +     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
    7.88 +
    7.89 +     switch (bytecode()) {             // restore tos values
    7.90 +     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
    7.91 +     case Bytecodes::_fast_bputfield: // fall through
    7.92 +     case Bytecodes::_fast_sputfield: // fall through
    7.93 +     case Bytecodes::_fast_cputfield: // fall through
    7.94 +     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
    7.95 +     case Bytecodes::_fast_dputfield: __ pop_d(); break;
    7.96 +     case Bytecodes::_fast_fputfield: __ pop_f(); break;
    7.97 +     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
    7.98 +     }
    7.99 +     __ bind(L2);
   7.100    }
   7.101  }
   7.102  
     8.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Apr 19 12:18:46 2012 -0700
     8.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Apr 20 16:23:48 2012 -0700
     8.3 @@ -2685,26 +2685,23 @@
     8.4      __ pop_ptr(rbx);                  // copy the object pointer from tos
     8.5      __ verify_oop(rbx);
     8.6      __ push_ptr(rbx);                 // put the object pointer back on tos
     8.7 -    __ subptr(rsp, sizeof(jvalue));  // add space for a jvalue object
     8.8 -    __ mov(c_rarg3, rsp);
     8.9 -    const Address field(c_rarg3, 0);
    8.10 -
    8.11 +    // Save tos values before call_VM() clobbers them. Since we have
    8.12 +    // to do it for every data type, we use the saved values as the
    8.13 +    // jvalue object.
    8.14      switch (bytecode()) {          // load values into the jvalue object
    8.15 -    case Bytecodes::_fast_aputfield: __ movq(field, rax); break;
    8.16 -    case Bytecodes::_fast_lputfield: __ movq(field, rax); break;
    8.17 -    case Bytecodes::_fast_iputfield: __ movl(field, rax); break;
    8.18 -    case Bytecodes::_fast_bputfield: __ movb(field, rax); break;
    8.19 +    case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
    8.20 +    case Bytecodes::_fast_bputfield: // fall through
    8.21      case Bytecodes::_fast_sputfield: // fall through
    8.22 -    case Bytecodes::_fast_cputfield: __ movw(field, rax); break;
    8.23 -    case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break;
    8.24 -    case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break;
    8.25 +    case Bytecodes::_fast_cputfield: // fall through
    8.26 +    case Bytecodes::_fast_iputfield: __ push_i(rax); break;
    8.27 +    case Bytecodes::_fast_dputfield: __ push_d(); break;
    8.28 +    case Bytecodes::_fast_fputfield: __ push_f(); break;
    8.29 +    case Bytecodes::_fast_lputfield: __ push_l(rax); break;
    8.30 +
    8.31      default:
    8.32        ShouldNotReachHere();
    8.33      }
    8.34 -
    8.35 -    // Save rax because call_VM() will clobber it, then use it for
    8.36 -    // JVMTI purposes
    8.37 -    __ push(rax);
    8.38 +    __ mov(c_rarg3, rsp);             // points to jvalue on the stack
    8.39      // access constant pool cache entry
    8.40      __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
    8.41      __ verify_oop(rbx);
    8.42 @@ -2715,8 +2712,17 @@
    8.43                 CAST_FROM_FN_PTR(address,
    8.44                                  InterpreterRuntime::post_field_modification),
    8.45                 rbx, c_rarg2, c_rarg3);
    8.46 -    __ pop(rax);     // restore lower value
    8.47 -    __ addptr(rsp, sizeof(jvalue));  // release jvalue object space
    8.48 +
    8.49 +    switch (bytecode()) {             // restore tos values
    8.50 +    case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
    8.51 +    case Bytecodes::_fast_bputfield: // fall through
    8.52 +    case Bytecodes::_fast_sputfield: // fall through
    8.53 +    case Bytecodes::_fast_cputfield: // fall through
    8.54 +    case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
    8.55 +    case Bytecodes::_fast_dputfield: __ pop_d(); break;
    8.56 +    case Bytecodes::_fast_fputfield: __ pop_f(); break;
    8.57 +    case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
    8.58 +    }
    8.59      __ bind(L2);
    8.60    }
    8.61  }
     9.1 --- a/src/os/bsd/vm/osThread_bsd.hpp	Thu Apr 19 12:18:46 2012 -0700
     9.2 +++ b/src/os/bsd/vm/osThread_bsd.hpp	Fri Apr 20 16:23:48 2012 -0700
     9.3 @@ -72,15 +72,18 @@
     9.4  
     9.5  #ifdef _ALLBSD_SOURCE
     9.6  #ifdef __APPLE__
     9.7 +  static size_t thread_id_size()         { return sizeof(thread_t); }
     9.8    thread_t thread_id() const {
     9.9      return _thread_id;
    9.10    }
    9.11  #else
    9.12 +  static size_t thread_id_size()         { return sizeof(pthread_t); }
    9.13    pthread_t thread_id() const {
    9.14      return _thread_id;
    9.15    }
    9.16  #endif
    9.17  #else
    9.18 +  static size_t thread_id_size()         { return sizeof(pid_t); }
    9.19    pid_t thread_id() const {
    9.20      return _thread_id;
    9.21    }
    10.1 --- a/src/os/linux/vm/osThread_linux.hpp	Thu Apr 19 12:18:46 2012 -0700
    10.2 +++ b/src/os/linux/vm/osThread_linux.hpp	Fri Apr 20 16:23:48 2012 -0700
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    10.6 + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -56,6 +56,8 @@
   10.11    sigset_t  caller_sigmask() const       { return _caller_sigmask; }
   10.12    void    set_caller_sigmask(sigset_t sigmask)  { _caller_sigmask = sigmask; }
   10.13  
   10.14 +  static size_t thread_id_size()         { return sizeof(pid_t); }
   10.15 +
   10.16    pid_t thread_id() const {
   10.17      return _thread_id;
   10.18    }
    11.1 --- a/src/os/solaris/vm/osThread_solaris.hpp	Thu Apr 19 12:18:46 2012 -0700
    11.2 +++ b/src/os/solaris/vm/osThread_solaris.hpp	Fri Apr 20 16:23:48 2012 -0700
    11.3 @@ -36,6 +36,7 @@
    11.4    bool     _vm_created_thread; // true if the VM created this thread,
    11.5                                 // false if primary thread or attached thread
    11.6   public:
    11.7 +  static size_t thread_id_size()   { return sizeof(thread_t); }
    11.8    thread_t thread_id() const       { return _thread_id; }
    11.9    uint     lwp_id() const          { return _lwp_id; }
   11.10    int      native_priority() const { return _native_priority; }
    12.1 --- a/src/os/windows/vm/osThread_windows.hpp	Thu Apr 19 12:18:46 2012 -0700
    12.2 +++ b/src/os/windows/vm/osThread_windows.hpp	Fri Apr 20 16:23:48 2012 -0700
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    12.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -42,6 +42,8 @@
   12.11    HANDLE interrupt_event() const                   { return _interrupt_event; }
   12.12    void set_interrupt_event(HANDLE interrupt_event) { _interrupt_event = interrupt_event; }
   12.13  
   12.14 +
   12.15 +  static size_t thread_id_size()                   { return sizeof(unsigned long); }
   12.16    unsigned long thread_id() const                  { return _thread_id; }
   12.17  #ifndef PRODUCT
   12.18    // Used for debugging, return a unique integer for each thread.
    13.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Apr 19 12:18:46 2012 -0700
    13.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Apr 20 16:23:48 2012 -0700
    13.3 @@ -1694,7 +1694,9 @@
    13.4        // they are roughly equivalent to Object.
    13.5        ciInstanceKlass* singleton = NULL;
    13.6        if (target->holder()->nof_implementors() == 1) {
    13.7 -        singleton = target->holder()->implementor(0);
    13.8 +        singleton = target->holder()->implementor();
    13.9 +        assert(singleton != NULL && singleton != target->holder(),
   13.10 +               "just checking");
   13.11  
   13.12          assert(holder->is_interface(), "invokeinterface to non interface?");
   13.13          ciInstanceKlass* decl_interface = (ciInstanceKlass*)holder;
   13.14 @@ -3130,10 +3132,23 @@
   13.15    bool cantrap = true;
   13.16    vmIntrinsics::ID id = callee->intrinsic_id();
   13.17    switch (id) {
   13.18 -    case vmIntrinsics::_arraycopy     :
   13.19 +    case vmIntrinsics::_arraycopy:
   13.20        if (!InlineArrayCopy) return false;
   13.21        break;
   13.22  
   13.23 +#ifdef TRACE_HAVE_INTRINSICS
   13.24 +    case vmIntrinsics::_classID:
   13.25 +    case vmIntrinsics::_threadID:
   13.26 +      preserves_state = true;
   13.27 +      cantrap = true;
   13.28 +      break;
   13.29 +
   13.30 +    case vmIntrinsics::_counterTime:
   13.31 +      preserves_state = true;
   13.32 +      cantrap = false;
   13.33 +      break;
   13.34 +#endif
   13.35 +
   13.36      case vmIntrinsics::_currentTimeMillis:
   13.37      case vmIntrinsics::_nanoTime:
   13.38        preserves_state = true;
    14.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Apr 19 12:18:46 2012 -0700
    14.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Apr 20 16:23:48 2012 -0700
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
    14.6 + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -2879,6 +2879,50 @@
   14.11    __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
   14.12  }
   14.13  
   14.14 +void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
   14.15 +    assert(x->number_of_arguments() == expected_arguments, "wrong type");
   14.16 +    LIR_Opr reg = result_register_for(x->type());
   14.17 +    __ call_runtime_leaf(routine, getThreadTemp(),
   14.18 +                         reg, new LIR_OprList());
   14.19 +    LIR_Opr result = rlock_result(x);
   14.20 +    __ move(reg, result);
   14.21 +}
   14.22 +
   14.23 +#ifdef TRACE_HAVE_INTRINSICS
   14.24 +void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
   14.25 +    LIR_Opr thread = getThreadPointer();
   14.26 +    LIR_Opr osthread = new_pointer_register();
   14.27 +    __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
   14.28 +    size_t thread_id_size = OSThread::thread_id_size();
   14.29 +    if (thread_id_size == (size_t) BytesPerLong) {
   14.30 +      LIR_Opr id = new_register(T_LONG);
   14.31 +      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
   14.32 +      __ convert(Bytecodes::_l2i, id, rlock_result(x));
   14.33 +    } else if (thread_id_size == (size_t) BytesPerInt) {
   14.34 +      __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
   14.35 +    } else {
   14.36 +      ShouldNotReachHere();
   14.37 +    }
   14.38 +}
   14.39 +
   14.40 +void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
   14.41 +    CodeEmitInfo* info = state_for(x);
   14.42 +    CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
   14.43 +    assert(info != NULL, "must have info");
   14.44 +    LIRItem arg(x->argument_at(1), this);
   14.45 +    arg.load_item();
   14.46 +    LIR_Opr klass = new_register(T_OBJECT);
   14.47 +    __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_OBJECT), klass, info);
   14.48 +    LIR_Opr id = new_register(T_LONG);
   14.49 +    ByteSize offset = TRACE_ID_OFFSET;
   14.50 +    LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
   14.51 +    __ move(trace_id_addr, id);
   14.52 +    __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
   14.53 +    __ store(id, trace_id_addr);
   14.54 +    __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
   14.55 +    __ move(id, rlock_result(x));
   14.56 +}
   14.57 +#endif
   14.58  
   14.59  void LIRGenerator::do_Intrinsic(Intrinsic* x) {
   14.60    switch (x->id()) {
   14.61 @@ -2890,25 +2934,21 @@
   14.62      break;
   14.63    }
   14.64  
   14.65 -  case vmIntrinsics::_currentTimeMillis: {
   14.66 -    assert(x->number_of_arguments() == 0, "wrong type");
   14.67 -    LIR_Opr reg = result_register_for(x->type());
   14.68 -    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
   14.69 -                         reg, new LIR_OprList());
   14.70 -    LIR_Opr result = rlock_result(x);
   14.71 -    __ move(reg, result);
   14.72 +#ifdef TRACE_HAVE_INTRINSICS
   14.73 +  case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
   14.74 +  case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
   14.75 +  case vmIntrinsics::_counterTime:
   14.76 +    do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
   14.77      break;
   14.78 -  }
   14.79 -
   14.80 -  case vmIntrinsics::_nanoTime: {
   14.81 -    assert(x->number_of_arguments() == 0, "wrong type");
   14.82 -    LIR_Opr reg = result_register_for(x->type());
   14.83 -    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
   14.84 -                         reg, new LIR_OprList());
   14.85 -    LIR_Opr result = rlock_result(x);
   14.86 -    __ move(reg, result);
   14.87 +#endif
   14.88 +
   14.89 +  case vmIntrinsics::_currentTimeMillis:
   14.90 +    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
   14.91      break;
   14.92 -  }
   14.93 +
   14.94 +  case vmIntrinsics::_nanoTime:
   14.95 +    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
   14.96 +    break;
   14.97  
   14.98    case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
   14.99    case vmIntrinsics::_getClass:       do_getClass(x);      break;
    15.1 --- a/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Apr 19 12:18:46 2012 -0700
    15.2 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Apr 20 16:23:48 2012 -0700
    15.3 @@ -1,5 +1,5 @@
    15.4  /*
    15.5 - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
    15.6 + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    15.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    15.8   *
    15.9   * This code is free software; you can redistribute it and/or modify it
   15.10 @@ -426,6 +426,12 @@
   15.11    SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
   15.12    void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
   15.13  
   15.14 +  void do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x);
   15.15 +#ifdef TRACE_HAVE_INTRINSICS
   15.16 +  void do_ThreadIDIntrinsic(Intrinsic* x);
   15.17 +  void do_ClassIDIntrinsic(Intrinsic* x);
   15.18 +#endif
   15.19 +
   15.20   public:
   15.21    Compilation*  compilation() const              { return _compilation; }
   15.22    FrameMap*     frame_map() const                { return _compilation->frame_map(); }
    16.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Apr 19 12:18:46 2012 -0700
    16.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Apr 20 16:23:48 2012 -0700
    16.3 @@ -295,6 +295,9 @@
    16.4    FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
    16.5    FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
    16.6    FUNCTION_CASE(entry, trace_block_entry);
    16.7 +#ifdef TRACE_HAVE_INTRINSICS
    16.8 +  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
    16.9 +#endif
   16.10  
   16.11  #undef FUNCTION_CASE
   16.12  
    17.1 --- a/src/share/vm/ci/ciInstanceKlass.cpp	Thu Apr 19 12:18:46 2012 -0700
    17.2 +++ b/src/share/vm/ci/ciInstanceKlass.cpp	Fri Apr 20 16:23:48 2012 -0700
    17.3 @@ -1,5 +1,5 @@
    17.4  /*
    17.5 - * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
    17.6 + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    17.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    17.8   *
    17.9   * This code is free software; you can redistribute it and/or modify it
   17.10 @@ -59,10 +59,7 @@
   17.11    _has_nonstatic_fields = ik->has_nonstatic_fields();
   17.12    _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
   17.13  
   17.14 -  _nof_implementors = ik->nof_implementors();
   17.15 -  for (int i = 0; i < implementors_limit; i++) {
   17.16 -    _implementors[i] = NULL;  // we will fill these lazily
   17.17 -  }
   17.18 +  _implementor = NULL; // we will fill these lazily
   17.19  
   17.20    Thread *thread = Thread::current();
   17.21    if (ciObjectFactory::is_initialized()) {
   17.22 @@ -102,7 +99,6 @@
   17.23    _nonstatic_field_size = -1;
   17.24    _has_nonstatic_fields = false;
   17.25    _nonstatic_fields = NULL;
   17.26 -  _nof_implementors = -1;
   17.27    _loader = loader;
   17.28    _protection_domain = protection_domain;
   17.29    _is_shared = false;
   17.30 @@ -133,17 +129,6 @@
   17.31  }
   17.32  
   17.33  // ------------------------------------------------------------------
   17.34 -// ciInstanceKlass::compute_shared_nof_implementors
   17.35 -int ciInstanceKlass::compute_shared_nof_implementors() {
   17.36 -  // We requery this property, since it is a very old ciObject.
   17.37 -  GUARDED_VM_ENTRY(
   17.38 -    instanceKlass* ik = get_instanceKlass();
   17.39 -    _nof_implementors = ik->nof_implementors();
   17.40 -    return _nof_implementors;
   17.41 -  )
   17.42 -}
   17.43 -
   17.44 -// ------------------------------------------------------------------
   17.45  // ciInstanceKlass::loader
   17.46  oop ciInstanceKlass::loader() {
   17.47    ASSERT_IN_VM;
   17.48 @@ -540,7 +525,7 @@
   17.49    if (is_shared()) {
   17.50      return is_final();  // approximately correct
   17.51    } else {
   17.52 -    return !_has_subklass && (_nof_implementors == 0);
   17.53 +    return !_has_subklass && (nof_implementors() == 0);
   17.54    }
   17.55  }
   17.56  
   17.57 @@ -548,35 +533,31 @@
   17.58  // ciInstanceKlass::implementor
   17.59  //
   17.60  // Report an implementor of this interface.
   17.61 -// Returns NULL if exact information is not available.
   17.62  // Note that there are various races here, since my copy
   17.63  // of _nof_implementors might be out of date with respect
   17.64  // to results returned by instanceKlass::implementor.
   17.65  // This is OK, since any dependencies we decide to assert
   17.66  // will be checked later under the Compile_lock.
   17.67 -ciInstanceKlass* ciInstanceKlass::implementor(int n) {
   17.68 -  if (n >= implementors_limit) {
   17.69 -    return NULL;
   17.70 -  }
   17.71 -  ciInstanceKlass* impl = _implementors[n];
   17.72 +ciInstanceKlass* ciInstanceKlass::implementor() {
   17.73 +  ciInstanceKlass* impl = _implementor;
   17.74    if (impl == NULL) {
   17.75 -    if (_nof_implementors > implementors_limit) {
   17.76 -      return NULL;
   17.77 -    }
   17.78      // Go into the VM to fetch the implementor.
   17.79      {
   17.80        VM_ENTRY_MARK;
   17.81 -      klassOop k = get_instanceKlass()->implementor(n);
   17.82 +      klassOop k = get_instanceKlass()->implementor();
   17.83        if (k != NULL) {
   17.84 -        impl = CURRENT_THREAD_ENV->get_object(k)->as_instance_klass();
   17.85 +        if (k == get_instanceKlass()->as_klassOop()) {
    17.86 +          // More than one implementor. Use 'this' in this case.
   17.87 +          impl = this;
   17.88 +        } else {
   17.89 +          impl = CURRENT_THREAD_ENV->get_object(k)->as_instance_klass();
   17.90 +        }
   17.91        }
   17.92      }
   17.93      // Memoize this result.
   17.94      if (!is_shared()) {
   17.95 -      _implementors[n] = (impl == NULL)? this: impl;
   17.96 +      _implementor = impl;
   17.97      }
   17.98 -  } else if (impl == this) {
   17.99 -    impl = NULL;  // memoized null result from a VM query
  17.100    }
  17.101    return impl;
  17.102  }
    18.1 --- a/src/share/vm/ci/ciInstanceKlass.hpp	Thu Apr 19 12:18:46 2012 -0700
    18.2 +++ b/src/share/vm/ci/ciInstanceKlass.hpp	Fri Apr 20 16:23:48 2012 -0700
    18.3 @@ -1,5 +1,5 @@
    18.4  /*
    18.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    18.6 + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    18.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    18.8   *
    18.9   * This code is free software; you can redistribute it and/or modify it
   18.10 @@ -65,9 +65,11 @@
   18.11    ciConstantPoolCache*   _field_cache;  // cached map index->field
   18.12    GrowableArray<ciField*>* _nonstatic_fields;
   18.13  
   18.14 -  enum { implementors_limit = instanceKlass::implementors_limit };
   18.15 -  ciInstanceKlass*       _implementors[implementors_limit];
   18.16 -  jint                   _nof_implementors;
    18.17 +  // The possible values of the _implementor fall into the following three cases:
   18.18 +  //   NULL: no implementor.
   18.19 +  //   A ciInstanceKlass that's not itself: one implementor.
    18.20 +  //   Itself: more than one implementor.
   18.21 +  ciInstanceKlass*       _implementor;
   18.22  
   18.23    GrowableArray<ciField*>* _non_static_fields;
   18.24  
   18.25 @@ -97,7 +99,6 @@
   18.26  
   18.27    void compute_shared_init_state();
   18.28    bool compute_shared_has_subklass();
   18.29 -  int  compute_shared_nof_implementors();
   18.30    int  compute_nonstatic_fields();
   18.31    GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields);
   18.32  
   18.33 @@ -158,10 +159,17 @@
   18.34      assert(is_loaded(), "must be loaded");
   18.35      return _nonstatic_oop_map_size; }
   18.36    ciInstanceKlass*       super();
   18.37 -  jint                   nof_implementors()  {
   18.38 +  jint                   nof_implementors() {
   18.39 +    ciInstanceKlass* impl;
   18.40      assert(is_loaded(), "must be loaded");
   18.41 -    if (_is_shared)  return compute_shared_nof_implementors();
   18.42 -    return _nof_implementors;
   18.43 +    impl = implementor();
   18.44 +    if (impl == NULL) {
   18.45 +      return 0;
   18.46 +    } else if (impl != this) {
   18.47 +      return 1;
   18.48 +    } else {
   18.49 +      return 2;
   18.50 +    }
   18.51    }
   18.52  
   18.53    ciInstanceKlass* get_canonical_holder(int offset);
   18.54 @@ -207,7 +215,7 @@
   18.55    // but consider adding to vmSymbols.hpp instead.
   18.56  
   18.57    bool is_leaf_type();
   18.58 -  ciInstanceKlass* implementor(int n);
   18.59 +  ciInstanceKlass* implementor();
   18.60  
   18.61    // Is the defining class loader of this class the default loader?
   18.62    bool uses_default_loader();
    19.1 --- a/src/share/vm/classfile/classFileParser.cpp	Thu Apr 19 12:18:46 2012 -0700
    19.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Fri Apr 20 16:23:48 2012 -0700
    19.3 @@ -3354,6 +3354,7 @@
    19.4      klassOop ik = oopFactory::new_instanceKlass(name, vtable_size, itable_size,
    19.5                                                  static_field_size,
    19.6                                                  total_oop_map_count,
    19.7 +                                                access_flags,
    19.8                                                  rt, CHECK_(nullHandle));
    19.9      instanceKlassHandle this_klass (THREAD, ik);
   19.10  
   19.11 @@ -3362,7 +3363,6 @@
   19.12             "sanity");
   19.13  
   19.14      // Fill in information already parsed
   19.15 -    this_klass->set_access_flags(access_flags);
   19.16      this_klass->set_should_verify_class(verify);
   19.17      jint lh = Klass::instance_layout_helper(instance_size, false);
   19.18      this_klass->set_layout_helper(lh);
    20.1 --- a/src/share/vm/classfile/vmSymbols.hpp	Thu Apr 19 12:18:46 2012 -0700
    20.2 +++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Apr 20 16:23:48 2012 -0700
    20.3 @@ -27,6 +27,7 @@
    20.4  
    20.5  #include "oops/symbol.hpp"
    20.6  #include "memory/iterator.hpp"
    20.7 +#include "trace/traceMacros.hpp"
    20.8  
    20.9  // The class vmSymbols is a name space for fast lookup of
   20.10  // symbols commonly used in the VM.
   20.11 @@ -424,6 +425,7 @@
   20.12    template(throwable_throwable_signature,             "(Ljava/lang/Throwable;)Ljava/lang/Throwable;")             \
   20.13    template(class_void_signature,                      "(Ljava/lang/Class;)V")                     \
   20.14    template(class_int_signature,                       "(Ljava/lang/Class;)I")                     \
   20.15 +  template(class_long_signature,                      "(Ljava/lang/Class;)J")                     \
   20.16    template(class_boolean_signature,                   "(Ljava/lang/Class;)Z")                     \
   20.17    template(throwable_string_void_signature,           "(Ljava/lang/Throwable;Ljava/lang/String;)V")               \
   20.18    template(string_array_void_signature,               "([Ljava/lang/String;)V")                                   \
   20.19 @@ -539,10 +541,12 @@
   20.20    template(serializePropertiesToByteArray_signature,   "()[B")                                                    \
   20.21    template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")                     \
   20.22    template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
   20.23 +                                                                                                                  \
   20.24 +  /* trace signatures */                                                                                          \
   20.25 +  TRACE_TEMPLATES(template)                                                                                       \
   20.26 +                                                                                                                  \
   20.27    /*end*/
   20.28  
   20.29 -
   20.30 -
   20.31  // Here are all the intrinsics known to the runtime and the CI.
   20.32  // Each intrinsic consists of a public enum name (like _hashCode),
   20.33  // followed by a specification of its klass, name, and signature:
   20.34 @@ -648,6 +652,8 @@
   20.35    do_intrinsic(_nanoTime,                 java_lang_System,       nanoTime_name,          void_long_signature,   F_S)   \
   20.36     do_name(     nanoTime_name,                                   "nanoTime")                                            \
   20.37                                                                                                                          \
   20.38 +  TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)                                             \
   20.39 +                                                                                                                        \
   20.40    do_intrinsic(_arraycopy,                java_lang_System,       arraycopy_name, arraycopy_signature,           F_S)   \
   20.41     do_name(     arraycopy_name,                                  "arraycopy")                                           \
   20.42     do_signature(arraycopy_signature,                             "(Ljava/lang/Object;ILjava/lang/Object;II)V")          \
    21.1 --- a/src/share/vm/code/dependencies.cpp	Thu Apr 19 12:18:46 2012 -0700
    21.2 +++ b/src/share/vm/code/dependencies.cpp	Fri Apr 20 16:23:48 2012 -0700
    21.3 @@ -1,5 +1,5 @@
    21.4  /*
    21.5 - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
    21.6 + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    21.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    21.8   *
    21.9   * This code is free software; you can redistribute it and/or modify it
   21.10 @@ -1033,21 +1033,25 @@
   21.11      // (Old CHA had the same limitation.)
   21.12      return context_type;
   21.13    }
   21.14 -  for (int i = 0; i < nof_impls; i++) {
   21.15 -    klassOop impl = instanceKlass::cast(context_type)->implementor(i);
   21.16 -    if (impl == NULL) {
   21.17 -      // implementors array overflowed => no exact info.
   21.18 +  if (nof_impls > 0) {
   21.19 +    klassOop impl = instanceKlass::cast(context_type)->implementor();
   21.20 +    assert(impl != NULL, "just checking");
   21.21 +    // If impl is the same as the context_type, then more than one
    21.22 +    // implementor has been seen. No exact info in this case.
   21.23 +    if (impl == context_type) {
   21.24        return context_type;  // report an inexact witness to this sad affair
   21.25      }
   21.26      if (do_counts)
   21.27        { NOT_PRODUCT(deps_find_witness_steps++); }
   21.28      if (is_participant(impl)) {
   21.29 -      if (participants_hide_witnesses)  continue;
   21.30 -      // else fall through to process this guy's subclasses
   21.31 +      if (!participants_hide_witnesses) {
   21.32 +        ADD_SUBCLASS_CHAIN(impl);
   21.33 +      }
   21.34      } else if (is_witness(impl) && !ignore_witness(impl)) {
   21.35        return impl;
   21.36 +    } else {
   21.37 +      ADD_SUBCLASS_CHAIN(impl);
   21.38      }
   21.39 -    ADD_SUBCLASS_CHAIN(impl);
   21.40    }
   21.41  
   21.42    // Recursively process each non-trivial sibling chain.
   21.43 @@ -1174,8 +1178,9 @@
   21.44    } else if (ctx->nof_implementors() != 0) {
   21.45      // if it is an interface, it must be unimplemented
   21.46      // (if it is not an interface, nof_implementors is always zero)
   21.47 -    klassOop impl = ctx->implementor(0);
   21.48 -    return (impl != NULL)? impl: ctxk;
   21.49 +    klassOop impl = ctx->implementor();
   21.50 +    assert(impl != NULL, "must be set");
   21.51 +    return impl;
   21.52    } else {
   21.53      return NULL;
   21.54    }
    22.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Apr 19 12:18:46 2012 -0700
    22.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Apr 20 16:23:48 2012 -0700
    22.3 @@ -1,5 +1,5 @@
    22.4  /*
    22.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    22.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    22.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    22.8   *
    22.9   * This code is free software; you can redistribute it and/or modify it
   22.10 @@ -2444,7 +2444,7 @@
   22.11    virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
   22.12  };
   22.13  
   22.14 -void CompactibleFreeListSpace::verify(bool ignored) const {
   22.15 +void CompactibleFreeListSpace::verify() const {
   22.16    assert_lock_strong(&_freelistLock);
   22.17    verify_objects_initialized();
   22.18    MemRegion span = _collector->_span;
    23.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Apr 19 12:18:46 2012 -0700
    23.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Apr 20 16:23:48 2012 -0700
    23.3 @@ -1,5 +1,5 @@
    23.4  /*
    23.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    23.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    23.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    23.8   *
    23.9   * This code is free software; you can redistribute it and/or modify it
   23.10 @@ -492,7 +492,7 @@
   23.11    void print()                            const;
   23.12    void print_on(outputStream* st)         const;
   23.13    void prepare_for_verify();
   23.14 -  void verify(bool allow_dirty)           const;
   23.15 +  void verify()                           const;
   23.16    void verifyFreeLists()                  const PRODUCT_RETURN;
   23.17    void verifyIndexedFreeLists()           const;
   23.18    void verifyIndexedFreeList(size_t size) const;
    24.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Apr 19 12:18:46 2012 -0700
    24.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Apr 20 16:23:48 2012 -0700
    24.3 @@ -3109,21 +3109,21 @@
    24.4  }
    24.5  
    24.6  void
    24.7 -ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
    24.8 +ConcurrentMarkSweepGeneration::verify() {
    24.9    // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
   24.10    // are not called when the heap is verified during universe initialization and
   24.11    // at vm shutdown.
   24.12    if (freelistLock()->owned_by_self()) {
   24.13 -    cmsSpace()->verify(false /* ignored */);
   24.14 +    cmsSpace()->verify();
   24.15    } else {
   24.16      MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
   24.17 -    cmsSpace()->verify(false /* ignored */);
   24.18 -  }
   24.19 -}
   24.20 -
   24.21 -void CMSCollector::verify(bool allow_dirty /* ignored */) {
   24.22 -  _cmsGen->verify(allow_dirty);
   24.23 -  _permGen->verify(allow_dirty);
   24.24 +    cmsSpace()->verify();
   24.25 +  }
   24.26 +}
   24.27 +
   24.28 +void CMSCollector::verify() {
   24.29 +  _cmsGen->verify();
   24.30 +  _permGen->verify();
   24.31  }
   24.32  
   24.33  #ifndef PRODUCT
    25.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Apr 19 12:18:46 2012 -0700
    25.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Apr 20 16:23:48 2012 -0700
    25.3 @@ -1,5 +1,5 @@
    25.4  /*
    25.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    25.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    25.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.8   *
    25.9   * This code is free software; you can redistribute it and/or modify it
   25.10 @@ -988,7 +988,7 @@
   25.11    CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
   25.12  
   25.13    // debugging
   25.14 -  void verify(bool);
   25.15 +  void verify();
   25.16    bool verify_after_remark();
   25.17    void verify_ok_to_terminate() const PRODUCT_RETURN;
   25.18    void verify_work_stacks_empty() const PRODUCT_RETURN;
   25.19 @@ -1279,7 +1279,7 @@
   25.20  
   25.21    // Debugging
   25.22    void prepare_for_verify();
   25.23 -  void verify(bool allow_dirty);
   25.24 +  void verify();
   25.25    void print_statistics()               PRODUCT_RETURN;
   25.26  
   25.27    // Performance Counters support
    26.1 --- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Thu Apr 19 12:18:46 2012 -0700
    26.2 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Fri Apr 20 16:23:48 2012 -0700
    26.3 @@ -29,102 +29,6 @@
    26.4  #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    26.5  #include "memory/space.inline.hpp"
    26.6  
    26.7 -CSetChooserCache::CSetChooserCache() {
    26.8 -  for (int i = 0; i < CacheLength; ++i)
    26.9 -    _cache[i] = NULL;
   26.10 -  clear();
   26.11 -}
   26.12 -
   26.13 -void CSetChooserCache::clear() {
   26.14 -  _occupancy = 0;
   26.15 -  _first = 0;
   26.16 -  for (int i = 0; i < CacheLength; ++i) {
   26.17 -    HeapRegion *hr = _cache[i];
   26.18 -    if (hr != NULL)
   26.19 -      hr->set_sort_index(-1);
   26.20 -    _cache[i] = NULL;
   26.21 -  }
   26.22 -}
   26.23 -
   26.24 -#ifndef PRODUCT
   26.25 -bool CSetChooserCache::verify() {
   26.26 -  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
   26.27 -
   26.28 -  int index = _first;
   26.29 -  HeapRegion *prev = NULL;
   26.30 -  for (int i = 0; i < _occupancy; ++i) {
   26.31 -    guarantee(_cache[index] != NULL, "cache entry should not be empty");
   26.32 -    HeapRegion *hr = _cache[index];
   26.33 -    guarantee(!hr->is_young(), "should not be young!");
   26.34 -    if (prev != NULL) {
   26.35 -      guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
   26.36 -                "cache should be correctly ordered");
   26.37 -    }
   26.38 -    guarantee(hr->sort_index() == get_sort_index(index),
   26.39 -              "sort index should be correct");
   26.40 -    index = trim_index(index + 1);
   26.41 -    prev = hr;
   26.42 -  }
   26.43 -
   26.44 -  for (int i = 0; i < (CacheLength - _occupancy); ++i) {
   26.45 -    guarantee(_cache[index] == NULL, "cache entry should be empty");
   26.46 -    index = trim_index(index + 1);
   26.47 -  }
   26.48 -
   26.49 -  guarantee(index == _first, "we should have reached where we started from");
   26.50 -  return true;
   26.51 -}
   26.52 -#endif // PRODUCT
   26.53 -
   26.54 -void CSetChooserCache::insert(HeapRegion *hr) {
   26.55 -  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
   26.56 -
   26.57 -  assert(!is_full(), "cache should not be empty");
   26.58 -  hr->calc_gc_efficiency();
   26.59 -
   26.60 -  int empty_index;
   26.61 -  if (_occupancy == 0) {
   26.62 -    empty_index = _first;
   26.63 -  } else {
   26.64 -    empty_index = trim_index(_first + _occupancy);
   26.65 -    assert(_cache[empty_index] == NULL, "last slot should be empty");
   26.66 -    int last_index = trim_index(empty_index - 1);
   26.67 -    HeapRegion *last = _cache[last_index];
   26.68 -    assert(last != NULL,"as the cache is not empty, last should not be empty");
   26.69 -    while (empty_index != _first &&
   26.70 -           last->gc_efficiency() < hr->gc_efficiency()) {
   26.71 -      _cache[empty_index] = last;
   26.72 -      last->set_sort_index(get_sort_index(empty_index));
   26.73 -      empty_index = last_index;
   26.74 -      last_index = trim_index(last_index - 1);
   26.75 -      last = _cache[last_index];
   26.76 -    }
   26.77 -  }
   26.78 -  _cache[empty_index] = hr;
   26.79 -  hr->set_sort_index(get_sort_index(empty_index));
   26.80 -
   26.81 -  ++_occupancy;
   26.82 -  assert(verify(), "cache should be consistent");
   26.83 -}
   26.84 -
   26.85 -HeapRegion *CSetChooserCache::remove_first() {
   26.86 -  guarantee(false, "CSetChooserCache::remove_first(): "
   26.87 -                   "don't call this any more");
   26.88 -
   26.89 -  if (_occupancy > 0) {
   26.90 -    assert(_cache[_first] != NULL, "cache should have at least one region");
   26.91 -    HeapRegion *ret = _cache[_first];
   26.92 -    _cache[_first] = NULL;
   26.93 -    ret->set_sort_index(-1);
   26.94 -    --_occupancy;
   26.95 -    _first = trim_index(_first + 1);
   26.96 -    assert(verify(), "cache should be consistent");
   26.97 -    return ret;
   26.98 -  } else {
   26.99 -    return NULL;
  26.100 -  }
  26.101 -}
  26.102 -
  26.103  // Even though we don't use the GC efficiency in our heuristics as
  26.104  // much as we used to, we still order according to GC efficiency. This
  26.105  // will cause regions with a lot of live objects and large RSets to
  26.106 @@ -134,7 +38,7 @@
  26.107  // the ones we'll skip are ones with both large RSets and a lot of
  26.108  // live objects, not the ones with just a lot of live objects if we
  26.109  // ordered according to the amount of reclaimable bytes per region.
  26.110 -static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
  26.111 +static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
  26.112    if (hr1 == NULL) {
  26.113      if (hr2 == NULL) {
  26.114        return 0;
  26.115 @@ -156,8 +60,8 @@
  26.116    }
  26.117  }
  26.118  
  26.119 -static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  26.120 -  return orderRegions(*hr1p, *hr2p);
  26.121 +static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
  26.122 +  return order_regions(*hr1p, *hr2p);
  26.123  }
  26.124  
  26.125  CollectionSetChooser::CollectionSetChooser() :
  26.126 @@ -175,105 +79,74 @@
  26.127    //
  26.128    // Note: containing object is allocated on C heap since it is CHeapObj.
  26.129    //
  26.130 -  _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
  26.131 +  _regions((ResourceObj::set_allocation_type((address) &_regions,
  26.132                                               ResourceObj::C_HEAP),
  26.133                    100), true /* C_Heap */),
  26.134 -    _curr_index(0), _length(0),
  26.135 -    _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
  26.136 -    _first_par_unreserved_idx(0) {
  26.137 -  _regionLiveThresholdBytes =
  26.138 +    _curr_index(0), _length(0), _first_par_unreserved_idx(0),
  26.139 +    _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
  26.140 +  _region_live_threshold_bytes =
  26.141      HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
  26.142  }
  26.143  
  26.144  #ifndef PRODUCT
  26.145 -bool CollectionSetChooser::verify() {
  26.146 -  guarantee(_length >= 0, err_msg("_length: %d", _length));
  26.147 -  guarantee(0 <= _curr_index && _curr_index <= _length,
  26.148 -            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
  26.149 -  int index = 0;
  26.150 +void CollectionSetChooser::verify() {
  26.151 +  guarantee(_length <= regions_length(),
  26.152 +         err_msg("_length: %u regions length: %u", _length, regions_length()));
  26.153 +  guarantee(_curr_index <= _length,
  26.154 +            err_msg("_curr_index: %u _length: %u", _curr_index, _length));
  26.155 +  uint index = 0;
  26.156    size_t sum_of_reclaimable_bytes = 0;
  26.157    while (index < _curr_index) {
  26.158 -    guarantee(_markedRegions.at(index) == NULL,
  26.159 +    guarantee(regions_at(index) == NULL,
  26.160                "all entries before _curr_index should be NULL");
  26.161      index += 1;
  26.162    }
  26.163    HeapRegion *prev = NULL;
  26.164    while (index < _length) {
  26.165 -    HeapRegion *curr = _markedRegions.at(index++);
  26.166 -    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
  26.167 -    int si = curr->sort_index();
  26.168 +    HeapRegion *curr = regions_at(index++);
  26.169 +    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
  26.170      guarantee(!curr->is_young(), "should not be young!");
  26.171      guarantee(!curr->isHumongous(), "should not be humongous!");
  26.172 -    guarantee(si > -1 && si == (index-1), "sort index invariant");
  26.173      if (prev != NULL) {
  26.174 -      guarantee(orderRegions(prev, curr) != 1,
  26.175 +      guarantee(order_regions(prev, curr) != 1,
  26.176                  err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
  26.177                          prev->gc_efficiency(), curr->gc_efficiency()));
  26.178      }
  26.179      sum_of_reclaimable_bytes += curr->reclaimable_bytes();
  26.180      prev = curr;
  26.181    }
  26.182 -  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
  26.183 +  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
  26.184              err_msg("reclaimable bytes inconsistent, "
  26.185                      "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
  26.186 -                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
  26.187 -  return true;
  26.188 +                    _remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
  26.189  }
  26.190 -#endif
  26.191 +#endif // !PRODUCT
  26.192  
  26.193 -void CollectionSetChooser::fillCache() {
  26.194 -  guarantee(false, "fillCache: don't call this any more");
  26.195 -
  26.196 -  while (!_cache.is_full() && (_curr_index < _length)) {
  26.197 -    HeapRegion* hr = _markedRegions.at(_curr_index);
  26.198 -    assert(hr != NULL,
  26.199 -           err_msg("Unexpected NULL hr in _markedRegions at index %d",
  26.200 -                   _curr_index));
  26.201 -    _curr_index += 1;
  26.202 -    assert(!hr->is_young(), "should not be young!");
  26.203 -    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
  26.204 -    _markedRegions.at_put(hr->sort_index(), NULL);
  26.205 -    _cache.insert(hr);
  26.206 -    assert(!_cache.is_empty(), "cache should not be empty");
  26.207 -  }
  26.208 -  assert(verify(), "cache should be consistent");
  26.209 -}
  26.210 -
  26.211 -void CollectionSetChooser::sortMarkedHeapRegions() {
  26.212 +void CollectionSetChooser::sort_regions() {
  26.213    // First trim any unused portion of the top in the parallel case.
  26.214    if (_first_par_unreserved_idx > 0) {
  26.215 -    if (G1PrintParCleanupStats) {
  26.216 -      gclog_or_tty->print("     Truncating _markedRegions from %d to %d.\n",
  26.217 -                          _markedRegions.length(), _first_par_unreserved_idx);
  26.218 -    }
  26.219 -    assert(_first_par_unreserved_idx <= _markedRegions.length(),
  26.220 +    assert(_first_par_unreserved_idx <= regions_length(),
  26.221             "Or we didn't reserved enough length");
  26.222 -    _markedRegions.trunc_to(_first_par_unreserved_idx);
  26.223 +    regions_trunc_to(_first_par_unreserved_idx);
  26.224    }
  26.225 -  _markedRegions.sort(orderRegions);
  26.226 -  assert(_length <= _markedRegions.length(), "Requirement");
  26.227 -  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
  26.228 -         "Testing _length");
  26.229 -  assert(_length == _markedRegions.length() ||
  26.230 -                        _markedRegions.at(_length) == NULL, "Testing _length");
  26.231 -  if (G1PrintParCleanupStats) {
  26.232 -    gclog_or_tty->print_cr("     Sorted %d marked regions.", _length);
  26.233 +  _regions.sort(order_regions);
  26.234 +  assert(_length <= regions_length(), "Requirement");
  26.235 +#ifdef ASSERT
  26.236 +  for (uint i = 0; i < _length; i++) {
  26.237 +    assert(regions_at(i) != NULL, "Should be true by sorting!");
  26.238    }
  26.239 -  for (int i = 0; i < _length; i++) {
  26.240 -    assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
  26.241 -    _markedRegions.at(i)->set_sort_index(i);
  26.242 -  }
  26.243 +#endif // ASSERT
  26.244    if (G1PrintRegionLivenessInfo) {
  26.245      G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
  26.246 -    for (int i = 0; i < _length; ++i) {
  26.247 -      HeapRegion* r = _markedRegions.at(i);
  26.248 +    for (uint i = 0; i < _length; ++i) {
  26.249 +      HeapRegion* r = regions_at(i);
  26.250        cl.doHeapRegion(r);
  26.251      }
  26.252    }
  26.253 -  assert(verify(), "CSet chooser verification");
  26.254 +  verify();
  26.255  }
  26.256  
  26.257 -size_t CollectionSetChooser::calcMinOldCSetLength() {
  26.258 +uint CollectionSetChooser::calc_min_old_cset_length() {
  26.259    // The min old CSet region bound is based on the maximum desired
  26.260    // number of mixed GCs after a cycle. I.e., even if some old regions
  26.261    // look expensive, we should add them to the CSet anyway to make
  26.262 @@ -291,10 +164,10 @@
  26.263    if (result * gc_num < region_num) {
  26.264      result += 1;
  26.265    }
  26.266 -  return result;
  26.267 +  return (uint) result;
  26.268  }
  26.269  
  26.270 -size_t CollectionSetChooser::calcMaxOldCSetLength() {
  26.271 +uint CollectionSetChooser::calc_max_old_cset_length() {
  26.272    // The max old CSet region bound is based on the threshold expressed
  26.273    // as a percentage of the heap size. I.e., it should bound the
  26.274    // number of old regions added to the CSet irrespective of how many
  26.275 @@ -308,23 +181,23 @@
  26.276    if (100 * result < region_num * perc) {
  26.277      result += 1;
  26.278    }
  26.279 -  return result;
  26.280 +  return (uint) result;
  26.281  }
  26.282  
  26.283 -void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
  26.284 +void CollectionSetChooser::add_region(HeapRegion* hr) {
  26.285    assert(!hr->isHumongous(),
  26.286           "Humongous regions shouldn't be added to the collection set");
  26.287    assert(!hr->is_young(), "should not be young!");
  26.288 -  _markedRegions.append(hr);
  26.289 +  _regions.append(hr);
  26.290    _length++;
  26.291 -  _remainingReclaimableBytes += hr->reclaimable_bytes();
  26.292 +  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
  26.293    hr->calc_gc_efficiency();
  26.294  }
  26.295  
  26.296 -void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
  26.297 -                                                             size_t chunkSize) {
  26.298 +void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
  26.299 +                                                           uint chunk_size) {
  26.300    _first_par_unreserved_idx = 0;
  26.301 -  int n_threads = ParallelGCThreads;
  26.302 +  uint n_threads = (uint) ParallelGCThreads;
  26.303    if (UseDynamicNumberOfGCThreads) {
  26.304      assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
  26.305        "Should have been set earlier");
  26.306 @@ -335,57 +208,46 @@
  26.307      n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
  26.308                       1U);
  26.309    }
  26.310 -  size_t max_waste = n_threads * chunkSize;
  26.311 -  // it should be aligned with respect to chunkSize
  26.312 -  size_t aligned_n_regions =
  26.313 -                     (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
  26.314 -  assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
  26.315 -  _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
  26.316 +  uint max_waste = n_threads * chunk_size;
  26.317 +  // it should be aligned with respect to chunk_size
  26.318 +  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
  26.319 +  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
  26.320 +  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
  26.321  }
  26.322  
  26.323 -jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
  26.324 -  // Don't do this assert because this can be called at a point
  26.325 -  // where the loop up stream will not execute again but might
  26.326 -  // try to claim more chunks (loop test has not been done yet).
  26.327 -  // assert(_markedRegions.length() > _first_par_unreserved_idx,
  26.328 -  //  "Striding beyond the marked regions");
  26.329 -  jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
  26.330 -  assert(_markedRegions.length() > res + n_regions - 1,
  26.331 +uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
  26.332 +  uint res = (uint) Atomic::add((jint) chunk_size,
  26.333 +                                (volatile jint*) &_first_par_unreserved_idx);
  26.334 +  assert(regions_length() > res + chunk_size - 1,
  26.335           "Should already have been expanded");
  26.336 -  return res - n_regions;
  26.337 +  return res - chunk_size;
  26.338  }
  26.339  
  26.340 -void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
  26.341 -  assert(_markedRegions.at(index) == NULL, "precondition");
  26.342 +void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
  26.343 +  assert(regions_at(index) == NULL, "precondition");
  26.344    assert(!hr->is_young(), "should not be young!");
  26.345 -  _markedRegions.at_put(index, hr);
  26.346 +  regions_at_put(index, hr);
  26.347    hr->calc_gc_efficiency();
  26.348  }
  26.349  
  26.350 -void CollectionSetChooser::updateTotals(jint region_num,
  26.351 -                                        size_t reclaimable_bytes) {
  26.352 +void CollectionSetChooser::update_totals(uint region_num,
  26.353 +                                         size_t reclaimable_bytes) {
  26.354    // Only take the lock if we actually need to update the totals.
  26.355    if (region_num > 0) {
  26.356      assert(reclaimable_bytes > 0, "invariant");
  26.357      // We could have just used atomics instead of taking the
  26.358      // lock. However, we currently don't have an atomic add for size_t.
  26.359      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  26.360 -    _length += (int) region_num;
  26.361 -    _remainingReclaimableBytes += reclaimable_bytes;
  26.362 +    _length += region_num;
  26.363 +    _remaining_reclaimable_bytes += reclaimable_bytes;
  26.364    } else {
  26.365      assert(reclaimable_bytes == 0, "invariant");
  26.366    }
  26.367  }
  26.368  
  26.369 -void CollectionSetChooser::clearMarkedHeapRegions() {
  26.370 -  for (int i = 0; i < _markedRegions.length(); i++) {
  26.371 -    HeapRegion* r = _markedRegions.at(i);
  26.372 -    if (r != NULL) {
  26.373 -      r->set_sort_index(-1);
  26.374 -    }
  26.375 -  }
  26.376 -  _markedRegions.clear();
  26.377 +void CollectionSetChooser::clear() {
  26.378 +  _regions.clear();
  26.379    _curr_index = 0;
  26.380    _length = 0;
  26.381 -  _remainingReclaimableBytes = 0;
  26.382 +  _remaining_reclaimable_bytes = 0;
  26.383  };
    27.1 --- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Thu Apr 19 12:18:46 2012 -0700
    27.2 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Apr 20 16:23:48 2012 -0700
    27.3 @@ -28,77 +28,42 @@
    27.4  #include "gc_implementation/g1/heapRegion.hpp"
    27.5  #include "utilities/growableArray.hpp"
    27.6  
    27.7 -class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
    27.8 -private:
    27.9 -  enum {
   27.10 -    CacheLength = 16
   27.11 -  } PrivateConstants;
   27.12 -
   27.13 -  HeapRegion*  _cache[CacheLength];
   27.14 -  int          _occupancy; // number of regions in cache
   27.15 -  int          _first;     // (index of) "first" region in the cache
   27.16 -
   27.17 -  // adding CacheLength to deal with negative values
   27.18 -  inline int trim_index(int index) {
   27.19 -    return (index + CacheLength) % CacheLength;
   27.20 -  }
   27.21 -
   27.22 -  inline int get_sort_index(int index) {
   27.23 -    return -index-2;
   27.24 -  }
   27.25 -  inline int get_index(int sort_index) {
   27.26 -    return -sort_index-2;
   27.27 -  }
   27.28 -
   27.29 -public:
   27.30 -  CSetChooserCache(void);
   27.31 -
   27.32 -  inline int occupancy(void) { return _occupancy; }
   27.33 -  inline bool is_full()      { return _occupancy == CacheLength; }
   27.34 -  inline bool is_empty()     { return _occupancy == 0; }
   27.35 -
   27.36 -  void clear(void);
   27.37 -  void insert(HeapRegion *hr);
   27.38 -  HeapRegion *remove_first(void);
   27.39 -  inline HeapRegion *get_first(void) {
   27.40 -    return _cache[_first];
   27.41 -  }
   27.42 -
   27.43 -#ifndef PRODUCT
   27.44 -  bool verify (void);
   27.45 -  bool region_in_cache(HeapRegion *hr) {
   27.46 -    int sort_index = hr->sort_index();
   27.47 -    if (sort_index < -1) {
   27.48 -      int index = get_index(sort_index);
   27.49 -      guarantee(index < CacheLength, "should be within bounds");
   27.50 -      return _cache[index] == hr;
   27.51 -    } else
   27.52 -      return 0;
   27.53 -  }
   27.54 -#endif // PRODUCT
   27.55 -};
   27.56 -
   27.57  class CollectionSetChooser: public CHeapObj {
   27.58  
   27.59 -  GrowableArray<HeapRegion*> _markedRegions;
   27.60 +  GrowableArray<HeapRegion*> _regions;
   27.61 +
   27.62 +  // Unfortunately, GrowableArray uses ints for length and indexes. To
   27.63 +  // avoid excessive casting in the rest of the class the following
   27.64 +  // wrapper methods are provided that use uints.
   27.65 +
   27.66 +  uint regions_length()          { return (uint) _regions.length(); }
   27.67 +  HeapRegion* regions_at(uint i) { return _regions.at((int) i);     }
   27.68 +  void regions_at_put(uint i, HeapRegion* hr) {
   27.69 +    _regions.at_put((int) i, hr);
   27.70 +  }
   27.71 +  void regions_at_put_grow(uint i, HeapRegion* hr) {
   27.72 +    _regions.at_put_grow((int) i, hr);
   27.73 +  }
   27.74 +  void regions_trunc_to(uint i)  { _regions.trunc_to((uint) i); }
   27.75  
   27.76    // The index of the next candidate old region to be considered for
   27.77    // addition to the CSet.
   27.78 -  int _curr_index;
   27.79 +  uint _curr_index;
   27.80  
   27.81    // The number of candidate old regions added to the CSet chooser.
   27.82 -  int _length;
   27.83 +  uint _length;
   27.84  
   27.85 -  CSetChooserCache _cache;
   27.86 -  jint _first_par_unreserved_idx;
   27.87 +  // Keeps track of the start of the next array chunk to be claimed by
   27.88 +  // parallel GC workers.
   27.89 +  uint _first_par_unreserved_idx;
   27.90  
   27.91    // If a region has more live bytes than this threshold, it will not
   27.92    // be added to the CSet chooser and will not be a candidate for
   27.93    // collection.
   27.94 -  size_t _regionLiveThresholdBytes;
   27.95 +  size_t _region_live_threshold_bytes;
   27.96  
   27.97    // The sum of reclaimable bytes over all the regions in the CSet chooser.
   27.98 -  size_t _remainingReclaimableBytes;
   27.99 +  size_t _remaining_reclaimable_bytes;
  27.100  
  27.101  public:
  27.102  
  27.103 @@ -107,9 +72,9 @@
  27.104    HeapRegion* peek() {
  27.105      HeapRegion* res = NULL;
  27.106      if (_curr_index < _length) {
  27.107 -      res = _markedRegions.at(_curr_index);
  27.108 +      res = regions_at(_curr_index);
  27.109        assert(res != NULL,
  27.110 -             err_msg("Unexpected NULL hr in _markedRegions at index %d",
  27.111 +             err_msg("Unexpected NULL hr in _regions at index %u",
  27.112                       _curr_index));
  27.113      }
  27.114      return res;
  27.115 @@ -121,90 +86,71 @@
  27.116    void remove_and_move_to_next(HeapRegion* hr) {
  27.117      assert(hr != NULL, "pre-condition");
  27.118      assert(_curr_index < _length, "pre-condition");
  27.119 -    assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
  27.120 -    hr->set_sort_index(-1);
  27.121 -    _markedRegions.at_put(_curr_index, NULL);
  27.122 -    assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
  27.123 +    assert(regions_at(_curr_index) == hr, "pre-condition");
  27.124 +    regions_at_put(_curr_index, NULL);
  27.125 +    assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
  27.126             err_msg("remaining reclaimable bytes inconsistent "
  27.127                     "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
  27.128 -                   hr->reclaimable_bytes(), _remainingReclaimableBytes));
  27.129 -    _remainingReclaimableBytes -= hr->reclaimable_bytes();
  27.130 +                   hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
  27.131 +    _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
  27.132      _curr_index += 1;
  27.133    }
  27.134  
  27.135    CollectionSetChooser();
  27.136  
  27.137 -  void sortMarkedHeapRegions();
  27.138 -  void fillCache();
  27.139 +  void sort_regions();
  27.140  
  27.141    // Determine whether to add the given region to the CSet chooser or
  27.142    // not. Currently, we skip humongous regions (we never add them to
  27.143    // the CSet, we only reclaim them during cleanup) and regions whose
  27.144    // live bytes are over the threshold.
  27.145 -  bool shouldAdd(HeapRegion* hr) {
  27.146 +  bool should_add(HeapRegion* hr) {
  27.147      assert(hr->is_marked(), "pre-condition");
  27.148      assert(!hr->is_young(), "should never consider young regions");
  27.149      return !hr->isHumongous() &&
  27.150 -            hr->live_bytes() < _regionLiveThresholdBytes;
  27.151 +            hr->live_bytes() < _region_live_threshold_bytes;
  27.152    }
  27.153  
  27.154    // Calculate the minimum number of old regions we'll add to the CSet
  27.155    // during a mixed GC.
  27.156 -  size_t calcMinOldCSetLength();
  27.157 +  uint calc_min_old_cset_length();
  27.158  
  27.159    // Calculate the maximum number of old regions we'll add to the CSet
  27.160    // during a mixed GC.
  27.161 -  size_t calcMaxOldCSetLength();
  27.162 +  uint calc_max_old_cset_length();
  27.163  
  27.164    // Serial version.
  27.165 -  void addMarkedHeapRegion(HeapRegion *hr);
  27.166 +  void add_region(HeapRegion *hr);
  27.167  
  27.168 -  // Must be called before calls to getParMarkedHeapRegionChunk.
  27.169 -  // "n_regions" is the number of regions, "chunkSize" the chunk size.
  27.170 -  void prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize);
  27.171 -  // Returns the first index in a contiguous chunk of "n_regions" indexes
  27.172 +  // Must be called before calls to claim_array_chunk().
  27.173 +  // n_regions is the number of regions, chunk_size the chunk size.
  27.174 +  void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
  27.175 +  // Returns the first index in a contiguous chunk of chunk_size indexes
  27.176    // that the calling thread has reserved.  These must be set by the
  27.177 -  // calling thread using "setMarkedHeapRegion" (to NULL if necessary).
  27.178 -  jint getParMarkedHeapRegionChunk(jint n_regions);
  27.179 +  // calling thread using set_region() (to NULL if necessary).
  27.180 +  uint claim_array_chunk(uint chunk_size);
  27.181    // Set the marked array entry at index to hr.  Careful to claim the index
  27.182    // first if in parallel.
  27.183 -  void setMarkedHeapRegion(jint index, HeapRegion* hr);
  27.184 +  void set_region(uint index, HeapRegion* hr);
  27.185    // Atomically increment the number of added regions by region_num
  27.186    // and the amount of reclaimable bytes by reclaimable_bytes.
  27.187 -  void updateTotals(jint region_num, size_t reclaimable_bytes);
  27.188 +  void update_totals(uint region_num, size_t reclaimable_bytes);
  27.189  
  27.190 -  void clearMarkedHeapRegions();
  27.191 +  void clear();
  27.192  
  27.193    // Return the number of candidate regions that remain to be collected.
  27.194 -  size_t remainingRegions() { return _length - _curr_index; }
  27.195 +  uint remaining_regions() { return _length - _curr_index; }
  27.196  
  27.197    // Determine whether the CSet chooser has more candidate regions or not.
  27.198 -  bool isEmpty() { return remainingRegions() == 0; }
  27.199 +  bool is_empty() { return remaining_regions() == 0; }
  27.200  
  27.201    // Return the reclaimable bytes that remain to be collected on
  27.202    // all the candidate regions in the CSet chooser.
  27.203 -  size_t remainingReclaimableBytes () { return _remainingReclaimableBytes; }
  27.204 +  size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; }
  27.205  
  27.206 -  // Returns true if the used portion of "_markedRegions" is properly
  27.207 +  // Returns true if the used portion of "_regions" is properly
  27.208    // sorted, otherwise asserts false.
  27.209 -#ifndef PRODUCT
  27.210 -  bool verify(void);
  27.211 -  bool regionProperlyOrdered(HeapRegion* r) {
  27.212 -    int si = r->sort_index();
  27.213 -    if (si > -1) {
  27.214 -      guarantee(_curr_index <= si && si < _length,
  27.215 -                err_msg("curr: %d sort index: %d: length: %d",
  27.216 -                        _curr_index, si, _length));
  27.217 -      guarantee(_markedRegions.at(si) == r,
  27.218 -                err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
  27.219 -                        si, _markedRegions.at(si), r));
  27.220 -    } else {
  27.221 -      guarantee(si == -1, err_msg("sort index: %d", si));
  27.222 -    }
  27.223 -    return true;
  27.224 -  }
  27.225 -#endif
  27.226 -
  27.227 +  void verify() PRODUCT_RETURN;
  27.228  };
  27.229  
  27.230  #endif // SHARE_VM_GC_IMPLEMENTATION_G1_COLLECTIONSETCHOOSER_HPP
    28.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Apr 19 12:18:46 2012 -0700
    28.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Apr 20 16:23:48 2012 -0700
    28.3 @@ -29,6 +29,7 @@
    28.4  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    28.5  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    28.6  #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    28.7 +#include "gc_implementation/g1/g1Log.hpp"
    28.8  #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    28.9  #include "gc_implementation/g1/g1RemSet.hpp"
   28.10  #include "gc_implementation/g1/heapRegion.inline.hpp"
   28.11 @@ -402,8 +403,7 @@
   28.12    return MAX2((n_par_threads + 2) / 4, 1U);
   28.13  }
   28.14  
   28.15 -ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   28.16 -                               int max_regions) :
   28.17 +ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
   28.18    _markBitMap1(rs, MinObjAlignment - 1),
   28.19    _markBitMap2(rs, MinObjAlignment - 1),
   28.20  
   28.21 @@ -414,7 +414,7 @@
   28.22    _cleanup_sleep_factor(0.0),
   28.23    _cleanup_task_overhead(1.0),
   28.24    _cleanup_list("Cleanup List"),
   28.25 -  _region_bm(max_regions, false /* in_resource_area*/),
   28.26 +  _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
   28.27    _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
   28.28             CardTableModRefBS::card_shift,
   28.29             false /* in_resource_area*/),
   28.30 @@ -496,7 +496,7 @@
   28.31      _task_queues->register_queue(i, task_queue);
   28.32  
   28.33      _count_card_bitmaps[i] = BitMap(card_bm_size, false);
   28.34 -    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions);
   28.35 +    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions);
   28.36  
   28.37      _tasks[i] = new CMTask(i, this,
   28.38                             _count_marked_bytes[i],
   28.39 @@ -846,7 +846,7 @@
   28.40      clear_marking_state(concurrent() /* clear_overflow */);
   28.41      force_overflow()->update();
   28.42  
   28.43 -    if (PrintGC) {
   28.44 +    if (G1Log::fine()) {
   28.45        gclog_or_tty->date_stamp(PrintGCDateStamps);
   28.46        gclog_or_tty->stamp(PrintGCTimeStamps);
   28.47        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
   28.48 @@ -1119,8 +1119,7 @@
   28.49      HandleMark hm;  // handle scope
   28.50      gclog_or_tty->print(" VerifyDuringGC:(before)");
   28.51      Universe::heap()->prepare_for_verify();
   28.52 -    Universe::verify(/* allow dirty */ true,
   28.53 -                     /* silent      */ false,
   28.54 +    Universe::verify(/* silent      */ false,
   28.55                       /* option      */ VerifyOption_G1UsePrevMarking);
   28.56    }
   28.57  
   28.58 @@ -1159,8 +1158,7 @@
   28.59        HandleMark hm;  // handle scope
   28.60        gclog_or_tty->print(" VerifyDuringGC:(after)");
   28.61        Universe::heap()->prepare_for_verify();
   28.62 -      Universe::verify(/* allow dirty */ true,
   28.63 -                       /* silent      */ false,
   28.64 +      Universe::verify(/* silent      */ false,
   28.65                         /* option      */ VerifyOption_G1UseNextMarking);
   28.66      }
   28.67      assert(!restart_for_overflow(), "sanity");
   28.68 @@ -1194,11 +1192,6 @@
   28.69    BitMap* _region_bm;
   28.70    BitMap* _card_bm;
   28.71  
   28.72 -  // Debugging
   28.73 -  size_t _tot_words_done;
   28.74 -  size_t _tot_live;
   28.75 -  size_t _tot_used;
   28.76 -
   28.77    size_t _region_marked_bytes;
   28.78  
   28.79    intptr_t _bottom_card_num;
   28.80 @@ -1217,9 +1210,7 @@
   28.81    CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
   28.82                           BitMap* region_bm, BitMap* card_bm) :
   28.83      _bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
   28.84 -    _region_marked_bytes(0), _tot_words_done(0),
   28.85 -    _tot_live(0), _tot_used(0),
   28.86 -    _bottom_card_num(cm->heap_bottom_card_num()) { }
   28.87 +    _region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }
   28.88  
   28.89    // It takes a region that's not empty (i.e., it has at least one
   28.90    // live object in it and sets its corresponding bit on the region
   28.91 @@ -1229,18 +1220,17 @@
   28.92    void set_bit_for_region(HeapRegion* hr) {
   28.93      assert(!hr->continuesHumongous(), "should have filtered those out");
   28.94  
   28.95 -    size_t index = hr->hrs_index();
   28.96 +    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
   28.97      if (!hr->startsHumongous()) {
   28.98        // Normal (non-humongous) case: just set the bit.
   28.99 -      _region_bm->par_at_put((BitMap::idx_t) index, true);
  28.100 +      _region_bm->par_at_put(index, true);
  28.101      } else {
  28.102        // Starts humongous case: calculate how many regions are part of
  28.103        // this humongous region and then set the bit range.
  28.104        G1CollectedHeap* g1h = G1CollectedHeap::heap();
  28.105        HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
  28.106 -      size_t end_index = last_hr->hrs_index() + 1;
  28.107 -      _region_bm->par_at_put_range((BitMap::idx_t) index,
  28.108 -                                   (BitMap::idx_t) end_index, true);
  28.109 +      BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
  28.110 +      _region_bm->par_at_put_range(index, end_index, true);
  28.111      }
  28.112    }
  28.113  
  28.114 @@ -1265,9 +1255,6 @@
  28.115                     "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
  28.116                     start, nextTop, hr->end()));
  28.117  
  28.118 -    // Record the number of word's we'll examine.
  28.119 -    size_t words_done = (nextTop - start);
  28.120 -
  28.121      // Find the first marked object at or after "start".
  28.122      start = _bm->getNextMarkedWordAddress(start, nextTop);
  28.123  
  28.124 @@ -1346,19 +1333,10 @@
  28.125      // it can be queried by a calling verificiation routine
  28.126      _region_marked_bytes = marked_bytes;
  28.127  
  28.128 -    _tot_live += hr->next_live_bytes();
  28.129 -    _tot_used += hr->used();
  28.130 -    _tot_words_done = words_done;
  28.131 -
  28.132      return false;
  28.133    }
  28.134  
  28.135    size_t region_marked_bytes() const { return _region_marked_bytes; }
  28.136 -
  28.137 -  // Debugging
  28.138 -  size_t tot_words_done() const      { return _tot_words_done; }
  28.139 -  size_t tot_live() const            { return _tot_live; }
  28.140 -  size_t tot_used() const            { return _tot_used; }
  28.141  };
  28.142  
  28.143  // Heap region closure used for verifying the counting data
  28.144 @@ -1419,7 +1397,7 @@
  28.145      // Verify that _top_at_conc_count == ntams
  28.146      if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
  28.147        if (_verbose) {
  28.148 -        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: "
  28.149 +        gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
  28.150                                 "expected " PTR_FORMAT ", actual: " PTR_FORMAT,
  28.151                                 hr->hrs_index(), hr->next_top_at_mark_start(),
  28.152                                 hr->top_at_conc_mark_count());
  28.153 @@ -1435,7 +1413,7 @@
  28.154      // we have missed accounting some objects during the actual marking.
  28.155      if (exp_marked_bytes > act_marked_bytes) {
  28.156        if (_verbose) {
  28.157 -        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: "
  28.158 +        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
  28.159                                 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
  28.160                                 hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
  28.161        }
  28.162 @@ -1446,15 +1424,16 @@
  28.163      // (which was just calculated) region bit maps.
  28.164      // We're not OK if the bit in the calculated expected region
  28.165      // bitmap is set and the bit in the actual region bitmap is not.
  28.166 -    BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index();
  28.167 +    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
  28.168  
  28.169      bool expected = _exp_region_bm->at(index);
  28.170      bool actual = _region_bm->at(index);
  28.171      if (expected && !actual) {
  28.172        if (_verbose) {
  28.173 -        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: "
  28.174 -                               "expected: %d, actual: %d",
  28.175 -                               hr->hrs_index(), expected, actual);
  28.176 +        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
  28.177 +                               "expected: %s, actual: %s",
  28.178 +                               hr->hrs_index(),
  28.179 +                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
  28.180        }
  28.181        failures += 1;
  28.182      }
  28.183 @@ -1472,9 +1451,10 @@
  28.184  
  28.185        if (expected && !actual) {
  28.186          if (_verbose) {
  28.187 -          gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": "
  28.188 -                                 "expected: %d, actual: %d",
  28.189 -                                 hr->hrs_index(), i, expected, actual);
  28.190 +          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
  28.191 +                                 "expected: %s, actual: %s",
  28.192 +                                 hr->hrs_index(), i,
  28.193 +                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
  28.194          }
  28.195          failures += 1;
  28.196        }
  28.197 @@ -1575,10 +1555,6 @@
  28.198    BitMap* _region_bm;
  28.199    BitMap* _card_bm;
  28.200  
  28.201 -  size_t _total_live_bytes;
  28.202 -  size_t _total_used_bytes;
  28.203 -  size_t _total_words_done;
  28.204 -
  28.205    void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
  28.206      assert(start_idx <= last_idx, "sanity");
  28.207  
  28.208 @@ -1604,18 +1580,17 @@
  28.209    void set_bit_for_region(HeapRegion* hr) {
  28.210      assert(!hr->continuesHumongous(), "should have filtered those out");
  28.211  
  28.212 -    size_t index = hr->hrs_index();
  28.213 +    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
  28.214      if (!hr->startsHumongous()) {
  28.215        // Normal (non-humongous) case: just set the bit.
  28.216 -      _region_bm->par_set_bit((BitMap::idx_t) index);
  28.217 +      _region_bm->par_set_bit(index);
  28.218      } else {
  28.219        // Starts humongous case: calculate how many regions are part of
  28.220        // this humongous region and then set the bit range.
  28.221        G1CollectedHeap* g1h = G1CollectedHeap::heap();
  28.222        HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
  28.223 -      size_t end_index = last_hr->hrs_index() + 1;
  28.224 -      _region_bm->par_at_put_range((BitMap::idx_t) index,
  28.225 -                                   (BitMap::idx_t) end_index, true);
  28.226 +      BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
  28.227 +      _region_bm->par_at_put_range(index, end_index, true);
  28.228      }
  28.229    }
  28.230  
  28.231 @@ -1623,8 +1598,7 @@
  28.232    FinalCountDataUpdateClosure(ConcurrentMark* cm,
  28.233                                BitMap* region_bm,
  28.234                                BitMap* card_bm) :
  28.235 -    _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
  28.236 -    _total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
  28.237 +    _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
  28.238  
  28.239    bool doHeapRegion(HeapRegion* hr) {
  28.240  
  28.241 @@ -1646,8 +1620,6 @@
  28.242      assert(hr->bottom() <= start && start <= hr->end() &&
  28.243             hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
  28.244  
  28.245 -    size_t words_done = ntams - hr->bottom();
  28.246 -
  28.247      if (start < ntams) {
  28.248        // Region was changed between remark and cleanup pauses
  28.249        // We need to add (ntams - start) to the marked bytes
  28.250 @@ -1678,16 +1650,8 @@
  28.251        set_bit_for_region(hr);
  28.252      }
  28.253  
  28.254 -    _total_words_done += words_done;
  28.255 -    _total_used_bytes += hr->used();
  28.256 -    _total_live_bytes += hr->next_marked_bytes();
  28.257 -
  28.258      return false;
  28.259    }
  28.260 -
  28.261 -  size_t total_words_done() const { return _total_words_done; }
  28.262 -  size_t total_live_bytes() const { return _total_live_bytes; }
  28.263 -  size_t total_used_bytes() const { return _total_used_bytes; }
  28.264  };
  28.265  
  28.266  class G1ParFinalCountTask: public AbstractGangTask {
  28.267 @@ -1699,9 +1663,6 @@
  28.268  
  28.269    uint    _n_workers;
  28.270  
  28.271 -  size_t *_live_bytes;
  28.272 -  size_t *_used_bytes;
  28.273 -
  28.274  public:
  28.275    G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
  28.276      : AbstractGangTask("G1 final counting"),
  28.277 @@ -1709,8 +1670,7 @@
  28.278        _actual_region_bm(region_bm), _actual_card_bm(card_bm),
  28.279        _n_workers(0) {
  28.280      // Use the value already set as the number of active threads
  28.281 -    // in the call to run_task().  Needed for the allocation of
  28.282 -    // _live_bytes and _used_bytes.
  28.283 +    // in the call to run_task().
  28.284      if (G1CollectedHeap::use_parallel_gc_threads()) {
  28.285        assert( _g1h->workers()->active_workers() > 0,
  28.286          "Should have been previously set");
  28.287 @@ -1718,14 +1678,6 @@
  28.288      } else {
  28.289        _n_workers = 1;
  28.290      }
  28.291 -
  28.292 -    _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
  28.293 -    _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
  28.294 -  }
  28.295 -
  28.296 -  ~G1ParFinalCountTask() {
  28.297 -    FREE_C_HEAP_ARRAY(size_t, _live_bytes);
  28.298 -    FREE_C_HEAP_ARRAY(size_t, _used_bytes);
  28.299    }
  28.300  
  28.301    void work(uint worker_id) {
  28.302 @@ -1743,23 +1695,6 @@
  28.303      } else {
  28.304        _g1h->heap_region_iterate(&final_update_cl);
  28.305      }
  28.306 -
  28.307 -    _live_bytes[worker_id] = final_update_cl.total_live_bytes();
  28.308 -    _used_bytes[worker_id] = final_update_cl.total_used_bytes();
  28.309 -  }
  28.310 -
  28.311 -  size_t live_bytes()  {
  28.312 -    size_t live_bytes = 0;
  28.313 -    for (uint i = 0; i < _n_workers; ++i)
  28.314 -      live_bytes += _live_bytes[i];
  28.315 -    return live_bytes;
  28.316 -  }
  28.317 -
  28.318 -  size_t used_bytes()  {
  28.319 -    size_t used_bytes = 0;
  28.320 -    for (uint i = 0; i < _n_workers; ++i)
  28.321 -      used_bytes += _used_bytes[i];
  28.322 -    return used_bytes;
  28.323    }
  28.324  };
  28.325  
  28.326 @@ -1769,7 +1704,7 @@
  28.327    G1CollectedHeap* _g1;
  28.328    int _worker_num;
  28.329    size_t _max_live_bytes;
  28.330 -  size_t _regions_claimed;
  28.331 +  uint _regions_claimed;
  28.332    size_t _freed_bytes;
  28.333    FreeRegionList* _local_cleanup_list;
  28.334    OldRegionSet* _old_proxy_set;
  28.335 @@ -1822,7 +1757,7 @@
  28.336    }
  28.337  
  28.338    size_t max_live_bytes() { return _max_live_bytes; }
  28.339 -  size_t regions_claimed() { return _regions_claimed; }
  28.340 +  uint regions_claimed() { return _regions_claimed; }
  28.341    double claimed_region_time_sec() { return _claimed_region_time; }
  28.342    double max_region_time_sec() { return _max_region_time; }
  28.343  };
  28.344 @@ -1894,15 +1829,6 @@
  28.345  
  28.346        HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
  28.347      }
  28.348 -    double end = os::elapsedTime();
  28.349 -    if (G1PrintParCleanupStats) {
  28.350 -      gclog_or_tty->print("     Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
  28.351 -                          "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
  28.352 -                          worker_id, start, end, (end-start)*1000.0,
  28.353 -                          g1_note_end.regions_claimed(),
  28.354 -                          g1_note_end.claimed_region_time_sec()*1000.0,
  28.355 -                          g1_note_end.max_region_time_sec()*1000.0);
  28.356 -    }
  28.357    }
  28.358    size_t max_live_bytes() { return _max_live_bytes; }
  28.359    size_t freed_bytes() { return _freed_bytes; }
  28.360 @@ -1949,8 +1875,7 @@
  28.361      HandleMark hm;  // handle scope
  28.362      gclog_or_tty->print(" VerifyDuringGC:(before)");
  28.363      Universe::heap()->prepare_for_verify();
  28.364 -    Universe::verify(/* allow dirty */ true,
  28.365 -                     /* silent      */ false,
  28.366 +    Universe::verify(/* silent      */ false,
  28.367                       /* option      */ VerifyOption_G1UsePrevMarking);
  28.368    }
  28.369  
  28.370 @@ -2014,29 +1939,11 @@
  28.371      guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  28.372    }
  28.373  
  28.374 -  size_t known_garbage_bytes =
  28.375 -    g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
  28.376 -  g1p->set_known_garbage_bytes(known_garbage_bytes);
  28.377 -
  28.378    size_t start_used_bytes = g1h->used();
  28.379    g1h->set_marking_complete();
  28.380  
  28.381 -  ergo_verbose4(ErgoConcCycles,
  28.382 -           "finish cleanup",
  28.383 -           ergo_format_byte("occupancy")
  28.384 -           ergo_format_byte("capacity")
  28.385 -           ergo_format_byte_perc("known garbage"),
  28.386 -           start_used_bytes, g1h->capacity(),
  28.387 -           known_garbage_bytes,
  28.388 -           ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
  28.389 -
  28.390    double count_end = os::elapsedTime();
  28.391    double this_final_counting_time = (count_end - start);
  28.392 -  if (G1PrintParCleanupStats) {
  28.393 -    gclog_or_tty->print_cr("Cleanup:");
  28.394 -    gclog_or_tty->print_cr("  Finalize counting: %8.3f ms",
  28.395 -                           this_final_counting_time*1000.0);
  28.396 -  }
  28.397    _total_counting_time += this_final_counting_time;
  28.398  
  28.399    if (G1PrintRegionLivenessInfo) {
  28.400 @@ -2050,7 +1957,6 @@
  28.401    g1h->reset_gc_time_stamp();
  28.402  
  28.403    // Note end of marking in all heap regions.
  28.404 -  double note_end_start = os::elapsedTime();
  28.405    G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
  28.406    if (G1CollectedHeap::use_parallel_gc_threads()) {
  28.407      g1h->set_par_threads((int)n_workers);
  28.408 @@ -2069,11 +1975,6 @@
  28.409      // regions that there will be more free regions coming soon.
  28.410      g1h->set_free_regions_coming();
  28.411    }
  28.412 -  double note_end_end = os::elapsedTime();
  28.413 -  if (G1PrintParCleanupStats) {
  28.414 -    gclog_or_tty->print_cr("  note end of marking: %8.3f ms.",
  28.415 -                           (note_end_end - note_end_start)*1000.0);
  28.416 -  }
  28.417  
  28.418    // call below, since it affects the metric by which we sort the heap
  28.419    // regions.
  28.420 @@ -2105,16 +2006,13 @@
  28.421    double end = os::elapsedTime();
  28.422    _cleanup_times.add((end - start) * 1000.0);
  28.423  
  28.424 -  if (PrintGC || PrintGCDetails) {
  28.425 +  if (G1Log::fine()) {
  28.426      g1h->print_size_transition(gclog_or_tty,
  28.427                                 start_used_bytes,
  28.428                                 g1h->used(),
  28.429                                 g1h->capacity());
  28.430    }
  28.431  
  28.432 -  size_t cleaned_up_bytes = start_used_bytes - g1h->used();
  28.433 -  g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
  28.434 -
  28.435    // Clean up will have freed any regions completely full of garbage.
  28.436    // Update the soft reference policy with the new heap occupancy.
  28.437    Universe::update_heap_info_at_gc();
  28.438 @@ -2131,8 +2029,7 @@
  28.439      HandleMark hm;  // handle scope
  28.440      gclog_or_tty->print(" VerifyDuringGC:(after)");
  28.441      Universe::heap()->prepare_for_verify();
  28.442 -    Universe::verify(/* allow dirty */ true,
  28.443 -                     /* silent      */ false,
  28.444 +    Universe::verify(/* silent      */ false,
  28.445                       /* option      */ VerifyOption_G1UsePrevMarking);
  28.446    }
  28.447  
  28.448 @@ -2149,7 +2046,7 @@
  28.449  
  28.450    if (G1ConcRegionFreeingVerbose) {
  28.451      gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
  28.452 -                           "cleanup list has "SIZE_FORMAT" entries",
  28.453 +                           "cleanup list has %u entries",
  28.454                             _cleanup_list.length());
  28.455    }
  28.456  
  28.457 @@ -2171,9 +2068,8 @@
  28.458          _cleanup_list.is_empty()) {
  28.459        if (G1ConcRegionFreeingVerbose) {
  28.460          gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
  28.461 -                               "appending "SIZE_FORMAT" entries to the "
  28.462 -                               "secondary_free_list, clean list still has "
  28.463 -                               SIZE_FORMAT" entries",
  28.464 +                               "appending %u entries to the secondary_free_list, "
  28.465 +                               "cleanup list still has %u entries",
  28.466                                 tmp_free_list.length(),
  28.467                                 _cleanup_list.length());
  28.468        }
  28.469 @@ -2446,11 +2342,10 @@
  28.470    // Inner scope to exclude the cleaning of the string and symbol
  28.471    // tables from the displayed time.
  28.472    {
  28.473 -    bool verbose = PrintGC && PrintGCDetails;
  28.474 -    if (verbose) {
  28.475 +    if (G1Log::finer()) {
  28.476        gclog_or_tty->put(' ');
  28.477      }
  28.478 -    TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
  28.479 +    TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
  28.480  
  28.481      ReferenceProcessor* rp = g1h->ref_processor_cm();
  28.482  
  28.483 @@ -3144,7 +3039,7 @@
  28.484      assert(limit_idx <= end_idx, "or else use atomics");
  28.485  
  28.486      // Aggregate the "stripe" in the count data associated with hr.
  28.487 -    size_t hrs_index = hr->hrs_index();
  28.488 +    uint hrs_index = hr->hrs_index();
  28.489      size_t marked_bytes = 0;
  28.490  
  28.491      for (int i = 0; (size_t)i < _max_task_num; i += 1) {
  28.492 @@ -3252,7 +3147,7 @@
  28.493    // of the final counting task.
  28.494    _region_bm.clear();
  28.495  
  28.496 -  size_t max_regions = _g1h->max_regions();
  28.497 +  uint max_regions = _g1h->max_regions();
  28.498    assert(_max_task_num != 0, "unitialized");
  28.499  
  28.500    for (int i = 0; (size_t) i < _max_task_num; i += 1) {
  28.501 @@ -3262,7 +3157,7 @@
  28.502      assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
  28.503      assert(marked_bytes_array != NULL, "uninitialized");
  28.504  
  28.505 -    memset(marked_bytes_array, 0, (max_regions * sizeof(size_t)));
  28.506 +    memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
  28.507      task_card_bm->clear();
  28.508    }
  28.509  }
    29.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Apr 19 12:18:46 2012 -0700
    29.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Apr 20 16:23:48 2012 -0700
    29.3 @@ -636,7 +636,7 @@
    29.4      return _task_queues->steal(task_num, hash_seed, obj);
    29.5    }
    29.6  
    29.7 -  ConcurrentMark(ReservedSpace rs, int max_regions);
    29.8 +  ConcurrentMark(ReservedSpace rs, uint max_regions);
    29.9    ~ConcurrentMark();
   29.10  
   29.11    ConcurrentMarkThread* cmThread() { return _cmThread; }
    30.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Apr 19 12:18:46 2012 -0700
    30.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Apr 20 16:23:48 2012 -0700
    30.3 @@ -49,7 +49,7 @@
    30.4    HeapWord* start = mr.start();
    30.5    HeapWord* last = mr.last();
    30.6    size_t region_size_bytes = mr.byte_size();
    30.7 -  size_t index = hr->hrs_index();
    30.8 +  uint index = hr->hrs_index();
    30.9  
   30.10    assert(!hr->continuesHumongous(), "should not be HC region");
   30.11    assert(hr == g1h->heap_region_containing(start), "sanity");
    31.1 --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Thu Apr 19 12:18:46 2012 -0700
    31.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Apr 20 16:23:48 2012 -0700
    31.3 @@ -26,6 +26,7 @@
    31.4  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    31.5  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    31.6  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    31.7 +#include "gc_implementation/g1/g1Log.hpp"
    31.8  #include "gc_implementation/g1/g1MMUTracker.hpp"
    31.9  #include "gc_implementation/g1/vm_operations_g1.hpp"
   31.10  #include "memory/resourceArea.hpp"
   31.11 @@ -104,7 +105,7 @@
   31.12  
   31.13        double scan_start = os::elapsedTime();
   31.14        if (!cm()->has_aborted()) {
   31.15 -        if (PrintGC) {
   31.16 +        if (G1Log::fine()) {
   31.17            gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.18            gclog_or_tty->stamp(PrintGCTimeStamps);
   31.19            gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
   31.20 @@ -113,7 +114,7 @@
   31.21          _cm->scanRootRegions();
   31.22  
   31.23          double scan_end = os::elapsedTime();
   31.24 -        if (PrintGC) {
   31.25 +        if (G1Log::fine()) {
   31.26            gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.27            gclog_or_tty->stamp(PrintGCTimeStamps);
   31.28            gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]",
   31.29 @@ -122,7 +123,7 @@
   31.30        }
   31.31  
   31.32        double mark_start_sec = os::elapsedTime();
   31.33 -      if (PrintGC) {
   31.34 +      if (G1Log::fine()) {
   31.35          gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.36          gclog_or_tty->stamp(PrintGCTimeStamps);
   31.37          gclog_or_tty->print_cr("[GC concurrent-mark-start]");
   31.38 @@ -146,7 +147,7 @@
   31.39              os::sleep(current_thread, sleep_time_ms, false);
   31.40            }
   31.41  
   31.42 -          if (PrintGC) {
   31.43 +          if (G1Log::fine()) {
   31.44              gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.45              gclog_or_tty->stamp(PrintGCTimeStamps);
   31.46              gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf sec]",
   31.47 @@ -165,7 +166,7 @@
   31.48          }
   31.49  
   31.50          if (cm()->restart_for_overflow()) {
   31.51 -          if (PrintGC) {
   31.52 +          if (G1Log::fine()) {
   31.53              gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.54              gclog_or_tty->stamp(PrintGCTimeStamps);
   31.55              gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
   31.56 @@ -211,7 +212,7 @@
   31.57          // reclaimed by cleanup.
   31.58  
   31.59          double cleanup_start_sec = os::elapsedTime();
   31.60 -        if (PrintGC) {
   31.61 +        if (G1Log::fine()) {
   31.62            gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.63            gclog_or_tty->stamp(PrintGCTimeStamps);
   31.64            gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
   31.65 @@ -232,7 +233,7 @@
   31.66          g1h->reset_free_regions_coming();
   31.67  
   31.68          double cleanup_end_sec = os::elapsedTime();
   31.69 -        if (PrintGC) {
   31.70 +        if (G1Log::fine()) {
   31.71            gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.72            gclog_or_tty->stamp(PrintGCTimeStamps);
   31.73            gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
   31.74 @@ -273,7 +274,7 @@
   31.75        _sts.leave();
   31.76  
   31.77        if (cm()->has_aborted()) {
   31.78 -        if (PrintGC) {
   31.79 +        if (G1Log::fine()) {
   31.80            gclog_or_tty->date_stamp(PrintGCDateStamps);
   31.81            gclog_or_tty->stamp(PrintGCTimeStamps);
   31.82            gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
    32.1 --- a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Thu Apr 19 12:18:46 2012 -0700
    32.2 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Fri Apr 20 16:23:48 2012 -0700
    32.3 @@ -1,5 +1,5 @@
    32.4  /*
    32.5 - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    32.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    32.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    32.8   *
    32.9   * This code is free software; you can redistribute it and/or modify it
   32.10 @@ -140,7 +140,7 @@
   32.11  }
   32.12  
   32.13  void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
   32.14 -  msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
   32.15 +  msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
   32.16                _name, message, _count, BOOL_TO_STR(_bot_updates),
   32.17                _alloc_region, _used_bytes_before);
   32.18  }
   32.19 @@ -215,7 +215,7 @@
   32.20        jio_snprintf(rest_buffer, buffer_length, "");
   32.21      }
   32.22  
   32.23 -    tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s",
   32.24 +    tty->print_cr("[%s] %u %s : %s %s",
   32.25                    _name, _count, hr_buffer, str, rest_buffer);
   32.26    }
   32.27  }
    33.1 --- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Thu Apr 19 12:18:46 2012 -0700
    33.2 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Apr 20 16:23:48 2012 -0700
    33.3 @@ -1,5 +1,5 @@
    33.4  /*
    33.5 - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    33.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    33.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    33.8   *
    33.9   * This code is free software; you can redistribute it and/or modify it
   33.10 @@ -64,7 +64,7 @@
   33.11    // the region that is re-used using the set() method. This count can
   33.12    // be used in any heuristics that might want to bound how many
   33.13    // distinct regions this object can used during an active interval.
   33.14 -  size_t _count;
   33.15 +  uint _count;
   33.16  
   33.17    // When we set up a new active region we save its used bytes in this
   33.18    // field so that, when we retire it, we can calculate how much space
   33.19 @@ -136,7 +136,7 @@
   33.20      return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
   33.21    }
   33.22  
   33.23 -  size_t count() { return _count; }
   33.24 +  uint count() { return _count; }
   33.25  
   33.26    // The following two are the building blocks for the allocation method.
   33.27  
    34.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 19 12:18:46 2012 -0700
    34.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Apr 20 16:23:48 2012 -0700
    34.3 @@ -33,6 +33,7 @@
    34.4  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    34.5  #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    34.6  #include "gc_implementation/g1/g1EvacFailure.hpp"
    34.7 +#include "gc_implementation/g1/g1Log.hpp"
    34.8  #include "gc_implementation/g1/g1MarkSweep.hpp"
    34.9  #include "gc_implementation/g1/g1OopClosures.inline.hpp"
   34.10  #include "gc_implementation/g1/g1RemSet.inline.hpp"
   34.11 @@ -233,7 +234,7 @@
   34.12  bool YoungList::check_list_well_formed() {
   34.13    bool ret = true;
   34.14  
   34.15 -  size_t length = 0;
   34.16 +  uint length = 0;
   34.17    HeapRegion* curr = _head;
   34.18    HeapRegion* last = NULL;
   34.19    while (curr != NULL) {
   34.20 @@ -252,7 +253,7 @@
   34.21  
   34.22    if (!ret) {
   34.23      gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   34.24 -    gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
   34.25 +    gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
   34.26                             length, _length);
   34.27    }
   34.28  
   34.29 @@ -263,7 +264,7 @@
   34.30    bool ret = true;
   34.31  
   34.32    if (_length != 0) {
   34.33 -    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
   34.34 +    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
   34.35                    _length);
   34.36      ret = false;
   34.37    }
   34.38 @@ -336,8 +337,7 @@
   34.39      _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   34.40      young_index_in_cset += 1;
   34.41    }
   34.42 -  assert((size_t) young_index_in_cset == _survivor_length,
   34.43 -         "post-condition");
   34.44 +  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
   34.45    _g1h->g1_policy()->note_stop_adding_survivor_regions();
   34.46  
   34.47    _head   = _survivor_head;
   34.48 @@ -532,7 +532,7 @@
   34.49      if (!_secondary_free_list.is_empty()) {
   34.50        if (G1ConcRegionFreeingVerbose) {
   34.51          gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   34.52 -                               "secondary_free_list has "SIZE_FORMAT" entries",
   34.53 +                               "secondary_free_list has %u entries",
   34.54                                 _secondary_free_list.length());
   34.55        }
   34.56        // It looks as if there are free regions available on the
   34.57 @@ -618,12 +618,12 @@
   34.58    return res;
   34.59  }
   34.60  
   34.61 -size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
   34.62 -                                                          size_t word_size) {
   34.63 +uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
   34.64 +                                                        size_t word_size) {
   34.65    assert(isHumongous(word_size), "word_size should be humongous");
   34.66    assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   34.67  
   34.68 -  size_t first = G1_NULL_HRS_INDEX;
   34.69 +  uint first = G1_NULL_HRS_INDEX;
   34.70    if (num_regions == 1) {
   34.71      // Only one region to allocate, no need to go through the slower
   34.72      // path. The caller will attempt the expasion if this fails, so
   34.73 @@ -649,7 +649,7 @@
   34.74      if (free_regions() >= num_regions) {
   34.75        first = _hrs.find_contiguous(num_regions);
   34.76        if (first != G1_NULL_HRS_INDEX) {
   34.77 -        for (size_t i = first; i < first + num_regions; ++i) {
   34.78 +        for (uint i = first; i < first + num_regions; ++i) {
   34.79            HeapRegion* hr = region_at(i);
   34.80            assert(hr->is_empty(), "sanity");
   34.81            assert(is_on_master_free_list(hr), "sanity");
   34.82 @@ -663,15 +663,15 @@
   34.83  }
   34.84  
   34.85  HeapWord*
   34.86 -G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
   34.87 -                                                           size_t num_regions,
   34.88 +G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   34.89 +                                                           uint num_regions,
   34.90                                                             size_t word_size) {
   34.91    assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   34.92    assert(isHumongous(word_size), "word_size should be humongous");
   34.93    assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   34.94  
   34.95    // Index of last region in the series + 1.
   34.96 -  size_t last = first + num_regions;
   34.97 +  uint last = first + num_regions;
   34.98  
   34.99    // We need to initialize the region(s) we just discovered. This is
  34.100    // a bit tricky given that it can happen concurrently with
  34.101 @@ -682,7 +682,7 @@
  34.102    // a specific order.
  34.103  
  34.104    // The word size sum of all the regions we will allocate.
  34.105 -  size_t word_size_sum = num_regions * HeapRegion::GrainWords;
  34.106 +  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  34.107    assert(word_size <= word_size_sum, "sanity");
  34.108  
  34.109    // This will be the "starts humongous" region.
  34.110 @@ -721,7 +721,7 @@
  34.111    // Then, if there are any, we will set up the "continues
  34.112    // humongous" regions.
  34.113    HeapRegion* hr = NULL;
  34.114 -  for (size_t i = first + 1; i < last; ++i) {
  34.115 +  for (uint i = first + 1; i < last; ++i) {
  34.116      hr = region_at(i);
  34.117      hr->set_continuesHumongous(first_hr);
  34.118    }
  34.119 @@ -767,7 +767,7 @@
  34.120    // last one) is actually used when we will free up the humongous
  34.121    // region in free_humongous_region().
  34.122    hr = NULL;
  34.123 -  for (size_t i = first + 1; i < last; ++i) {
  34.124 +  for (uint i = first + 1; i < last; ++i) {
  34.125      hr = region_at(i);
  34.126      if ((i + 1) == last) {
  34.127        // last continues humongous region
  34.128 @@ -803,14 +803,14 @@
  34.129  
  34.130    verify_region_sets_optional();
  34.131  
  34.132 -  size_t num_regions =
  34.133 -         round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
  34.134 -  size_t x_size = expansion_regions();
  34.135 -  size_t fs = _hrs.free_suffix();
  34.136 -  size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
  34.137 +  size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
  34.138 +  uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
  34.139 +  uint x_num = expansion_regions();
  34.140 +  uint fs = _hrs.free_suffix();
  34.141 +  uint first = humongous_obj_allocate_find_first(num_regions, word_size);
  34.142    if (first == G1_NULL_HRS_INDEX) {
  34.143      // The only thing we can do now is attempt expansion.
  34.144 -    if (fs + x_size >= num_regions) {
  34.145 +    if (fs + x_num >= num_regions) {
  34.146        // If the number of regions we're trying to allocate for this
  34.147        // object is at most the number of regions in the free suffix,
  34.148        // then the call to humongous_obj_allocate_find_first() above
  34.149 @@ -1255,10 +1255,10 @@
  34.150      // Timing
  34.151      bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
  34.152      assert(!system_gc || explicit_gc, "invariant");
  34.153 -    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  34.154 -    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  34.155 +    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
  34.156 +    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  34.157      TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
  34.158 -                PrintGC, true, gclog_or_tty);
  34.159 +                G1Log::fine(), true, gclog_or_tty);
  34.160  
  34.161      TraceCollectorStats tcs(g1mm()->full_collection_counters());
  34.162      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  34.163 @@ -1290,8 +1290,7 @@
  34.164        HandleMark hm;  // Discard invalid handles created during verification
  34.165        gclog_or_tty->print(" VerifyBeforeGC:");
  34.166        prepare_for_verify();
  34.167 -      Universe::verify(/* allow dirty */ true,
  34.168 -                       /* silent      */ false,
  34.169 +      Universe::verify(/* silent      */ false,
  34.170                         /* option      */ VerifyOption_G1UsePrevMarking);
  34.171  
  34.172      }
  34.173 @@ -1365,8 +1364,7 @@
  34.174        HandleMark hm;  // Discard invalid handles created during verification
  34.175        gclog_or_tty->print(" VerifyAfterGC:");
  34.176        prepare_for_verify();
  34.177 -      Universe::verify(/* allow dirty */ false,
  34.178 -                       /* silent      */ false,
  34.179 +      Universe::verify(/* silent      */ false,
  34.180                         /* option      */ VerifyOption_G1UsePrevMarking);
  34.181  
  34.182      }
  34.183 @@ -1444,7 +1442,7 @@
  34.184        heap_region_iterate(&rebuild_rs);
  34.185      }
  34.186  
  34.187 -    if (PrintGC) {
  34.188 +    if (G1Log::fine()) {
  34.189        print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  34.190      }
  34.191  
  34.192 @@ -1782,7 +1780,7 @@
  34.193      ReservedSpace::page_align_size_down(shrink_bytes);
  34.194    aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  34.195                                           HeapRegion::GrainBytes);
  34.196 -  size_t num_regions_deleted = 0;
  34.197 +  uint num_regions_deleted = 0;
  34.198    MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
  34.199    HeapWord* old_end = (HeapWord*) _g1_storage.high();
  34.200    assert(mr.end() == old_end, "post-condition");
  34.201 @@ -1917,6 +1915,8 @@
  34.202    CollectedHeap::pre_initialize();
  34.203    os::enable_vtime();
  34.204  
  34.205 +  G1Log::init();
  34.206 +
  34.207    // Necessary to satisfy locking discipline assertions.
  34.208  
  34.209    MutexLocker x(Heap_lock);
  34.210 @@ -2003,7 +2003,7 @@
  34.211    _reserved.set_start((HeapWord*)heap_rs.base());
  34.212    _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  34.213  
  34.214 -  _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  34.215 +  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
  34.216  
  34.217    // Create the gen rem set (and barrier set) for the entire reserved region.
  34.218    _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  34.219 @@ -2040,7 +2040,7 @@
  34.220  
  34.221    // 6843694 - ensure that the maximum region index can fit
  34.222    // in the remembered set structures.
  34.223 -  const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  34.224 +  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  34.225    guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  34.226  
  34.227    size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  34.228 @@ -2056,13 +2056,14 @@
  34.229    _g1h = this;
  34.230  
  34.231     _in_cset_fast_test_length = max_regions();
  34.232 -   _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
  34.233 +   _in_cset_fast_test_base =
  34.234 +                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);
  34.235  
  34.236     // We're biasing _in_cset_fast_test to avoid subtracting the
  34.237     // beginning of the heap every time we want to index; basically
  34.238     // it's the same with what we do with the card table.
  34.239     _in_cset_fast_test = _in_cset_fast_test_base -
  34.240 -                ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
  34.241 +               ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
  34.242  
  34.243     // Clear the _cset_fast_test bitmap in anticipation of adding
  34.244     // regions to the incremental collection set for the first
  34.245 @@ -2071,7 +2072,7 @@
  34.246  
  34.247    // Create the ConcurrentMark data structure and thread.
  34.248    // (Must do this late, so that "max_regions" is defined.)
  34.249 -  _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  34.250 +  _cm       = new ConcurrentMark(heap_rs, max_regions());
  34.251    _cmThread = _cm->cmThread();
  34.252  
  34.253    // Initialize the from_card cache structure of HeapRegionRemSet.
  34.254 @@ -2580,7 +2581,7 @@
  34.255                                                   uint worker,
  34.256                                                   uint no_of_par_workers,
  34.257                                                   jint claim_value) {
  34.258 -  const size_t regions = n_regions();
  34.259 +  const uint regions = n_regions();
  34.260    const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  34.261                               no_of_par_workers :
  34.262                               1);
  34.263 @@ -2588,11 +2589,11 @@
  34.264           no_of_par_workers == workers()->total_workers(),
  34.265           "Non dynamic should use fixed number of workers");
  34.266    // try to spread out the starting points of the workers
  34.267 -  const size_t start_index = regions / max_workers * (size_t) worker;
  34.268 +  const uint start_index = regions / max_workers * worker;
  34.269  
  34.270    // each worker will actually look at all regions
  34.271 -  for (size_t count = 0; count < regions; ++count) {
  34.272 -    const size_t index = (start_index + count) % regions;
  34.273 +  for (uint count = 0; count < regions; ++count) {
  34.274 +    const uint index = (start_index + count) % regions;
  34.275      assert(0 <= index && index < regions, "sanity");
  34.276      HeapRegion* r = region_at(index);
  34.277      // we'll ignore "continues humongous" regions (we'll process them
  34.278 @@ -2614,7 +2615,7 @@
  34.279          // result, we might end up processing them twice. So, we'll do
  34.280          // them first (notice: most closures will ignore them anyway) and
  34.281          // then we'll do the "starts humongous" region.
  34.282 -        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
  34.283 +        for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
  34.284            HeapRegion* chr = region_at(ch_index);
  34.285  
  34.286            // if the region has already been claimed or it's not
  34.287 @@ -2682,8 +2683,9 @@
  34.288  class CheckClaimValuesClosure : public HeapRegionClosure {
  34.289  private:
  34.290    jint _claim_value;
  34.291 -  size_t _failures;
  34.292 +  uint _failures;
  34.293    HeapRegion* _sh_region;
  34.294 +
  34.295  public:
  34.296    CheckClaimValuesClosure(jint claim_value) :
  34.297      _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  34.298 @@ -2711,9 +2713,7 @@
  34.299      }
  34.300      return false;
  34.301    }
  34.302 -  size_t failures() {
  34.303 -    return _failures;
  34.304 -  }
  34.305 +  uint failures() { return _failures; }
  34.306  };
  34.307  
  34.308  bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  34.309 @@ -2723,17 +2723,15 @@
  34.310  }
  34.311  
  34.312  class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
  34.313 -  jint   _claim_value;
  34.314 -  size_t _failures;
  34.315 +private:
  34.316 +  jint _claim_value;
  34.317 +  uint _failures;
  34.318  
  34.319  public:
  34.320    CheckClaimValuesInCSetHRClosure(jint claim_value) :
  34.321 -    _claim_value(claim_value),
  34.322 -    _failures(0) { }
  34.323 -
  34.324 -  size_t failures() {
  34.325 -    return _failures;
  34.326 -  }
  34.327 +    _claim_value(claim_value), _failures(0) { }
  34.328 +
  34.329 +  uint failures() { return _failures; }
  34.330  
  34.331    bool doHeapRegion(HeapRegion* hr) {
  34.332      assert(hr->in_collection_set(), "how?");
  34.333 @@ -2800,14 +2798,14 @@
  34.334  
  34.335    result = g1_policy()->collection_set();
  34.336    if (G1CollectedHeap::use_parallel_gc_threads()) {
  34.337 -    size_t cs_size = g1_policy()->cset_region_length();
  34.338 +    uint cs_size = g1_policy()->cset_region_length();
  34.339      uint active_workers = workers()->active_workers();
  34.340      assert(UseDynamicNumberOfGCThreads ||
  34.341               active_workers == workers()->total_workers(),
  34.342               "Unless dynamic should use total workers");
  34.343  
  34.344 -    size_t end_ind   = (cs_size * worker_i) / active_workers;
  34.345 -    size_t start_ind = 0;
  34.346 +    uint end_ind   = (cs_size * worker_i) / active_workers;
  34.347 +    uint start_ind = 0;
  34.348  
  34.349      if (worker_i > 0 &&
  34.350          _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
  34.351 @@ -2817,7 +2815,7 @@
  34.352        result = _worker_cset_start_region[worker_i - 1];
  34.353      }
  34.354  
  34.355 -    for (size_t i = start_ind; i < end_ind; i++) {
  34.356 +    for (uint i = start_ind; i < end_ind; i++) {
  34.357        result = result->next_in_collection_set();
  34.358      }
  34.359    }
  34.360 @@ -3033,7 +3031,6 @@
  34.361  
  34.362  class VerifyRegionClosure: public HeapRegionClosure {
  34.363  private:
  34.364 -  bool         _allow_dirty;
  34.365    bool         _par;
  34.366    VerifyOption _vo;
  34.367    bool         _failures;
  34.368 @@ -3041,9 +3038,8 @@
  34.369    // _vo == UsePrevMarking -> use "prev" marking information,
  34.370    // _vo == UseNextMarking -> use "next" marking information,
  34.371    // _vo == UseMarkWord    -> use mark word from object header.
  34.372 -  VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo)
  34.373 -    : _allow_dirty(allow_dirty),
  34.374 -      _par(par),
  34.375 +  VerifyRegionClosure(bool par, VerifyOption vo)
  34.376 +    : _par(par),
  34.377        _vo(vo),
  34.378        _failures(false) {}
  34.379  
  34.380 @@ -3056,7 +3052,7 @@
  34.381                "Should be unclaimed at verify points.");
  34.382      if (!r->continuesHumongous()) {
  34.383        bool failures = false;
  34.384 -      r->verify(_allow_dirty, _vo, &failures);
  34.385 +      r->verify(_vo, &failures);
  34.386        if (failures) {
  34.387          _failures = true;
  34.388        } else {
  34.389 @@ -3124,7 +3120,6 @@
  34.390  class G1ParVerifyTask: public AbstractGangTask {
  34.391  private:
  34.392    G1CollectedHeap* _g1h;
  34.393 -  bool             _allow_dirty;
  34.394    VerifyOption     _vo;
  34.395    bool             _failures;
  34.396  
  34.397 @@ -3132,10 +3127,9 @@
  34.398    // _vo == UsePrevMarking -> use "prev" marking information,
  34.399    // _vo == UseNextMarking -> use "next" marking information,
  34.400    // _vo == UseMarkWord    -> use mark word from object header.
  34.401 -  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) :
  34.402 +  G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
  34.403      AbstractGangTask("Parallel verify task"),
  34.404      _g1h(g1h),
  34.405 -    _allow_dirty(allow_dirty),
  34.406      _vo(vo),
  34.407      _failures(false) { }
  34.408  
  34.409 @@ -3145,7 +3139,7 @@
  34.410  
  34.411    void work(uint worker_id) {
  34.412      HandleMark hm;
  34.413 -    VerifyRegionClosure blk(_allow_dirty, true, _vo);
  34.414 +    VerifyRegionClosure blk(true, _vo);
  34.415      _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
  34.416                                            _g1h->workers()->active_workers(),
  34.417                                            HeapRegion::ParVerifyClaimValue);
  34.418 @@ -3155,12 +3149,11 @@
  34.419    }
  34.420  };
  34.421  
  34.422 -void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  34.423 -  verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking);
  34.424 -}
  34.425 -
  34.426 -void G1CollectedHeap::verify(bool allow_dirty,
  34.427 -                             bool silent,
  34.428 +void G1CollectedHeap::verify(bool silent) {
  34.429 +  verify(silent, VerifyOption_G1UsePrevMarking);
  34.430 +}
  34.431 +
  34.432 +void G1CollectedHeap::verify(bool silent,
  34.433                               VerifyOption vo) {
  34.434    if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  34.435      if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
  34.436 @@ -3212,7 +3205,7 @@
  34.437        assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  34.438               "sanity check");
  34.439  
  34.440 -      G1ParVerifyTask task(this, allow_dirty, vo);
  34.441 +      G1ParVerifyTask task(this, vo);
  34.442        assert(UseDynamicNumberOfGCThreads ||
  34.443          workers()->active_workers() == workers()->total_workers(),
  34.444          "If not dynamic should be using all the workers");
  34.445 @@ -3234,7 +3227,7 @@
  34.446        assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  34.447               "sanity check");
  34.448      } else {
  34.449 -      VerifyRegionClosure blk(allow_dirty, false, vo);
  34.450 +      VerifyRegionClosure blk(false, vo);
  34.451        heap_region_iterate(&blk);
  34.452        if (blk.failures()) {
  34.453          failures = true;
  34.454 @@ -3284,12 +3277,12 @@
  34.455              _g1_storage.high_boundary());
  34.456    st->cr();
  34.457    st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  34.458 -  size_t young_regions = _young_list->length();
  34.459 -  st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
  34.460 -            young_regions, young_regions * HeapRegion::GrainBytes / K);
  34.461 -  size_t survivor_regions = g1_policy()->recorded_survivor_regions();
  34.462 -  st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
  34.463 -            survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
  34.464 +  uint young_regions = _young_list->length();
  34.465 +  st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
  34.466 +            (size_t) young_regions * HeapRegion::GrainBytes / K);
  34.467 +  uint survivor_regions = g1_policy()->recorded_survivor_regions();
  34.468 +  st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
  34.469 +            (size_t) survivor_regions * HeapRegion::GrainBytes / K);
  34.470    st->cr();
  34.471    perm()->as_gen()->print_on(st);
  34.472  }
  34.473 @@ -3299,7 +3292,11 @@
  34.474  
  34.475    // Print the per-region information.
  34.476    st->cr();
  34.477 -  st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)");
  34.478 +  st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
  34.479 +               "HS=humongous(starts), HC=humongous(continues), "
  34.480 +               "CS=collection set, F=free, TS=gc time stamp, "
  34.481 +               "PTAMS=previous top-at-mark-start, "
  34.482 +               "NTAMS=next top-at-mark-start)");
  34.483    PrintRegionClosure blk(st);
  34.484    heap_region_iterate(&blk);
  34.485  }
  34.486 @@ -3477,16 +3474,16 @@
  34.487  
  34.488  void
  34.489  G1CollectedHeap::setup_surviving_young_words() {
  34.490 -  guarantee( _surviving_young_words == NULL, "pre-condition" );
  34.491 -  size_t array_length = g1_policy()->young_cset_region_length();
  34.492 -  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
  34.493 +  assert(_surviving_young_words == NULL, "pre-condition");
  34.494 +  uint array_length = g1_policy()->young_cset_region_length();
  34.495 +  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
  34.496    if (_surviving_young_words == NULL) {
  34.497      vm_exit_out_of_memory(sizeof(size_t) * array_length,
  34.498                            "Not enough space for young surv words summary.");
  34.499    }
  34.500 -  memset(_surviving_young_words, 0, array_length * sizeof(size_t));
  34.501 +  memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
  34.502  #ifdef ASSERT
  34.503 -  for (size_t i = 0;  i < array_length; ++i) {
  34.504 +  for (uint i = 0;  i < array_length; ++i) {
  34.505      assert( _surviving_young_words[i] == 0, "memset above" );
  34.506    }
  34.507  #endif // !ASSERT
  34.508 @@ -3495,9 +3492,10 @@
  34.509  void
  34.510  G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  34.511    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  34.512 -  size_t array_length = g1_policy()->young_cset_region_length();
  34.513 -  for (size_t i = 0; i < array_length; ++i)
  34.514 +  uint array_length = g1_policy()->young_cset_region_length();
  34.515 +  for (uint i = 0; i < array_length; ++i) {
  34.516      _surviving_young_words[i] += surv_young_words[i];
  34.517 +  }
  34.518  }
  34.519  
  34.520  void
  34.521 @@ -3609,12 +3607,12 @@
  34.522        increment_total_full_collections();
  34.523      }
  34.524  
  34.525 -    // if PrintGCDetails is on, we'll print long statistics information
  34.526 +    // if the "finer" log level is on, we'll print long statistics information
  34.527      // in the collector policy code, so let's not print this as the output
  34.528      // is messy if we do.
  34.529 -    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  34.530 -    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  34.531 -    TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  34.532 +    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
  34.533 +    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  34.534 +    TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);
  34.535  
  34.536      TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  34.537      TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  34.538 @@ -3647,8 +3645,7 @@
  34.539          HandleMark hm;  // Discard invalid handles created during verification
  34.540          gclog_or_tty->print(" VerifyBeforeGC:");
  34.541          prepare_for_verify();
  34.542 -        Universe::verify(/* allow dirty */ false,
  34.543 -                         /* silent      */ false,
  34.544 +        Universe::verify(/* silent      */ false,
  34.545                           /* option      */ VerifyOption_G1UsePrevMarking);
  34.546        }
  34.547  
  34.548 @@ -3892,8 +3889,7 @@
  34.549            HandleMark hm;  // Discard invalid handles created during verification
  34.550            gclog_or_tty->print(" VerifyAfterGC:");
  34.551            prepare_for_verify();
  34.552 -          Universe::verify(/* allow dirty */ true,
  34.553 -                           /* silent      */ false,
  34.554 +          Universe::verify(/* silent      */ false,
  34.555                             /* option      */ VerifyOption_G1UsePrevMarking);
  34.556          }
  34.557  
  34.558 @@ -3931,8 +3927,8 @@
  34.559    }
  34.560  
  34.561    // The closing of the inner scope, immediately above, will complete
  34.562 -  // the PrintGC logging output. The record_collection_pause_end() call
  34.563 -  // above will complete the logging output of PrintGCDetails.
  34.564 +  // logging at the "fine" level. The record_collection_pause_end() call
  34.565 +  // above will complete logging at the "finer" level.
  34.566    //
  34.567    // It is not yet safe, however, to tell the concurrent mark to
  34.568    // start as we have some optional output below. We don't want the
  34.569 @@ -4068,7 +4064,6 @@
  34.570  
  34.571  void G1CollectedHeap::remove_self_forwarding_pointers() {
  34.572    assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  34.573 -  assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  34.574  
  34.575    G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
  34.576  
  34.577 @@ -4086,7 +4081,6 @@
  34.578    reset_cset_heap_region_claim_values();
  34.579  
  34.580    assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  34.581 -  assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  34.582  
  34.583    // Now restore saved marks, if any.
  34.584    if (_objs_with_preserved_marks != NULL) {
  34.585 @@ -4248,16 +4242,16 @@
  34.586    // non-young regions (where the age is -1)
  34.587    // We also add a few elements at the beginning and at the end in
  34.588    // an attempt to eliminate cache contention
  34.589 -  size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  34.590 -  size_t array_length = PADDING_ELEM_NUM +
  34.591 -                        real_length +
  34.592 -                        PADDING_ELEM_NUM;
  34.593 +  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  34.594 +  uint array_length = PADDING_ELEM_NUM +
  34.595 +                      real_length +
  34.596 +                      PADDING_ELEM_NUM;
  34.597    _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
  34.598    if (_surviving_young_words_base == NULL)
  34.599      vm_exit_out_of_memory(array_length * sizeof(size_t),
  34.600                            "Not enough space for young surv histo.");
  34.601    _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  34.602 -  memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  34.603 +  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
  34.604  
  34.605    _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  34.606    _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  34.607 @@ -4394,7 +4388,7 @@
  34.608  template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  34.609  oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
  34.610    ::copy_to_survivor_space(oop old) {
  34.611 -  size_t    word_sz = old->size();
  34.612 +  size_t word_sz = old->size();
  34.613    HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  34.614    // +1 to make the -1 indexes valid...
  34.615    int       young_index = from_region->young_index_in_cset()+1;
  34.616 @@ -5514,9 +5508,9 @@
  34.617  
  34.618    if (evacuation_failed()) {
  34.619      remove_self_forwarding_pointers();
  34.620 -    if (PrintGCDetails) {
  34.621 +    if (G1Log::finer()) {
  34.622        gclog_or_tty->print(" (to-space overflow)");
  34.623 -    } else if (PrintGC) {
  34.624 +    } else if (G1Log::fine()) {
  34.625        gclog_or_tty->print("--");
  34.626      }
  34.627    }
  34.628 @@ -5591,8 +5585,8 @@
  34.629    hr->set_notHumongous();
  34.630    free_region(hr, &hr_pre_used, free_list, par);
  34.631  
  34.632 -  size_t i = hr->hrs_index() + 1;
  34.633 -  size_t num = 1;
  34.634 +  uint i = hr->hrs_index() + 1;
  34.635 +  uint num = 1;
  34.636    while (i < n_regions()) {
  34.637      HeapRegion* curr_hr = region_at(i);
  34.638      if (!curr_hr->continuesHumongous()) {
  34.639 @@ -5801,7 +5795,7 @@
  34.640      if (cur->is_young()) {
  34.641        int index = cur->young_index_in_cset();
  34.642        assert(index != -1, "invariant");
  34.643 -      assert((size_t) index < policy->young_cset_region_length(), "invariant");
  34.644 +      assert((uint) index < policy->young_cset_region_length(), "invariant");
  34.645        size_t words_survived = _surviving_young_words[index];
  34.646        cur->record_surv_words_in_group(words_survived);
  34.647  
  34.648 @@ -6141,7 +6135,7 @@
  34.649  // Methods for the GC alloc regions
  34.650  
  34.651  HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
  34.652 -                                                 size_t count,
  34.653 +                                                 uint count,
  34.654                                                   GCAllocPurpose ap) {
  34.655    assert(FreeList_lock->owned_by_self(), "pre-condition");
  34.656  
  34.657 @@ -6213,7 +6207,7 @@
  34.658    FreeRegionList*     _free_list;
  34.659    OldRegionSet*       _old_set;
  34.660    HumongousRegionSet* _humongous_set;
  34.661 -  size_t              _region_count;
  34.662 +  uint                _region_count;
  34.663  
  34.664  public:
  34.665    VerifyRegionListsClosure(OldRegionSet* old_set,
  34.666 @@ -6222,7 +6216,7 @@
  34.667      _old_set(old_set), _humongous_set(humongous_set),
  34.668      _free_list(free_list), _region_count(0) { }
  34.669  
  34.670 -  size_t region_count()      { return _region_count;      }
  34.671 +  uint region_count() { return _region_count; }
  34.672  
  34.673    bool doHeapRegion(HeapRegion* hr) {
  34.674      _region_count += 1;
  34.675 @@ -6244,7 +6238,7 @@
  34.676    }
  34.677  };
  34.678  
  34.679 -HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
  34.680 +HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
  34.681                                               HeapWord* bottom) {
  34.682    HeapWord* end = bottom + HeapRegion::GrainWords;
  34.683    MemRegion mr(bottom, end);
    35.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Apr 19 12:18:46 2012 -0700
    35.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Apr 20 16:23:48 2012 -0700
    35.3 @@ -85,8 +85,8 @@
    35.4  
    35.5    HeapRegion* _curr;
    35.6  
    35.7 -  size_t      _length;
    35.8 -  size_t      _survivor_length;
    35.9 +  uint        _length;
   35.10 +  uint        _survivor_length;
   35.11  
   35.12    size_t      _last_sampled_rs_lengths;
   35.13    size_t      _sampled_rs_lengths;
   35.14 @@ -101,8 +101,8 @@
   35.15  
   35.16    void         empty_list();
   35.17    bool         is_empty() { return _length == 0; }
   35.18 -  size_t       length() { return _length; }
   35.19 -  size_t       survivor_length() { return _survivor_length; }
   35.20 +  uint         length() { return _length; }
   35.21 +  uint         survivor_length() { return _survivor_length; }
   35.22  
   35.23    // Currently we do not keep track of the used byte sum for the
   35.24    // young list and the survivors and it'd be quite a lot of work to
   35.25 @@ -111,10 +111,10 @@
   35.26    // we'll report the more accurate information then.
   35.27    size_t       eden_used_bytes() {
   35.28      assert(length() >= survivor_length(), "invariant");
   35.29 -    return (length() - survivor_length()) * HeapRegion::GrainBytes;
   35.30 +    return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
   35.31    }
   35.32    size_t       survivor_used_bytes() {
   35.33 -    return survivor_length() * HeapRegion::GrainBytes;
   35.34 +    return (size_t) survivor_length() * HeapRegion::GrainBytes;
   35.35    }
   35.36  
   35.37    void rs_length_sampling_init();
   35.38 @@ -247,7 +247,7 @@
   35.39    MasterHumongousRegionSet  _humongous_set;
   35.40  
   35.41    // The number of regions we could create by expansion.
   35.42 -  size_t _expansion_regions;
   35.43 +  uint _expansion_regions;
   35.44  
   35.45    // The block offset table for the G1 heap.
   35.46    G1BlockOffsetSharedArray* _bot_shared;
   35.47 @@ -339,7 +339,7 @@
   35.48    bool* _in_cset_fast_test_base;
   35.49  
   35.50    // The length of the _in_cset_fast_test_base array.
   35.51 -  size_t _in_cset_fast_test_length;
   35.52 +  uint _in_cset_fast_test_length;
   35.53  
   35.54    volatile unsigned _gc_time_stamp;
   35.55  
   35.56 @@ -458,14 +458,14 @@
   35.57    // length and remove them from the master free list. Return the
   35.58    // index of the first region or G1_NULL_HRS_INDEX if the search
   35.59    // was unsuccessful.
   35.60 -  size_t humongous_obj_allocate_find_first(size_t num_regions,
   35.61 -                                           size_t word_size);
   35.62 +  uint humongous_obj_allocate_find_first(uint num_regions,
   35.63 +                                         size_t word_size);
   35.64  
   35.65    // Initialize a contiguous set of free regions of length num_regions
   35.66    // and starting at index first so that they appear as a single
   35.67    // humongous region.
   35.68 -  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
   35.69 -                                                      size_t num_regions,
   35.70 +  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
   35.71 +                                                      uint num_regions,
   35.72                                                        size_t word_size);
   35.73  
   35.74    // Attempt to allocate a humongous object of the given size. Return
   35.75 @@ -574,7 +574,7 @@
   35.76                                     size_t allocated_bytes);
   35.77  
   35.78    // For GC alloc regions.
   35.79 -  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
   35.80 +  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
   35.81                                    GCAllocPurpose ap);
   35.82    void retire_gc_alloc_region(HeapRegion* alloc_region,
   35.83                                size_t allocated_bytes, GCAllocPurpose ap);
   35.84 @@ -641,7 +641,7 @@
   35.85    void register_region_with_in_cset_fast_test(HeapRegion* r) {
   35.86      assert(_in_cset_fast_test_base != NULL, "sanity");
   35.87      assert(r->in_collection_set(), "invariant");
   35.88 -    size_t index = r->hrs_index();
   35.89 +    uint index = r->hrs_index();
   35.90      assert(index < _in_cset_fast_test_length, "invariant");
   35.91      assert(!_in_cset_fast_test_base[index], "invariant");
   35.92      _in_cset_fast_test_base[index] = true;
   35.93 @@ -655,7 +655,7 @@
   35.94      if (_g1_committed.contains((HeapWord*) obj)) {
   35.95        // no need to subtract the bottom of the heap from obj,
   35.96        // _in_cset_fast_test is biased
   35.97 -      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
   35.98 +      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
   35.99        bool ret = _in_cset_fast_test[index];
  35.100        // let's make sure the result is consistent with what the slower
  35.101        // test returns
  35.102 @@ -670,7 +670,7 @@
  35.103    void clear_cset_fast_test() {
  35.104      assert(_in_cset_fast_test_base != NULL, "sanity");
  35.105      memset(_in_cset_fast_test_base, false,
  35.106 -        _in_cset_fast_test_length * sizeof(bool));
  35.107 +           (size_t) _in_cset_fast_test_length * sizeof(bool));
  35.108    }
  35.109  
  35.110    // This is called at the end of either a concurrent cycle or a Full
  35.111 @@ -1101,23 +1101,23 @@
  35.112    }
  35.113  
  35.114    // The total number of regions in the heap.
  35.115 -  size_t n_regions() { return _hrs.length(); }
  35.116 +  uint n_regions() { return _hrs.length(); }
  35.117  
  35.118    // The max number of regions in the heap.
  35.119 -  size_t max_regions() { return _hrs.max_length(); }
  35.120 +  uint max_regions() { return _hrs.max_length(); }
  35.121  
  35.122    // The number of regions that are completely free.
  35.123 -  size_t free_regions() { return _free_list.length(); }
  35.124 +  uint free_regions() { return _free_list.length(); }
  35.125  
  35.126    // The number of regions that are not completely free.
  35.127 -  size_t used_regions() { return n_regions() - free_regions(); }
  35.128 +  uint used_regions() { return n_regions() - free_regions(); }
  35.129  
  35.130    // The number of regions available for "regular" expansion.
  35.131 -  size_t expansion_regions() { return _expansion_regions; }
  35.132 +  uint expansion_regions() { return _expansion_regions; }
  35.133  
  35.134    // Factory method for HeapRegion instances. It will return NULL if
  35.135    // the allocation fails.
  35.136 -  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
  35.137 +  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
  35.138  
  35.139    void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  35.140    void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  35.141 @@ -1301,7 +1301,7 @@
  35.142    void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
  35.143  
  35.144    // Return the region with the given index. It assumes the index is valid.
  35.145 -  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
  35.146 +  HeapRegion* region_at(uint index) const { return _hrs.at(index); }
  35.147  
  35.148    // Divide the heap region sequence into "chunks" of some size (the number
  35.149    // of regions divided by the number of parallel threads times some
  35.150 @@ -1504,10 +1504,10 @@
  35.151    // Currently there is only one place where this is called with
  35.152    // vo == UseMarkWord, which is to verify the marking during a
  35.153    // full GC.
  35.154 -  void verify(bool allow_dirty, bool silent, VerifyOption vo);
  35.155 +  void verify(bool silent, VerifyOption vo);
  35.156  
  35.157    // Override; it uses the "prev" marking information
  35.158 -  virtual void verify(bool allow_dirty, bool silent);
  35.159 +  virtual void verify(bool silent);
  35.160    virtual void print_on(outputStream* st) const;
  35.161    virtual void print_extended_on(outputStream* st) const;
  35.162  
    36.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Apr 19 12:18:46 2012 -0700
    36.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Apr 20 16:23:48 2012 -0700
    36.3 @@ -29,6 +29,7 @@
    36.4  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    36.5  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    36.6  #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    36.7 +#include "gc_implementation/g1/g1Log.hpp"
    36.8  #include "gc_implementation/g1/heapRegionRemSet.hpp"
    36.9  #include "gc_implementation/shared/gcPolicyCounters.hpp"
   36.10  #include "runtime/arguments.hpp"
   36.11 @@ -191,11 +192,6 @@
   36.12    _in_marking_window(false),
   36.13    _in_marking_window_im(false),
   36.14  
   36.15 -  _known_garbage_ratio(0.0),
   36.16 -  _known_garbage_bytes(0),
   36.17 -
   36.18 -  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
   36.19 -
   36.20    _recent_prev_end_times_for_all_gcs_sec(
   36.21                                  new TruncatedSeq(NumPrevPausesForHeuristics)),
   36.22  
   36.23 @@ -430,31 +426,36 @@
   36.24    }
   36.25  
   36.26    if (FLAG_IS_CMDLINE(NewSize)) {
   36.27 -     _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
   36.28 +    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
   36.29 +                                     1U);
   36.30      if (FLAG_IS_CMDLINE(MaxNewSize)) {
   36.31 -      _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
   36.32 +      _max_desired_young_length =
   36.33 +                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
   36.34 +                                  1U);
   36.35        _sizer_kind = SizerMaxAndNewSize;
   36.36        _adaptive_size = _min_desired_young_length == _max_desired_young_length;
   36.37      } else {
   36.38        _sizer_kind = SizerNewSizeOnly;
   36.39      }
   36.40    } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
   36.41 -    _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
   36.42 +    _max_desired_young_length =
   36.43 +                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
   36.44 +                                  1U);
   36.45      _sizer_kind = SizerMaxNewSizeOnly;
   36.46    }
   36.47  }
   36.48  
   36.49 -size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
   36.50 -  size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
   36.51 -  return MAX2((size_t)1, default_value);
   36.52 +uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
   36.53 +  uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
   36.54 +  return MAX2(1U, default_value);
   36.55  }
   36.56  
   36.57 -size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
   36.58 -  size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
   36.59 -  return MAX2((size_t)1, default_value);
   36.60 +uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
   36.61 +  uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
   36.62 +  return MAX2(1U, default_value);
   36.63  }
   36.64  
   36.65 -void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
   36.66 +void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
   36.67    assert(new_number_of_heap_regions > 0, "Heap must be initialized");
   36.68  
   36.69    switch (_sizer_kind) {
   36.70 @@ -511,16 +512,16 @@
   36.71    _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
   36.72  }
   36.73  
   36.74 -bool G1CollectorPolicy::predict_will_fit(size_t young_length,
   36.75 +bool G1CollectorPolicy::predict_will_fit(uint young_length,
   36.76                                           double base_time_ms,
   36.77 -                                         size_t base_free_regions,
   36.78 +                                         uint base_free_regions,
   36.79                                           double target_pause_time_ms) {
   36.80    if (young_length >= base_free_regions) {
   36.81      // end condition 1: not enough space for the young regions
   36.82      return false;
   36.83    }
   36.84  
   36.85 -  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
   36.86 +  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
   36.87    size_t bytes_to_copy =
   36.88                 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   36.89    double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
   36.90 @@ -532,7 +533,7 @@
   36.91    }
   36.92  
   36.93    size_t free_bytes =
   36.94 -                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
   36.95 +                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
   36.96    if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
   36.97      // end condition 3: out-of-space (conservatively!)
   36.98      return false;
   36.99 @@ -542,25 +543,25 @@
  36.100    return true;
  36.101  }
  36.102  
  36.103 -void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  36.104 +void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  36.105    // re-calculate the necessary reserve
  36.106    double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  36.107    // We use ceiling so that if reserve_regions_d is > 0.0 (but
  36.108    // smaller than 1.0) we'll get 1.
  36.109 -  _reserve_regions = (size_t) ceil(reserve_regions_d);
  36.110 +  _reserve_regions = (uint) ceil(reserve_regions_d);
  36.111  
  36.112    _young_gen_sizer->heap_size_changed(new_number_of_regions);
  36.113  }
  36.114  
  36.115 -size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
  36.116 -                                                     size_t base_min_length) {
  36.117 -  size_t desired_min_length = 0;
  36.118 +uint G1CollectorPolicy::calculate_young_list_desired_min_length(
  36.119 +                                                       uint base_min_length) {
  36.120 +  uint desired_min_length = 0;
  36.121    if (adaptive_young_list_length()) {
  36.122      if (_alloc_rate_ms_seq->num() > 3) {
  36.123        double now_sec = os::elapsedTime();
  36.124        double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
  36.125        double alloc_rate_ms = predict_alloc_rate_ms();
  36.126 -      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
  36.127 +      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
  36.128      } else {
  36.129        // otherwise we don't have enough info to make the prediction
  36.130      }
  36.131 @@ -570,7 +571,7 @@
  36.132    return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
  36.133  }
  36.134  
  36.135 -size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  36.136 +uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  36.137    // Here, we might want to also take into account any additional
  36.138    // constraints (i.e., user-defined minimum bound). Currently, we
  36.139    // effectively don't set this bound.
  36.140 @@ -587,11 +588,11 @@
  36.141    // Calculate the absolute and desired min bounds.
  36.142  
  36.143    // This is how many young regions we already have (currently: the survivors).
  36.144 -  size_t base_min_length = recorded_survivor_regions();
  36.145 +  uint base_min_length = recorded_survivor_regions();
  36.146    // This is the absolute minimum young length, which ensures that we
  36.147    // can allocate one eden region in the worst-case.
  36.148 -  size_t absolute_min_length = base_min_length + 1;
  36.149 -  size_t desired_min_length =
  36.150 +  uint absolute_min_length = base_min_length + 1;
  36.151 +  uint desired_min_length =
  36.152                       calculate_young_list_desired_min_length(base_min_length);
  36.153    if (desired_min_length < absolute_min_length) {
  36.154      desired_min_length = absolute_min_length;
  36.155 @@ -600,16 +601,16 @@
  36.156    // Calculate the absolute and desired max bounds.
  36.157  
  36.158    // We will try our best not to "eat" into the reserve.
  36.159 -  size_t absolute_max_length = 0;
  36.160 +  uint absolute_max_length = 0;
  36.161    if (_free_regions_at_end_of_collection > _reserve_regions) {
  36.162      absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  36.163    }
  36.164 -  size_t desired_max_length = calculate_young_list_desired_max_length();
  36.165 +  uint desired_max_length = calculate_young_list_desired_max_length();
  36.166    if (desired_max_length > absolute_max_length) {
  36.167      desired_max_length = absolute_max_length;
  36.168    }
  36.169  
  36.170 -  size_t young_list_target_length = 0;
  36.171 +  uint young_list_target_length = 0;
  36.172    if (adaptive_young_list_length()) {
  36.173      if (gcs_are_young()) {
  36.174        young_list_target_length =
  36.175 @@ -647,11 +648,11 @@
  36.176    update_max_gc_locker_expansion();
  36.177  }
  36.178  
  36.179 -size_t
  36.180 +uint
  36.181  G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
  36.182 -                                                   size_t base_min_length,
  36.183 -                                                   size_t desired_min_length,
  36.184 -                                                   size_t desired_max_length) {
  36.185 +                                                     uint base_min_length,
  36.186 +                                                     uint desired_min_length,
  36.187 +                                                     uint desired_max_length) {
  36.188    assert(adaptive_young_list_length(), "pre-condition");
  36.189    assert(gcs_are_young(), "only call this for young GCs");
  36.190  
  36.191 @@ -666,9 +667,9 @@
  36.192    // will be reflected in the predictions by the
  36.193    // survivor_regions_evac_time prediction.
  36.194    assert(desired_min_length > base_min_length, "invariant");
  36.195 -  size_t min_young_length = desired_min_length - base_min_length;
  36.196 +  uint min_young_length = desired_min_length - base_min_length;
  36.197    assert(desired_max_length > base_min_length, "invariant");
  36.198 -  size_t max_young_length = desired_max_length - base_min_length;
  36.199 +  uint max_young_length = desired_max_length - base_min_length;
  36.200  
  36.201    double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  36.202    double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  36.203 @@ -678,8 +679,8 @@
  36.204    double base_time_ms =
  36.205      predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
  36.206      survivor_regions_evac_time;
  36.207 -  size_t available_free_regions = _free_regions_at_end_of_collection;
  36.208 -  size_t base_free_regions = 0;
  36.209 +  uint available_free_regions = _free_regions_at_end_of_collection;
  36.210 +  uint base_free_regions = 0;
  36.211    if (available_free_regions > _reserve_regions) {
  36.212      base_free_regions = available_free_regions - _reserve_regions;
  36.213    }
  36.214 @@ -716,9 +717,9 @@
  36.215        // the new max. This way we maintain the loop invariants.
  36.216  
  36.217        assert(min_young_length < max_young_length, "invariant");
  36.218 -      size_t diff = (max_young_length - min_young_length) / 2;
  36.219 +      uint diff = (max_young_length - min_young_length) / 2;
  36.220        while (diff > 0) {
  36.221 -        size_t young_length = min_young_length + diff;
  36.222 +        uint young_length = min_young_length + diff;
  36.223          if (predict_will_fit(young_length, base_time_ms,
  36.224                               base_free_regions, target_pause_time_ms)) {
  36.225            min_young_length = young_length;
  36.226 @@ -862,8 +863,6 @@
  36.227    _last_young_gc = false;
  36.228    clear_initiate_conc_mark_if_possible();
  36.229    clear_during_initial_mark_pause();
  36.230 -  _known_garbage_bytes = 0;
  36.231 -  _known_garbage_ratio = 0.0;
  36.232    _in_marking_window = false;
  36.233    _in_marking_window_im = false;
  36.234  
  36.235 @@ -876,7 +875,7 @@
  36.236    // Reset survivors SurvRateGroup.
  36.237    _survivor_surv_rate_group->reset();
  36.238    update_young_list_target_length();
  36.239 -  _collectionSetChooser->clearMarkedHeapRegions();
  36.240 +  _collectionSetChooser->clear();
  36.241  }
  36.242  
  36.243  void G1CollectorPolicy::record_stop_world_start() {
  36.244 @@ -885,7 +884,7 @@
  36.245  
  36.246  void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
  36.247                                                        size_t start_used) {
  36.248 -  if (PrintGCDetails) {
  36.249 +  if (G1Log::finer()) {
  36.250      gclog_or_tty->stamp(PrintGCTimeStamps);
  36.251      gclog_or_tty->print("[GC pause");
  36.252      gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  36.253 @@ -1022,11 +1021,16 @@
  36.254      if (val > max)
  36.255        max = val;
  36.256      total += val;
  36.257 -    buf.append("  %3.1lf", val);
  36.258 +    if (G1Log::finest()) {
  36.259 +      buf.append("  %.1lf", val);
  36.260 +    }
  36.261    }
  36.262 -  buf.append_and_print_cr("");
  36.263 +
  36.264 +  if (G1Log::finest()) {
  36.265 +    buf.append_and_print_cr("");
  36.266 +  }
  36.267    double avg = total / (double) no_of_gc_threads();
  36.268 -  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
  36.269 +  buf.append_and_print_cr(" Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]",
  36.270      avg, min, max, max - min);
  36.271  }
  36.272  
  36.273 @@ -1223,7 +1227,7 @@
  36.274  
  36.275    // These values are used to update the summary information that is
  36.276    // displayed when TraceGen0Time is enabled, and are output as part
  36.277 -  // of the PrintGCDetails output, in the non-parallel case.
  36.278 +  // of the "finer" output, in the non-parallel case.
  36.279  
  36.280    double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  36.281    double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
  36.282 @@ -1316,7 +1320,7 @@
  36.283      // given that humongous object allocations do not really affect
  36.284      // either the pause's duration nor when the next pause will take
  36.285      // place we can safely ignore them here.
  36.286 -    size_t regions_allocated = eden_cset_region_length();
  36.287 +    uint regions_allocated = eden_cset_region_length();
  36.288      double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  36.289      _alloc_rate_ms_seq->add(alloc_rate_ms);
  36.290  
  36.291 @@ -1356,8 +1360,7 @@
  36.292      }
  36.293    }
  36.294  
  36.295 -  // PrintGCDetails output
  36.296 -  if (PrintGCDetails) {
  36.297 +  if (G1Log::finer()) {
  36.298      bool print_marking_info =
  36.299        _g1->mark_in_progress() && !last_pause_included_initial_mark;
  36.300  
  36.301 @@ -1376,11 +1379,15 @@
  36.302          print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
  36.303        }
  36.304        print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  36.305 -      print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  36.306 +      if (G1Log::finest()) {
  36.307 +        print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  36.308 +      }
  36.309        print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  36.310        print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  36.311        print_par_stats(2, "Termination", _par_last_termination_times_ms);
  36.312 -      print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  36.313 +      if (G1Log::finest()) {
  36.314 +        print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  36.315 +      }
  36.316  
  36.317        for (int i = 0; i < _parallel_gc_threads; i++) {
  36.318          _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
  36.319 @@ -1406,7 +1413,9 @@
  36.320          print_stats(1, "SATB Filtering", satb_filtering_time);
  36.321        }
  36.322        print_stats(1, "Update RS", update_rs_time);
  36.323 -      print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
  36.324 +      if (G1Log::finest()) {
  36.325 +        print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
  36.326 +      }
  36.327        print_stats(1, "Scan RS", scan_rs_time);
  36.328        print_stats(1, "Object Copying", obj_copy_time);
  36.329      }
  36.330 @@ -1440,16 +1449,6 @@
  36.331      }
  36.332    }
  36.333  
  36.334 -  // Update the efficiency-since-mark vars.
  36.335 -  double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  36.336 -  if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  36.337 -    // This usually happens due to the timer not having the required
  36.338 -    // granularity. Some Linuxes are the usual culprits.
  36.339 -    // We'll just set it to something (arbitrarily) small.
  36.340 -    proc_ms = 1.0;
  36.341 -  }
  36.342 -  double cur_efficiency = (double) freed_bytes / proc_ms;
  36.343 -
  36.344    bool new_in_marking_window = _in_marking_window;
  36.345    bool new_in_marking_window_im = false;
  36.346    if (during_initial_mark_pause()) {
  36.347 @@ -1484,10 +1483,6 @@
  36.348      }
  36.349    }
  36.350  
  36.351 -  if (_last_gc_was_young && !_during_marking) {
  36.352 -    _young_gc_eff_seq->add(cur_efficiency);
  36.353 -  }
  36.354 -
  36.355    _short_lived_surv_rate_group->start_adding_regions();
  36.356    // do that for any other surv rate groupsx
  36.357  
  36.358 @@ -1495,8 +1490,9 @@
  36.359      double pause_time_ms = elapsed_ms;
  36.360  
  36.361      size_t diff = 0;
  36.362 -    if (_max_pending_cards >= _pending_cards)
  36.363 +    if (_max_pending_cards >= _pending_cards) {
  36.364        diff = _max_pending_cards - _pending_cards;
  36.365 +    }
  36.366      _pending_card_diff_seq->add((double) diff);
  36.367  
  36.368      double cost_per_card_ms = 0.0;
  36.369 @@ -1601,7 +1597,7 @@
  36.370    double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  36.371    adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  36.372  
  36.373 -  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
  36.374 +  _collectionSetChooser->verify();
  36.375  }
  36.376  
  36.377  #define EXT_SIZE_FORMAT "%d%s"
  36.378 @@ -1610,7 +1606,7 @@
  36.379    proper_unit_for_byte_size((bytes))
  36.380  
  36.381  void G1CollectorPolicy::print_heap_transition() {
  36.382 -  if (PrintGCDetails) {
  36.383 +  if (G1Log::finer()) {
  36.384      YoungList* young_list = _g1->young_list();
  36.385      size_t eden_bytes = young_list->eden_used_bytes();
  36.386      size_t survivor_bytes = young_list->survivor_used_bytes();
  36.387 @@ -1637,7 +1633,7 @@
  36.388        EXT_SIZE_PARAMS(capacity));
  36.389  
  36.390      _prev_eden_capacity = eden_capacity;
  36.391 -  } else if (PrintGC) {
  36.392 +  } else if (G1Log::fine()) {
  36.393      _g1->print_size_transition(gclog_or_tty,
  36.394                                 _cur_collection_pause_used_at_start_bytes,
  36.395                                 _g1->used(), _g1->capacity());
  36.396 @@ -1730,8 +1726,7 @@
  36.397    return region_elapsed_time_ms;
  36.398  }
  36.399  
  36.400 -size_t
  36.401 -G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  36.402 +size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  36.403    size_t bytes_to_copy;
  36.404    if (hr->is_marked())
  36.405      bytes_to_copy = hr->max_live_bytes();
  36.406 @@ -1745,8 +1740,8 @@
  36.407  }
  36.408  
  36.409  void
  36.410 -G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
  36.411 -                                          size_t survivor_cset_region_length) {
  36.412 +G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
  36.413 +                                            uint survivor_cset_region_length) {
  36.414    _eden_cset_region_length     = eden_cset_region_length;
  36.415    _survivor_cset_region_length = survivor_cset_region_length;
  36.416    _old_cset_region_length      = 0;
  36.417 @@ -2010,7 +2005,7 @@
  36.418  }
  36.419  #endif // PRODUCT
  36.420  
  36.421 -size_t G1CollectorPolicy::max_regions(int purpose) {
  36.422 +uint G1CollectorPolicy::max_regions(int purpose) {
  36.423    switch (purpose) {
  36.424      case GCAllocForSurvived:
  36.425        return _max_survivor_regions;
  36.426 @@ -2023,13 +2018,13 @@
  36.427  }
  36.428  
  36.429  void G1CollectorPolicy::update_max_gc_locker_expansion() {
  36.430 -  size_t expansion_region_num = 0;
  36.431 +  uint expansion_region_num = 0;
  36.432    if (GCLockerEdenExpansionPercent > 0) {
  36.433      double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  36.434      double expansion_region_num_d = perc * (double) _young_list_target_length;
  36.435      // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  36.436      // less than 1.0) we'll get 1.
  36.437 -    expansion_region_num = (size_t) ceil(expansion_region_num_d);
  36.438 +    expansion_region_num = (uint) ceil(expansion_region_num_d);
  36.439    } else {
  36.440      assert(expansion_region_num == 0, "sanity");
  36.441    }
  36.442 @@ -2043,34 +2038,12 @@
  36.443                   (double) _young_list_target_length / (double) SurvivorRatio;
  36.444    // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  36.445    // smaller than 1.0) we'll get 1.
  36.446 -  _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
  36.447 +  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
  36.448  
  36.449    _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  36.450          HeapRegion::GrainWords * _max_survivor_regions);
  36.451  }
  36.452  
  36.453 -#ifndef PRODUCT
  36.454 -class HRSortIndexIsOKClosure: public HeapRegionClosure {
  36.455 -  CollectionSetChooser* _chooser;
  36.456 -public:
  36.457 -  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  36.458 -    _chooser(chooser) {}
  36.459 -
  36.460 -  bool doHeapRegion(HeapRegion* r) {
  36.461 -    if (!r->continuesHumongous()) {
  36.462 -      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  36.463 -    }
  36.464 -    return false;
  36.465 -  }
  36.466 -};
  36.467 -
  36.468 -bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  36.469 -  HRSortIndexIsOKClosure cl(_collectionSetChooser);
  36.470 -  _g1->heap_region_iterate(&cl);
  36.471 -  return true;
  36.472 -}
  36.473 -#endif
  36.474 -
  36.475  bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  36.476                                                       GCCause::Cause gc_cause) {
  36.477    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  36.478 @@ -2168,8 +2141,8 @@
  36.479        // We will skip any region that's currently used as an old GC
  36.480        // alloc region (we should not consider those for collection
  36.481        // before we fill them up).
  36.482 -      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
  36.483 -        _hrSorted->addMarkedHeapRegion(r);
  36.484 +      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  36.485 +        _hrSorted->add_region(r);
  36.486        }
  36.487      }
  36.488      return false;
  36.489 @@ -2179,16 +2152,14 @@
  36.490  class ParKnownGarbageHRClosure: public HeapRegionClosure {
  36.491    G1CollectedHeap* _g1h;
  36.492    CollectionSetChooser* _hrSorted;
  36.493 -  jint _marked_regions_added;
  36.494 +  uint _marked_regions_added;
  36.495    size_t _reclaimable_bytes_added;
  36.496 -  jint _chunk_size;
  36.497 -  jint _cur_chunk_idx;
  36.498 -  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  36.499 -  int _worker;
  36.500 -  int _invokes;
  36.501 +  uint _chunk_size;
  36.502 +  uint _cur_chunk_idx;
  36.503 +  uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  36.504  
  36.505    void get_new_chunk() {
  36.506 -    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  36.507 +    _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
  36.508      _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  36.509    }
  36.510    void add_region(HeapRegion* r) {
  36.511 @@ -2196,7 +2167,7 @@
  36.512        get_new_chunk();
  36.513      }
  36.514      assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  36.515 -    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  36.516 +    _hrSorted->set_region(_cur_chunk_idx, r);
  36.517      _marked_regions_added++;
  36.518      _reclaimable_bytes_added += r->reclaimable_bytes();
  36.519      _cur_chunk_idx++;
  36.520 @@ -2204,104 +2175,79 @@
  36.521  
  36.522  public:
  36.523    ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  36.524 -                           jint chunk_size,
  36.525 -                           int worker) :
  36.526 +                           uint chunk_size) :
  36.527        _g1h(G1CollectedHeap::heap()),
  36.528 -      _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  36.529 +      _hrSorted(hrSorted), _chunk_size(chunk_size),
  36.530        _marked_regions_added(0), _reclaimable_bytes_added(0),
  36.531 -      _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
  36.532 +      _cur_chunk_idx(0), _cur_chunk_end(0) { }
  36.533  
  36.534    bool doHeapRegion(HeapRegion* r) {
  36.535 -    // We only include humongous regions in collection
  36.536 -    // sets when concurrent mark shows that their contained object is
  36.537 -    // unreachable.
  36.538 -    _invokes++;
  36.539 -
  36.540      // Do we have any marking information for this region?
  36.541      if (r->is_marked()) {
  36.542        // We will skip any region that's currently used as an old GC
  36.543        // alloc region (we should not consider those for collection
  36.544        // before we fill them up).
  36.545 -      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
  36.546 +      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  36.547          add_region(r);
  36.548        }
  36.549      }
  36.550      return false;
  36.551    }
  36.552 -  jint marked_regions_added() { return _marked_regions_added; }
  36.553 +  uint marked_regions_added() { return _marked_regions_added; }
  36.554    size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
  36.555 -  int invokes() { return _invokes; }
  36.556  };
  36.557  
  36.558  class ParKnownGarbageTask: public AbstractGangTask {
  36.559    CollectionSetChooser* _hrSorted;
  36.560 -  jint _chunk_size;
  36.561 +  uint _chunk_size;
  36.562    G1CollectedHeap* _g1;
  36.563  public:
  36.564 -  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  36.565 +  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
  36.566      AbstractGangTask("ParKnownGarbageTask"),
  36.567      _hrSorted(hrSorted), _chunk_size(chunk_size),
  36.568      _g1(G1CollectedHeap::heap()) { }
  36.569  
  36.570    void work(uint worker_id) {
  36.571 -    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
  36.572 -                                               _chunk_size,
  36.573 -                                               worker_id);
  36.574 +    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
  36.575 +
  36.576      // Back to zero for the claim value.
  36.577      _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
  36.578                                           _g1->workers()->active_workers(),
  36.579                                           HeapRegion::InitialClaimValue);
  36.580 -    jint regions_added = parKnownGarbageCl.marked_regions_added();
  36.581 +    uint regions_added = parKnownGarbageCl.marked_regions_added();
  36.582      size_t reclaimable_bytes_added =
  36.583                                     parKnownGarbageCl.reclaimable_bytes_added();
  36.584 -    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
  36.585 -    if (G1PrintParCleanupStats) {
  36.586 -      gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
  36.587 -                 worker_id, parKnownGarbageCl.invokes(), regions_added);
  36.588 -    }
  36.589 +    _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
  36.590    }
  36.591  };
  36.592  
  36.593  void
  36.594  G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  36.595 -  double start_sec;
  36.596 -  if (G1PrintParCleanupStats) {
  36.597 -    start_sec = os::elapsedTime();
  36.598 -  }
  36.599 +  _collectionSetChooser->clear();
  36.600  
  36.601 -  _collectionSetChooser->clearMarkedHeapRegions();
  36.602 -  double clear_marked_end_sec;
  36.603 -  if (G1PrintParCleanupStats) {
  36.604 -    clear_marked_end_sec = os::elapsedTime();
  36.605 -    gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
  36.606 -                           (clear_marked_end_sec - start_sec) * 1000.0);
  36.607 -  }
  36.608 -
  36.609 +  uint region_num = _g1->n_regions();
  36.610    if (G1CollectedHeap::use_parallel_gc_threads()) {
  36.611 -    const size_t OverpartitionFactor = 4;
  36.612 -    size_t WorkUnit;
  36.613 +    const uint OverpartitionFactor = 4;
  36.614 +    uint WorkUnit;
  36.615      // The use of MinChunkSize = 8 in the original code
  36.616      // causes some assertion failures when the total number of
  36.617      // region is less than 8.  The code here tries to fix that.
  36.618      // Should the original code also be fixed?
  36.619      if (no_of_gc_threads > 0) {
  36.620 -      const size_t MinWorkUnit =
  36.621 -        MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
  36.622 -      WorkUnit =
  36.623 -        MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
  36.624 -             MinWorkUnit);
  36.625 +      const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
  36.626 +      WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
  36.627 +                      MinWorkUnit);
  36.628      } else {
  36.629        assert(no_of_gc_threads > 0,
  36.630          "The active gc workers should be greater than 0");
  36.631        // In a product build do something reasonable to avoid a crash.
  36.632 -      const size_t MinWorkUnit =
  36.633 -        MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
  36.634 +      const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
  36.635        WorkUnit =
  36.636 -        MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  36.637 +        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
  36.638               MinWorkUnit);
  36.639      }
  36.640 -    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  36.641 -                                                             WorkUnit);
  36.642 +    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
  36.643 +                                                           WorkUnit);
  36.644      ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  36.645                                              (int) WorkUnit);
  36.646      _g1->workers()->run_task(&parKnownGarbageTask);
  36.647 @@ -2312,20 +2258,10 @@
  36.648      KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  36.649      _g1->heap_region_iterate(&knownGarbagecl);
  36.650    }
  36.651 -  double known_garbage_end_sec;
  36.652 -  if (G1PrintParCleanupStats) {
  36.653 -    known_garbage_end_sec = os::elapsedTime();
  36.654 -    gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  36.655 -                      (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  36.656 -  }
  36.657  
  36.658 -  _collectionSetChooser->sortMarkedHeapRegions();
  36.659 +  _collectionSetChooser->sort_regions();
  36.660 +
  36.661    double end_sec = os::elapsedTime();
  36.662 -  if (G1PrintParCleanupStats) {
  36.663 -    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  36.664 -                           (end_sec - known_garbage_end_sec) * 1000.0);
  36.665 -  }
  36.666 -
  36.667    double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  36.668    _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  36.669    _cur_mark_stop_world_time_ms += elapsed_time_ms;
  36.670 @@ -2541,13 +2477,13 @@
  36.671  bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
  36.672                                                  const char* false_action_str) {
  36.673    CollectionSetChooser* cset_chooser = _collectionSetChooser;
  36.674 -  if (cset_chooser->isEmpty()) {
  36.675 +  if (cset_chooser->is_empty()) {
  36.676      ergo_verbose0(ErgoMixedGCs,
  36.677                    false_action_str,
  36.678                    ergo_format_reason("candidate old regions not available"));
  36.679      return false;
  36.680    }
  36.681 -  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
  36.682 +  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  36.683    size_t capacity_bytes = _g1->capacity();
  36.684    double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  36.685    double threshold = (double) G1HeapWastePercent;
  36.686 @@ -2558,7 +2494,7 @@
  36.687                ergo_format_region("candidate old regions")
  36.688                ergo_format_byte_perc("reclaimable")
  36.689                ergo_format_perc("threshold"),
  36.690 -              cset_chooser->remainingRegions(),
  36.691 +              cset_chooser->remaining_regions(),
  36.692                reclaimable_bytes, perc, threshold);
  36.693      return false;
  36.694    }
  36.695 @@ -2569,7 +2505,7 @@
  36.696                  ergo_format_region("candidate old regions")
  36.697                  ergo_format_byte_perc("reclaimable")
  36.698                  ergo_format_perc("threshold"),
  36.699 -                cset_chooser->remainingRegions(),
  36.700 +                cset_chooser->remaining_regions(),
  36.701                  reclaimable_bytes, perc, threshold);
  36.702    return true;
  36.703  }
  36.704 @@ -2613,8 +2549,8 @@
  36.705    // pause are appended to the RHS of the young list, i.e.
  36.706    //   [Newly Young Regions ++ Survivors from last pause].
  36.707  
  36.708 -  size_t survivor_region_length = young_list->survivor_length();
  36.709 -  size_t eden_region_length = young_list->length() - survivor_region_length;
  36.710 +  uint survivor_region_length = young_list->survivor_length();
  36.711 +  uint eden_region_length = young_list->length() - survivor_region_length;
  36.712    init_cset_region_lengths(eden_region_length, survivor_region_length);
  36.713    hr = young_list->first_survivor_region();
  36.714    while (hr != NULL) {
  36.715 @@ -2652,11 +2588,11 @@
  36.716  
  36.717    if (!gcs_are_young()) {
  36.718      CollectionSetChooser* cset_chooser = _collectionSetChooser;
  36.719 -    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
  36.720 -    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
  36.721 -    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
  36.722 +    cset_chooser->verify();
  36.723 +    const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
  36.724 +    const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
  36.725  
  36.726 -    size_t expensive_region_num = 0;
  36.727 +    uint expensive_region_num = 0;
  36.728      bool check_time_remaining = adaptive_young_list_length();
  36.729      HeapRegion* hr = cset_chooser->peek();
  36.730      while (hr != NULL) {
  36.731 @@ -2741,7 +2677,7 @@
  36.732                      time_remaining_ms);
  36.733      }
  36.734  
  36.735 -    assert(cset_chooser->verify(), "CSet Chooser verification - post");
  36.736 +    cset_chooser->verify();
  36.737    }
  36.738  
  36.739    stop_incremental_cset_building();
    37.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Apr 19 12:18:46 2012 -0700
    37.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Apr 20 16:23:48 2012 -0700
    37.3 @@ -128,19 +128,19 @@
    37.4      SizerNewRatio
    37.5    };
    37.6    SizerKind _sizer_kind;
    37.7 -  size_t _min_desired_young_length;
    37.8 -  size_t _max_desired_young_length;
    37.9 +  uint _min_desired_young_length;
   37.10 +  uint _max_desired_young_length;
   37.11    bool _adaptive_size;
   37.12 -  size_t calculate_default_min_length(size_t new_number_of_heap_regions);
   37.13 -  size_t calculate_default_max_length(size_t new_number_of_heap_regions);
   37.14 +  uint calculate_default_min_length(uint new_number_of_heap_regions);
   37.15 +  uint calculate_default_max_length(uint new_number_of_heap_regions);
   37.16  
   37.17  public:
   37.18    G1YoungGenSizer();
   37.19 -  void heap_size_changed(size_t new_number_of_heap_regions);
   37.20 -  size_t min_desired_young_length() {
   37.21 +  void heap_size_changed(uint new_number_of_heap_regions);
   37.22 +  uint min_desired_young_length() {
   37.23      return _min_desired_young_length;
   37.24    }
   37.25 -  size_t max_desired_young_length() {
   37.26 +  uint max_desired_young_length() {
   37.27      return _max_desired_young_length;
   37.28    }
   37.29    bool adaptive_young_list_length() {
   37.30 @@ -175,7 +175,7 @@
   37.31  
   37.32    double _cur_collection_start_sec;
   37.33    size_t _cur_collection_pause_used_at_start_bytes;
   37.34 -  size_t _cur_collection_pause_used_regions_at_start;
   37.35 +  uint   _cur_collection_pause_used_regions_at_start;
   37.36    double _cur_collection_par_time_ms;
   37.37  
   37.38    double _cur_collection_code_root_fixup_time_ms;
   37.39 @@ -233,13 +233,13 @@
   37.40    // indicates whether we are in young or mixed GC mode
   37.41    bool _gcs_are_young;
   37.42  
   37.43 -  size_t _young_list_target_length;
   37.44 -  size_t _young_list_fixed_length;
   37.45 +  uint _young_list_target_length;
   37.46 +  uint _young_list_fixed_length;
   37.47    size_t _prev_eden_capacity; // used for logging
   37.48  
   37.49    // The max number of regions we can extend the eden by while the GC
   37.50    // locker is active. This should be >= _young_list_target_length;
   37.51 -  size_t _young_list_max_length;
   37.52 +  uint _young_list_max_length;
   37.53  
   37.54    bool                  _last_gc_was_young;
   37.55  
   37.56 @@ -257,7 +257,7 @@
   37.57    double                _gc_overhead_perc;
   37.58  
   37.59    double _reserve_factor;
   37.60 -  size_t _reserve_regions;
   37.61 +  uint _reserve_regions;
   37.62  
   37.63    bool during_marking() {
   37.64      return _during_marking;
   37.65 @@ -288,22 +288,20 @@
   37.66  
   37.67    TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
   37.68  
   37.69 -  TruncatedSeq* _young_gc_eff_seq;
   37.70 -
   37.71    G1YoungGenSizer* _young_gen_sizer;
   37.72  
   37.73 -  size_t _eden_cset_region_length;
   37.74 -  size_t _survivor_cset_region_length;
   37.75 -  size_t _old_cset_region_length;
   37.76 +  uint _eden_cset_region_length;
   37.77 +  uint _survivor_cset_region_length;
   37.78 +  uint _old_cset_region_length;
   37.79  
   37.80 -  void init_cset_region_lengths(size_t eden_cset_region_length,
   37.81 -                                size_t survivor_cset_region_length);
   37.82 +  void init_cset_region_lengths(uint eden_cset_region_length,
   37.83 +                                uint survivor_cset_region_length);
   37.84  
   37.85 -  size_t eden_cset_region_length()     { return _eden_cset_region_length;     }
   37.86 -  size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
   37.87 -  size_t old_cset_region_length()      { return _old_cset_region_length;      }
   37.88 +  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
   37.89 +  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
   37.90 +  uint old_cset_region_length()      { return _old_cset_region_length;      }
   37.91  
   37.92 -  size_t _free_regions_at_end_of_collection;
   37.93 +  uint _free_regions_at_end_of_collection;
   37.94  
   37.95    size_t _recorded_rs_lengths;
   37.96    size_t _max_rs_lengths;
   37.97 @@ -315,9 +313,6 @@
   37.98  
   37.99    size_t _rs_lengths_prediction;
  37.100  
  37.101 -  size_t _known_garbage_bytes;
  37.102 -  double _known_garbage_ratio;
  37.103 -
  37.104    double sigma() { return _sigma; }
  37.105  
  37.106    // A function that prevents us putting too much stock in small sample
  37.107 @@ -496,10 +491,10 @@
  37.108  
  37.109    void set_recorded_rs_lengths(size_t rs_lengths);
  37.110  
  37.111 -  size_t cset_region_length()       { return young_cset_region_length() +
  37.112 -                                             old_cset_region_length(); }
  37.113 -  size_t young_cset_region_length() { return eden_cset_region_length() +
  37.114 -                                             survivor_cset_region_length(); }
  37.115 +  uint cset_region_length()       { return young_cset_region_length() +
  37.116 +                                           old_cset_region_length(); }
  37.117 +  uint young_cset_region_length() { return eden_cset_region_length() +
  37.118 +                                           survivor_cset_region_length(); }
  37.119  
  37.120    void record_young_free_cset_time_ms(double time_ms) {
  37.121      _recorded_young_free_cset_time_ms = time_ms;
  37.122 @@ -509,10 +504,6 @@
  37.123      _recorded_non_young_free_cset_time_ms = time_ms;
  37.124    }
  37.125  
  37.126 -  double predict_young_gc_eff() {
  37.127 -    return get_new_neg_prediction(_young_gc_eff_seq);
  37.128 -  }
  37.129 -
  37.130    double predict_survivor_regions_evac_time();
  37.131  
  37.132    void cset_regions_freed() {
  37.133 @@ -522,20 +513,6 @@
  37.134      // also call it on any more surv rate groups
  37.135    }
  37.136  
  37.137 -  void set_known_garbage_bytes(size_t known_garbage_bytes) {
  37.138 -    _known_garbage_bytes = known_garbage_bytes;
  37.139 -    size_t heap_bytes = _g1->capacity();
  37.140 -    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  37.141 -  }
  37.142 -
  37.143 -  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
  37.144 -    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
  37.145 -
  37.146 -    _known_garbage_bytes -= known_garbage_bytes;
  37.147 -    size_t heap_bytes = _g1->capacity();
  37.148 -    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  37.149 -  }
  37.150 -
  37.151    G1MMUTracker* mmu_tracker() {
  37.152      return _mmu_tracker;
  37.153    }
  37.154 @@ -720,12 +697,12 @@
  37.155    // Calculate and return the minimum desired young list target
  37.156    // length. This is the minimum desired young list length according
  37.157    // to the user's inputs.
  37.158 -  size_t calculate_young_list_desired_min_length(size_t base_min_length);
  37.159 +  uint calculate_young_list_desired_min_length(uint base_min_length);
  37.160  
  37.161    // Calculate and return the maximum desired young list target
  37.162    // length. This is the maximum desired young list length according
  37.163    // to the user's inputs.
  37.164 -  size_t calculate_young_list_desired_max_length();
  37.165 +  uint calculate_young_list_desired_max_length();
  37.166  
  37.167    // Calculate and return the maximum young list target length that
  37.168    // can fit into the pause time goal. The parameters are: rs_lengths
  37.169 @@ -733,18 +710,18 @@
  37.170    // be, base_min_length is the alreay existing number of regions in
  37.171    // the young list, min_length and max_length are the desired min and
  37.172    // max young list length according to the user's inputs.
  37.173 -  size_t calculate_young_list_target_length(size_t rs_lengths,
  37.174 -                                            size_t base_min_length,
  37.175 -                                            size_t desired_min_length,
  37.176 -                                            size_t desired_max_length);
  37.177 +  uint calculate_young_list_target_length(size_t rs_lengths,
  37.178 +                                          uint base_min_length,
  37.179 +                                          uint desired_min_length,
  37.180 +                                          uint desired_max_length);
  37.181  
  37.182    // Check whether a given young length (young_length) fits into the
  37.183    // given target pause time and whether the prediction for the amount
  37.184    // of objects to be copied for the given length will fit into the
  37.185    // given free space (expressed by base_free_regions).  It is used by
  37.186    // calculate_young_list_target_length().
  37.187 -  bool predict_will_fit(size_t young_length, double base_time_ms,
  37.188 -                        size_t base_free_regions, double target_pause_time_ms);
  37.189 +  bool predict_will_fit(uint young_length, double base_time_ms,
  37.190 +                        uint base_free_regions, double target_pause_time_ms);
  37.191  
  37.192    // Count the number of bytes used in the CS.
  37.193    void count_CS_bytes_used();
  37.194 @@ -773,7 +750,7 @@
  37.195    }
  37.196  
  37.197    // This should be called after the heap is resized.
  37.198 -  void record_new_heap_size(size_t new_number_of_regions);
  37.199 +  void record_new_heap_size(uint new_number_of_regions);
  37.200  
  37.201    void init();
  37.202  
  37.203 @@ -1026,12 +1003,6 @@
  37.204    // exceeded the desired limit, return an amount to expand by.
  37.205    size_t expansion_amount();
  37.206  
  37.207 -#ifndef PRODUCT
  37.208 -  // Check any appropriate marked bytes info, asserting false if
  37.209 -  // something's wrong, else returning "true".
  37.210 -  bool assertMarkedBytesDataOK();
  37.211 -#endif
  37.212 -
  37.213    // Print tracing information.
  37.214    void print_tracing_info() const;
  37.215  
  37.216 @@ -1048,18 +1019,18 @@
  37.217    }
  37.218  
  37.219    bool is_young_list_full() {
  37.220 -    size_t young_list_length = _g1->young_list()->length();
  37.221 -    size_t young_list_target_length = _young_list_target_length;
  37.222 +    uint young_list_length = _g1->young_list()->length();
  37.223 +    uint young_list_target_length = _young_list_target_length;
  37.224      return young_list_length >= young_list_target_length;
  37.225    }
  37.226  
  37.227    bool can_expand_young_list() {
  37.228 -    size_t young_list_length = _g1->young_list()->length();
  37.229 -    size_t young_list_max_length = _young_list_max_length;
  37.230 +    uint young_list_length = _g1->young_list()->length();
  37.231 +    uint young_list_max_length = _young_list_max_length;
  37.232      return young_list_length < young_list_max_length;
  37.233    }
  37.234  
  37.235 -  size_t young_list_max_length() {
  37.236 +  uint young_list_max_length() {
  37.237      return _young_list_max_length;
  37.238    }
  37.239  
  37.240 @@ -1074,19 +1045,6 @@
  37.241      return _young_gen_sizer->adaptive_young_list_length();
  37.242    }
  37.243  
  37.244 -  inline double get_gc_eff_factor() {
  37.245 -    double ratio = _known_garbage_ratio;
  37.246 -
  37.247 -    double square = ratio * ratio;
  37.248 -    // square = square * square;
  37.249 -    double ret = square * 9.0 + 1.0;
  37.250 -#if 0
  37.251 -    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
  37.252 -#endif // 0
  37.253 -    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
  37.254 -    return ret;
  37.255 -  }
  37.256 -
  37.257  private:
  37.258    //
  37.259    // Survivor regions policy.
  37.260 @@ -1097,7 +1055,7 @@
  37.261    int _tenuring_threshold;
  37.262  
  37.263    // The limit on the number of regions allocated for survivors.
  37.264 -  size_t _max_survivor_regions;
  37.265 +  uint _max_survivor_regions;
  37.266  
  37.267    // For reporting purposes.
  37.268    size_t _eden_bytes_before_gc;
  37.269 @@ -1105,7 +1063,7 @@
  37.270    size_t _capacity_before_gc;
  37.271  
  37.272    // The amount of survor regions after a collection.
  37.273 -  size_t _recorded_survivor_regions;
  37.274 +  uint _recorded_survivor_regions;
  37.275    // List of survivor regions.
  37.276    HeapRegion* _recorded_survivor_head;
  37.277    HeapRegion* _recorded_survivor_tail;
  37.278 @@ -1127,9 +1085,9 @@
  37.279      return purpose == GCAllocForSurvived;
  37.280    }
  37.281  
  37.282 -  static const size_t REGIONS_UNLIMITED = ~(size_t)0;
  37.283 +  static const uint REGIONS_UNLIMITED = (uint) -1;
  37.284  
  37.285 -  size_t max_regions(int purpose);
  37.286 +  uint max_regions(int purpose);
  37.287  
  37.288    // The limit on regions for a particular purpose is reached.
  37.289    void note_alloc_region_limit_reached(int purpose) {
  37.290 @@ -1146,7 +1104,7 @@
  37.291      _survivor_surv_rate_group->stop_adding_regions();
  37.292    }
  37.293  
  37.294 -  void record_survivor_regions(size_t      regions,
  37.295 +  void record_survivor_regions(uint regions,
  37.296                                 HeapRegion* head,
  37.297                                 HeapRegion* tail) {
  37.298      _recorded_survivor_regions = regions;
  37.299 @@ -1154,12 +1112,11 @@
  37.300      _recorded_survivor_tail    = tail;
  37.301    }
  37.302  
  37.303 -  size_t recorded_survivor_regions() {
  37.304 +  uint recorded_survivor_regions() {
  37.305      return _recorded_survivor_regions;
  37.306    }
  37.307  
  37.308 -  void record_thread_age_table(ageTable* age_table)
  37.309 -  {
  37.310 +  void record_thread_age_table(ageTable* age_table) {
  37.311      _survivors_age_table.merge_par(age_table);
  37.312    }
  37.313  
    38.1 --- a/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Thu Apr 19 12:18:46 2012 -0700
    38.2 +++ b/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Fri Apr 20 16:23:48 2012 -0700
    38.3 @@ -1,5 +1,5 @@
    38.4  /*
    38.5 - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    38.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    38.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    38.8   *
    38.9   * This code is free software; you can redistribute it and/or modify it
   38.10 @@ -120,7 +120,7 @@
   38.11  
   38.12  // Single parameter format strings
   38.13  #define ergo_format_str(_name_)      ", " _name_ ": %s"
   38.14 -#define ergo_format_region(_name_)   ", " _name_ ": "SIZE_FORMAT" regions"
   38.15 +#define ergo_format_region(_name_)   ", " _name_ ": %u regions"
   38.16  #define ergo_format_byte(_name_)     ", " _name_ ": "SIZE_FORMAT" bytes"
   38.17  #define ergo_format_double(_name_)   ", " _name_ ": %1.2f"
   38.18  #define ergo_format_perc(_name_)     ", " _name_ ": %1.2f %%"
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/src/share/vm/gc_implementation/g1/g1Log.cpp	Fri Apr 20 16:23:48 2012 -0700
    39.3 @@ -0,0 +1,56 @@
    39.4 +/*
    39.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    39.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.7 + *
    39.8 + * This code is free software; you can redistribute it and/or modify it
    39.9 + * under the terms of the GNU General Public License version 2 only, as
   39.10 + * published by the Free Software Foundation.
   39.11 + *
   39.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   39.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   39.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   39.15 + * version 2 for more details (a copy is included in the LICENSE file that
   39.16 + * accompanied this code).
   39.17 + *
   39.18 + * You should have received a copy of the GNU General Public License version
   39.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   39.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   39.21 + *
   39.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   39.23 + * or visit www.oracle.com if you need additional information or have any
   39.24 + * questions.
   39.25 + *
   39.26 + */
   39.27 +
   39.28 +#include "precompiled.hpp"
   39.29 +#include "gc_implementation/g1/g1_globals.hpp"
   39.30 +#include "gc_implementation/g1/g1Log.hpp"
   39.31 +#include "runtime/globals.hpp"
   39.32 +
   39.33 +G1Log::LogLevel G1Log::_level = G1Log::LevelNone;
   39.34 +
   39.35 +// If G1LogLevel has not been set up we will use the values of PrintGC
   39.36 +// and PrintGCDetails for the logging level.
   39.37 +// - PrintGC maps to "fine".
   39.38 +// - PrintGCDetails maps to "finer".
   39.39 +void G1Log::init() {
   39.40 +  if (G1LogLevel != NULL && G1LogLevel[0] != '\0') {
   39.41 +    if (strncmp("none", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
   39.42 +      _level = LevelNone;
   39.43 +    } else if (strncmp("fine", G1LogLevel, 4) == 0 && G1LogLevel[4] == '\0') {
   39.44 +      _level = LevelFine;
   39.45 +    } else if (strncmp("finer", G1LogLevel, 5) == 0 && G1LogLevel[5] == '\0') {
   39.46 +      _level = LevelFiner;
   39.47 +    } else if (strncmp("finest", G1LogLevel, 6) == 0 && G1LogLevel[6] == '\0') {
   39.48 +      _level = LevelFinest;
   39.49 +    } else {
   39.50 +      warning("Unknown logging level '%s', should be one of 'fine', 'finer' or 'finest'.", G1LogLevel);
   39.51 +    }
   39.52 +  } else {
   39.53 +    if (PrintGCDetails) {
   39.54 +      _level = LevelFiner;
   39.55 +    } else if (PrintGC) {
   39.56 +      _level = LevelFine;
   39.57 +    }
   39.58 +  }
   39.59 +}
    40.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.2 +++ b/src/share/vm/gc_implementation/g1/g1Log.hpp	Fri Apr 20 16:23:48 2012 -0700
    40.3 @@ -0,0 +1,56 @@
    40.4 +/*
    40.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    40.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    40.7 + *
    40.8 + * This code is free software; you can redistribute it and/or modify it
    40.9 + * under the terms of the GNU General Public License version 2 only, as
   40.10 + * published by the Free Software Foundation.
   40.11 + *
   40.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   40.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   40.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   40.15 + * version 2 for more details (a copy is included in the LICENSE file that
   40.16 + * accompanied this code).
   40.17 + *
   40.18 + * You should have received a copy of the GNU General Public License version
   40.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   40.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   40.21 + *
   40.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   40.23 + * or visit www.oracle.com if you need additional information or have any
   40.24 + * questions.
   40.25 + *
   40.26 + */
   40.27 +
   40.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
   40.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
   40.30 +
   40.31 +#include "memory/allocation.hpp"
   40.32 +
   40.33 +class G1Log : public AllStatic {
   40.34 +  typedef enum {
   40.35 +    LevelNone,
   40.36 +    LevelFine,
   40.37 +    LevelFiner,
   40.38 +    LevelFinest
   40.39 +  } LogLevel;
   40.40 +
   40.41 +  static LogLevel _level;
   40.42 +
   40.43 + public:
   40.44 +  inline static bool fine() {
   40.45 +    return _level >= LevelFine;
   40.46 +  }
   40.47 +
   40.48 +  inline static bool finer() {
   40.49 +    return _level >= LevelFiner;
   40.50 +  }
   40.51 +
   40.52 +  inline static bool finest() {
   40.53 +    return _level == LevelFinest;
   40.54 +  }
   40.55 +
   40.56 +  static void init();
   40.57 +};
   40.58 +
   40.59 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1LOG_HPP
    41.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Apr 19 12:18:46 2012 -0700
    41.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Apr 20 16:23:48 2012 -0700
    41.3 @@ -29,6 +29,7 @@
    41.4  #include "classfile/vmSymbols.hpp"
    41.5  #include "code/codeCache.hpp"
    41.6  #include "code/icBuffer.hpp"
    41.7 +#include "gc_implementation/g1/g1Log.hpp"
    41.8  #include "gc_implementation/g1/g1MarkSweep.hpp"
    41.9  #include "memory/gcLocker.hpp"
   41.10  #include "memory/genCollectedHeap.hpp"
   41.11 @@ -126,7 +127,7 @@
   41.12  void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   41.13                                      bool clear_all_softrefs) {
   41.14    // Recursively traverse all live objects and mark them
   41.15 -  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
   41.16 +  TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
   41.17    GenMarkSweep::trace(" 1");
   41.18  
   41.19    SharedHeap* sh = SharedHeap::heap();
   41.20 @@ -192,8 +193,7 @@
   41.21      // fail. At the end of the GC, the orginal mark word values
   41.22      // (including hash values) are restored to the appropriate
   41.23      // objects.
   41.24 -    Universe::heap()->verify(/* allow dirty */ true,
   41.25 -                             /* silent      */ false,
   41.26 +    Universe::heap()->verify(/* silent      */ false,
   41.27                               /* option      */ VerifyOption_G1UseMarkWord);
   41.28  
   41.29      G1CollectedHeap* g1h = G1CollectedHeap::heap();
   41.30 @@ -291,7 +291,7 @@
   41.31    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   41.32    Generation* pg = g1h->perm_gen();
   41.33  
   41.34 -  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
   41.35 +  TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
   41.36    GenMarkSweep::trace("2");
   41.37  
   41.38    FindFirstRegionClosure cl;
   41.39 @@ -335,7 +335,7 @@
   41.40    Generation* pg = g1h->perm_gen();
   41.41  
   41.42    // Adjust the pointers to reflect the new locations
   41.43 -  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
   41.44 +  TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
   41.45    GenMarkSweep::trace("3");
   41.46  
   41.47    SharedHeap* sh = SharedHeap::heap();
   41.48 @@ -399,7 +399,7 @@
   41.49    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   41.50    Generation* pg = g1h->perm_gen();
   41.51  
   41.52 -  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
   41.53 +  TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
   41.54    GenMarkSweep::trace("4");
   41.55  
   41.56    pg->compact();
    42.1 --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Apr 19 12:18:46 2012 -0700
    42.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Fri Apr 20 16:23:48 2012 -0700
    42.3 @@ -1,5 +1,5 @@
    42.4  /*
    42.5 - * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
    42.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    42.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    42.8   *
    42.9   * This code is free software; you can redistribute it and/or modify it
   42.10 @@ -177,19 +177,19 @@
   42.11    // values we read here are possible (i.e., at a STW phase at the end
   42.12    // of a GC).
   42.13  
   42.14 -  size_t young_list_length = g1->young_list()->length();
   42.15 -  size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
   42.16 +  uint young_list_length = g1->young_list()->length();
   42.17 +  uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
   42.18    assert(young_list_length >= survivor_list_length, "invariant");
   42.19 -  size_t eden_list_length = young_list_length - survivor_list_length;
   42.20 +  uint eden_list_length = young_list_length - survivor_list_length;
   42.21    // Max length includes any potential extensions to the young gen
   42.22    // we'll do when the GC locker is active.
   42.23 -  size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
   42.24 +  uint young_list_max_length = g1->g1_policy()->young_list_max_length();
   42.25    assert(young_list_max_length >= survivor_list_length, "invariant");
   42.26 -  size_t eden_list_max_length = young_list_max_length - survivor_list_length;
   42.27 +  uint eden_list_max_length = young_list_max_length - survivor_list_length;
   42.28  
   42.29    _overall_used = g1->used_unlocked();
   42.30 -  _eden_used = eden_list_length * HeapRegion::GrainBytes;
   42.31 -  _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
   42.32 +  _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
   42.33 +  _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
   42.34    _young_region_num = young_list_length;
   42.35    _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
   42.36  
   42.37 @@ -207,7 +207,7 @@
   42.38    committed -= _survivor_committed + _old_committed;
   42.39  
   42.40    // Next, calculate and remove the committed size for the eden.
   42.41 -  _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
   42.42 +  _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
   42.43    // Somewhat defensive: be robust in case there are inaccuracies in
   42.44    // the calculations
   42.45    _eden_committed = MIN2(_eden_committed, committed);
   42.46 @@ -237,10 +237,10 @@
   42.47    // When a new eden region is allocated, only the eden_used size is
   42.48    // affected (since we have recalculated everything else at the last GC).
   42.49  
   42.50 -  size_t young_region_num = g1h()->young_list()->length();
   42.51 +  uint young_region_num = g1h()->young_list()->length();
   42.52    if (young_region_num > _young_region_num) {
   42.53 -    size_t diff = young_region_num - _young_region_num;
   42.54 -    _eden_used += diff * HeapRegion::GrainBytes;
   42.55 +    uint diff = young_region_num - _young_region_num;
   42.56 +    _eden_used += (size_t) diff * HeapRegion::GrainBytes;
   42.57      // Somewhat defensive: cap the eden used size to make sure it
   42.58      // never exceeds the committed size.
   42.59      _eden_used = MIN2(_eden_used, _eden_committed);
    43.1 --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Apr 19 12:18:46 2012 -0700
    43.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Fri Apr 20 16:23:48 2012 -0700
    43.3 @@ -1,5 +1,5 @@
    43.4  /*
    43.5 - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    43.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    43.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    43.8   *
    43.9   * This code is free software; you can redistribute it and/or modify it
   43.10 @@ -147,7 +147,7 @@
   43.11    size_t _overall_committed;
   43.12    size_t _overall_used;
   43.13  
   43.14 -  size_t _young_region_num;
   43.15 +  uint   _young_region_num;
   43.16    size_t _young_gen_committed;
   43.17    size_t _eden_committed;
   43.18    size_t _eden_used;
    44.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Apr 19 12:18:46 2012 -0700
    44.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Apr 20 16:23:48 2012 -0700
    44.3 @@ -26,7 +26,6 @@
    44.4  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1_GLOBALS_HPP
    44.5  
    44.6  #include "runtime/globals.hpp"
    44.7 -
    44.8  //
    44.9  // Defines all globals flags used by the garbage-first compiler.
   44.10  //
   44.11 @@ -128,9 +127,6 @@
   44.12              "Prints the liveness information for all regions in the heap "  \
   44.13              "at the end of a marking cycle.")                               \
   44.14                                                                              \
   44.15 -  develop(bool, G1PrintParCleanupStats, false,                              \
   44.16 -          "When true, print extra stats about parallel cleanup.")           \
   44.17 -                                                                            \
   44.18    product(intx, G1UpdateBufferSize, 256,                                    \
   44.19            "Size of an update buffer")                                       \
   44.20                                                                              \
   44.21 @@ -309,7 +305,10 @@
   44.22                                                                              \
   44.23    develop(uintx, G1OldCSetRegionThresholdPercent, 10,                       \
   44.24            "An upper bound for the number of old CSet regions expressed "    \
   44.25 -          "as a percentage of the heap size.")
   44.26 +          "as a percentage of the heap size.")                              \
   44.27 +                                                                            \
   44.28 +  experimental(ccstr, G1LogLevel, NULL,                                     \
   44.29 +          "Log level for G1 logging: fine, finer, finest")
   44.30  
   44.31  G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
   44.32  
    45.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Apr 19 12:18:46 2012 -0700
    45.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Apr 20 16:23:48 2012 -0700
    45.3 @@ -334,7 +334,7 @@
    45.4  
    45.5    guarantee(GrainWords == 0, "we should only set it once");
    45.6    GrainWords = GrainBytes >> LogHeapWordSize;
    45.7 -  guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");
    45.8 +  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
    45.9  
   45.10    guarantee(CardsPerRegion == 0, "we should only set it once");
   45.11    CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
   45.12 @@ -370,7 +370,6 @@
   45.13      _claimed = InitialClaimValue;
   45.14    }
   45.15    zero_marked_bytes();
   45.16 -  set_sort_index(-1);
   45.17  
   45.18    _offsets.resize(HeapRegion::GrainWords);
   45.19    init_top_at_mark_start();
   45.20 @@ -482,17 +481,16 @@
   45.21  #endif // _MSC_VER
   45.22  
   45.23  
   45.24 -HeapRegion::
   45.25 -HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
   45.26 -           MemRegion mr, bool is_zeroed)
   45.27 -  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
   45.28 +HeapRegion::HeapRegion(uint hrs_index,
   45.29 +                       G1BlockOffsetSharedArray* sharedOffsetArray,
   45.30 +                       MemRegion mr, bool is_zeroed) :
   45.31 +    G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
   45.32      _hrs_index(hrs_index),
   45.33      _humongous_type(NotHumongous), _humongous_start_region(NULL),
   45.34      _in_collection_set(false),
   45.35      _next_in_special_set(NULL), _orig_end(NULL),
   45.36      _claimed(InitialClaimValue), _evacuation_failed(false),
   45.37 -    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
   45.38 -    _gc_efficiency(0.0),
   45.39 +    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
   45.40      _young_type(NotYoung), _next_young_region(NULL),
   45.41      _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
   45.42  #ifdef ASSERT
   45.43 @@ -779,16 +777,15 @@
   45.44    G1OffsetTableContigSpace::print_on(st);
   45.45  }
   45.46  
   45.47 -void HeapRegion::verify(bool allow_dirty) const {
   45.48 +void HeapRegion::verify() const {
   45.49    bool dummy = false;
   45.50 -  verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
   45.51 +  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
   45.52  }
   45.53  
   45.54  // This really ought to be commoned up into OffsetTableContigSpace somehow.
   45.55  // We would need a mechanism to make that code skip dead objects.
   45.56  
   45.57 -void HeapRegion::verify(bool allow_dirty,
   45.58 -                        VerifyOption vo,
   45.59 +void HeapRegion::verify(VerifyOption vo,
   45.60                          bool* failures) const {
   45.61    G1CollectedHeap* g1 = G1CollectedHeap::heap();
   45.62    *failures = false;
    46.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Apr 19 12:18:46 2012 -0700
    46.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Apr 20 16:23:48 2012 -0700
    46.3 @@ -52,12 +52,15 @@
    46.4  class HeapRegion;
    46.5  class HeapRegionSetBase;
    46.6  
    46.7 -#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
    46.8 +#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
    46.9  #define HR_FORMAT_PARAMS(_hr_) \
   46.10                  (_hr_)->hrs_index(), \
   46.11                  (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
   46.12                  (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
   46.13  
   46.14 +// sentinel value for hrs_index
   46.15 +#define G1_NULL_HRS_INDEX ((uint) -1)
   46.16 +
   46.17  // A dirty card to oop closure for heap regions. It
   46.18  // knows how to get the G1 heap and how to use the bitmap
   46.19  // in the concurrent marker used by G1 to filter remembered
   46.20 @@ -235,7 +238,7 @@
   46.21  
   46.22   protected:
   46.23    // The index of this region in the heap region sequence.
   46.24 -  size_t  _hrs_index;
   46.25 +  uint  _hrs_index;
   46.26  
   46.27    HumongousType _humongous_type;
   46.28    // For a humongous region, region in which it starts.
   46.29 @@ -278,12 +281,8 @@
   46.30    size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
   46.31    size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
   46.32  
   46.33 -  // See "sort_index" method.  -1 means is not in the array.
   46.34 -  int _sort_index;
   46.35 -
   46.36 -  // <PREDICTION>
   46.37 +  // The calculated GC efficiency of the region.
   46.38    double _gc_efficiency;
   46.39 -  // </PREDICTION>
   46.40  
   46.41    enum YoungType {
   46.42      NotYoung,                   // a region is not young
   46.43 @@ -342,7 +341,7 @@
   46.44  
   46.45   public:
   46.46    // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
   46.47 -  HeapRegion(size_t hrs_index,
   46.48 +  HeapRegion(uint hrs_index,
   46.49               G1BlockOffsetSharedArray* sharedOffsetArray,
   46.50               MemRegion mr, bool is_zeroed);
   46.51  
   46.52 @@ -389,7 +388,7 @@
   46.53  
   46.54    // If this region is a member of a HeapRegionSeq, the index in that
   46.55    // sequence, otherwise -1.
   46.56 -  size_t hrs_index() const { return _hrs_index; }
   46.57 +  uint hrs_index() const { return _hrs_index; }
   46.58  
   46.59    // The number of bytes marked live in the region in the last marking phase.
   46.60    size_t marked_bytes()    { return _prev_marked_bytes; }
   46.61 @@ -626,16 +625,6 @@
   46.62    // last mark phase ended.
   46.63    bool is_marked() { return _prev_top_at_mark_start != bottom(); }
   46.64  
   46.65 -  // If "is_marked()" is true, then this is the index of the region in
   46.66 -  // an array constructed at the end of marking of the regions in a
   46.67 -  // "desirability" order.
   46.68 -  int sort_index() {
   46.69 -    return _sort_index;
   46.70 -  }
   46.71 -  void set_sort_index(int i) {
   46.72 -    _sort_index = i;
   46.73 -  }
   46.74 -
   46.75    void init_top_at_conc_mark_count() {
   46.76      _top_at_conc_mark_count = bottom();
   46.77    }
   46.78 @@ -823,10 +812,10 @@
   46.79    // Currently there is only one place where this is called with
   46.80    // vo == UseMarkWord, which is to verify the marking during a
   46.81    // full GC.
   46.82 -  void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;
   46.83 +  void verify(VerifyOption vo, bool *failures) const;
   46.84  
   46.85    // Override; it uses the "prev" marking information
   46.86 -  virtual void verify(bool allow_dirty) const;
   46.87 +  virtual void verify() const;
   46.88  };
   46.89  
   46.90  // HeapRegionClosure is used for iterating over regions.
    47.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Apr 19 12:18:46 2012 -0700
    47.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Apr 20 16:23:48 2012 -0700
    47.3 @@ -1,5 +1,5 @@
    47.4  /*
    47.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    47.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    47.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.8   *
    47.9   * This code is free software; you can redistribute it and/or modify it
   47.10 @@ -577,7 +577,7 @@
   47.11  #endif
   47.12  
   47.13  void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
   47.14 -  size_t cur_hrs_ind = hr()->hrs_index();
   47.15 +  size_t cur_hrs_ind = (size_t) hr()->hrs_index();
   47.16  
   47.17  #if HRRS_VERBOSE
   47.18    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
   47.19 @@ -841,7 +841,7 @@
   47.20  #endif
   47.21  
   47.22    // Set the corresponding coarse bit.
   47.23 -  size_t max_hrs_index = max->hr()->hrs_index();
   47.24 +  size_t max_hrs_index = (size_t) max->hr()->hrs_index();
   47.25    if (!_coarse_map.at(max_hrs_index)) {
   47.26      _coarse_map.at_put(max_hrs_index, true);
   47.27      _n_coarse_entries++;
   47.28 @@ -866,17 +866,20 @@
   47.29  void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
   47.30                                BitMap* region_bm, BitMap* card_bm) {
   47.31    // First eliminated garbage regions from the coarse map.
   47.32 -  if (G1RSScrubVerbose)
   47.33 -    gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
   47.34 -                           hr()->hrs_index());
   47.35 +  if (G1RSScrubVerbose) {
   47.36 +    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
   47.37 +  }
   47.38  
   47.39    assert(_coarse_map.size() == region_bm->size(), "Precondition");
   47.40 -  if (G1RSScrubVerbose)
   47.41 -    gclog_or_tty->print("   Coarse map: before = %d...", _n_coarse_entries);
   47.42 +  if (G1RSScrubVerbose) {
   47.43 +    gclog_or_tty->print("   Coarse map: before = "SIZE_FORMAT"...",
   47.44 +                        _n_coarse_entries);
   47.45 +  }
   47.46    _coarse_map.set_intersection(*region_bm);
   47.47    _n_coarse_entries = _coarse_map.count_one_bits();
   47.48 -  if (G1RSScrubVerbose)
   47.49 -    gclog_or_tty->print_cr("   after = %d.", _n_coarse_entries);
   47.50 +  if (G1RSScrubVerbose) {
   47.51 +    gclog_or_tty->print_cr("   after = "SIZE_FORMAT".", _n_coarse_entries);
   47.52 +  }
   47.53  
   47.54    // Now do the fine-grained maps.
   47.55    for (size_t i = 0; i < _max_fine_entries; i++) {
   47.56 @@ -885,23 +888,27 @@
   47.57      while (cur != NULL) {
   47.58        PosParPRT* nxt = cur->next();
   47.59        // If the entire region is dead, eliminate.
   47.60 -      if (G1RSScrubVerbose)
   47.61 -        gclog_or_tty->print_cr("     For other region "SIZE_FORMAT":",
   47.62 +      if (G1RSScrubVerbose) {
   47.63 +        gclog_or_tty->print_cr("     For other region %u:",
   47.64                                 cur->hr()->hrs_index());
   47.65 -      if (!region_bm->at(cur->hr()->hrs_index())) {
   47.66 +      }
   47.67 +      if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
   47.68          *prev = nxt;
   47.69          cur->set_next(NULL);
   47.70          _n_fine_entries--;
   47.71 -        if (G1RSScrubVerbose)
   47.72 +        if (G1RSScrubVerbose) {
   47.73            gclog_or_tty->print_cr("          deleted via region map.");
   47.74 +        }
   47.75          PosParPRT::free(cur);
   47.76        } else {
   47.77          // Do fine-grain elimination.
   47.78 -        if (G1RSScrubVerbose)
   47.79 +        if (G1RSScrubVerbose) {
   47.80            gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
   47.81 +        }
   47.82          cur->scrub(ctbs, card_bm);
   47.83 -        if (G1RSScrubVerbose)
   47.84 +        if (G1RSScrubVerbose) {
   47.85            gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
   47.86 +        }
   47.87          // Did that empty the table completely?
   47.88          if (cur->occupied() == 0) {
   47.89            *prev = nxt;
   47.90 @@ -1003,7 +1010,7 @@
   47.91  
   47.92  void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   47.93    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   47.94 -  size_t hrs_ind = from_hr->hrs_index();
   47.95 +  size_t hrs_ind = (size_t) from_hr->hrs_index();
   47.96    size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   47.97    if (del_single_region_table(ind, from_hr)) {
   47.98      assert(!_coarse_map.at(hrs_ind), "Inv");
   47.99 @@ -1011,7 +1018,7 @@
  47.100      _coarse_map.par_at_put(hrs_ind, 0);
  47.101    }
  47.102    // Check to see if any of the fcc entries come from here.
  47.103 -  size_t hr_ind = hr()->hrs_index();
  47.104 +  size_t hr_ind = (size_t) hr()->hrs_index();
  47.105    for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
  47.106      int fcc_ent = _from_card_cache[tid][hr_ind];
  47.107      if (fcc_ent != -1) {
  47.108 @@ -1223,7 +1230,7 @@
  47.109      if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
  47.110        _coarse_cur_region_cur_card = 0;
  47.111        HeapWord* r_bot =
  47.112 -        _g1h->region_at(_coarse_cur_region_index)->bottom();
  47.113 +        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
  47.114        _cur_region_card_offset = _bosa->index_for(r_bot);
  47.115      } else {
  47.116        return false;
    48.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Apr 19 12:18:46 2012 -0700
    48.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Apr 20 16:23:48 2012 -0700
    48.3 @@ -329,13 +329,13 @@
    48.4  
    48.5    // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
    48.6    // (Uses it to initialize from_card_cache).
    48.7 -  static void init_heap(size_t max_regions) {
    48.8 -    OtherRegionsTable::init_from_card_cache(max_regions);
    48.9 +  static void init_heap(uint max_regions) {
   48.10 +    OtherRegionsTable::init_from_card_cache((size_t) max_regions);
   48.11    }
   48.12  
   48.13    // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
   48.14 -  static void shrink_heap(size_t new_n_regs) {
   48.15 -    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
   48.16 +  static void shrink_heap(uint new_n_regs) {
   48.17 +    OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
   48.18    }
   48.19  
   48.20  #ifndef PRODUCT
    49.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Apr 19 12:18:46 2012 -0700
    49.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Apr 20 16:23:48 2012 -0700
    49.3 @@ -1,5 +1,5 @@
    49.4  /*
    49.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    49.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    49.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    49.8   *
    49.9   * This code is free software; you can redistribute it and/or modify it
   49.10 @@ -31,16 +31,15 @@
   49.11  
   49.12  // Private
   49.13  
   49.14 -size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
   49.15 -  size_t len = length();
   49.16 +uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
   49.17 +  uint len = length();
   49.18    assert(num > 1, "use this only for sequences of length 2 or greater");
   49.19    assert(from <= len,
   49.20 -         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
   49.21 -                 from, len));
   49.22 +         err_msg("from: %u should be valid and <= than %u", from, len));
   49.23  
   49.24 -  size_t curr = from;
   49.25 -  size_t first = G1_NULL_HRS_INDEX;
   49.26 -  size_t num_so_far = 0;
   49.27 +  uint curr = from;
   49.28 +  uint first = G1_NULL_HRS_INDEX;
   49.29 +  uint num_so_far = 0;
   49.30    while (curr < len && num_so_far < num) {
   49.31      if (at(curr)->is_empty()) {
   49.32        if (first == G1_NULL_HRS_INDEX) {
   49.33 @@ -60,7 +59,7 @@
   49.34      // we found enough space for the humongous object
   49.35      assert(from <= first && first < len, "post-condition");
   49.36      assert(first < curr && (curr - first) == num, "post-condition");
   49.37 -    for (size_t i = first; i < first + num; ++i) {
   49.38 +    for (uint i = first; i < first + num; ++i) {
   49.39        assert(at(i)->is_empty(), "post-condition");
   49.40      }
   49.41      return first;
   49.42 @@ -73,10 +72,10 @@
   49.43  // Public
   49.44  
   49.45  void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
   49.46 -                               size_t max_length) {
   49.47 -  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
   49.48 +                               uint max_length) {
   49.49 +  assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
   49.50           "bottom should be heap region aligned");
   49.51 -  assert((size_t) end % HeapRegion::GrainBytes == 0,
   49.52 +  assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
   49.53           "end should be heap region aligned");
   49.54  
   49.55    _length = 0;
   49.56 @@ -88,8 +87,8 @@
   49.57    _max_length = max_length;
   49.58  
   49.59    _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
   49.60 -  memset(_regions, 0, max_length * sizeof(HeapRegion*));
   49.61 -  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
   49.62 +  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
   49.63 +  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
   49.64  
   49.65    assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
   49.66           "bottom should be included in the region with index 0");
   49.67 @@ -105,7 +104,7 @@
   49.68    assert(_heap_bottom <= next_bottom, "invariant");
   49.69    while (next_bottom < new_end) {
   49.70      assert(next_bottom < _heap_end, "invariant");
   49.71 -    size_t index = length();
   49.72 +    uint index = length();
   49.73  
   49.74      assert(index < _max_length, "otherwise we cannot expand further");
   49.75      if (index == 0) {
   49.76 @@ -139,9 +138,9 @@
   49.77    return MemRegion(old_end, next_bottom);
   49.78  }
   49.79  
   49.80 -size_t HeapRegionSeq::free_suffix() {
   49.81 -  size_t res = 0;
   49.82 -  size_t index = length();
   49.83 +uint HeapRegionSeq::free_suffix() {
   49.84 +  uint res = 0;
   49.85 +  uint index = length();
   49.86    while (index > 0) {
   49.87      index -= 1;
   49.88      if (!at(index)->is_empty()) {
   49.89 @@ -152,27 +151,24 @@
   49.90    return res;
   49.91  }
   49.92  
   49.93 -size_t HeapRegionSeq::find_contiguous(size_t num) {
   49.94 +uint HeapRegionSeq::find_contiguous(uint num) {
   49.95    assert(num > 1, "use this only for sequences of length 2 or greater");
   49.96    assert(_next_search_index <= length(),
   49.97 -         err_msg("_next_search_indeex: "SIZE_FORMAT" "
   49.98 -                 "should be valid and <= than "SIZE_FORMAT,
   49.99 +         err_msg("_next_search_index: %u should be valid and <= than %u",
  49.100                   _next_search_index, length()));
  49.101  
  49.102 -  size_t start = _next_search_index;
  49.103 -  size_t res = find_contiguous_from(start, num);
  49.104 +  uint start = _next_search_index;
  49.105 +  uint res = find_contiguous_from(start, num);
  49.106    if (res == G1_NULL_HRS_INDEX && start > 0) {
  49.107      // Try starting from the beginning. If _next_search_index was 0,
  49.108      // no point in doing this again.
  49.109      res = find_contiguous_from(0, num);
  49.110    }
  49.111    if (res != G1_NULL_HRS_INDEX) {
  49.112 -    assert(res < length(),
  49.113 -           err_msg("res: "SIZE_FORMAT" should be valid", res));
  49.114 +    assert(res < length(), err_msg("res: %u should be valid", res));
  49.115      _next_search_index = res + num;
  49.116      assert(_next_search_index <= length(),
  49.117 -           err_msg("_next_search_indeex: "SIZE_FORMAT" "
  49.118 -                   "should be valid and <= than "SIZE_FORMAT,
  49.119 +           err_msg("_next_search_index: %u should be valid and <= than %u",
  49.120                     _next_search_index, length()));
  49.121    }
  49.122    return res;
  49.123 @@ -183,20 +179,20 @@
  49.124  }
  49.125  
  49.126  void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
  49.127 -  size_t hr_index = 0;
  49.128 +  uint hr_index = 0;
  49.129    if (hr != NULL) {
  49.130 -    hr_index = (size_t) hr->hrs_index();
  49.131 +    hr_index = hr->hrs_index();
  49.132    }
  49.133  
  49.134 -  size_t len = length();
  49.135 -  for (size_t i = hr_index; i < len; i += 1) {
  49.136 +  uint len = length();
  49.137 +  for (uint i = hr_index; i < len; i += 1) {
  49.138      bool res = blk->doHeapRegion(at(i));
  49.139      if (res) {
  49.140        blk->incomplete();
  49.141        return;
  49.142      }
  49.143    }
  49.144 -  for (size_t i = 0; i < hr_index; i += 1) {
  49.145 +  for (uint i = 0; i < hr_index; i += 1) {
  49.146      bool res = blk->doHeapRegion(at(i));
  49.147      if (res) {
  49.148        blk->incomplete();
  49.149 @@ -206,7 +202,7 @@
  49.150  }
  49.151  
  49.152  MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
  49.153 -                                   size_t* num_regions_deleted) {
  49.154 +                                   uint* num_regions_deleted) {
  49.155    // Reset this in case it's currently pointing into the regions that
  49.156    // we just removed.
  49.157    _next_search_index = 0;
  49.158 @@ -218,7 +214,7 @@
  49.159    assert(_allocated_length > 0, "we should have at least one region committed");
  49.160  
  49.161    // around the loop, i will be the next region to be removed
  49.162 -  size_t i = length() - 1;
  49.163 +  uint i = length() - 1;
  49.164    assert(i > 0, "we should never remove all regions");
  49.165    // [last_start, end) is the MemRegion that covers the regions we will remove.
  49.166    HeapWord* end = at(i)->end();
  49.167 @@ -249,29 +245,24 @@
  49.168  #ifndef PRODUCT
  49.169  void HeapRegionSeq::verify_optional() {
  49.170    guarantee(_length <= _allocated_length,
  49.171 -            err_msg("invariant: _length: "SIZE_FORMAT" "
  49.172 -                    "_allocated_length: "SIZE_FORMAT,
  49.173 +            err_msg("invariant: _length: %u _allocated_length: %u",
  49.174                      _length, _allocated_length));
  49.175    guarantee(_allocated_length <= _max_length,
  49.176 -            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
  49.177 -                    "_max_length: "SIZE_FORMAT,
  49.178 +            err_msg("invariant: _allocated_length: %u _max_length: %u",
  49.179                      _allocated_length, _max_length));
  49.180    guarantee(_next_search_index <= _length,
  49.181 -            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
  49.182 -                    "_length: "SIZE_FORMAT,
  49.183 +            err_msg("invariant: _next_search_index: %u _length: %u",
  49.184                      _next_search_index, _length));
  49.185  
  49.186    HeapWord* prev_end = _heap_bottom;
  49.187 -  for (size_t i = 0; i < _allocated_length; i += 1) {
  49.188 +  for (uint i = 0; i < _allocated_length; i += 1) {
  49.189      HeapRegion* hr = _regions[i];
  49.190 -    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
  49.191 +    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
  49.192      guarantee(hr->bottom() == prev_end,
  49.193 -              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
  49.194 -                      "prev_end: "PTR_FORMAT,
  49.195 +              err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
  49.196                        i, HR_FORMAT_PARAMS(hr), prev_end));
  49.197      guarantee(hr->hrs_index() == i,
  49.198 -              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
  49.199 -                      i, hr->hrs_index()));
  49.200 +              err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
  49.201      if (i < _length) {
  49.202        // Asserts will fire if i is >= _length
  49.203        HeapWord* addr = hr->bottom();
  49.204 @@ -290,8 +281,8 @@
  49.205        prev_end = hr->end();
  49.206      }
  49.207    }
  49.208 -  for (size_t i = _allocated_length; i < _max_length; i += 1) {
  49.209 -    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
  49.210 +  for (uint i = _allocated_length; i < _max_length; i += 1) {
  49.211 +    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
  49.212    }
  49.213  }
  49.214  #endif // PRODUCT
    50.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Apr 19 12:18:46 2012 -0700
    50.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Apr 20 16:23:48 2012 -0700
    50.3 @@ -1,5 +1,5 @@
    50.4  /*
    50.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    50.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    50.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    50.8   *
    50.9   * This code is free software; you can redistribute it and/or modify it
   50.10 @@ -29,8 +29,6 @@
   50.11  class HeapRegionClosure;
   50.12  class FreeRegionList;
   50.13  
   50.14 -#define G1_NULL_HRS_INDEX ((size_t) -1)
   50.15 -
   50.16  // This class keeps track of the region metadata (i.e., HeapRegion
   50.17  // instances). They are kept in the _regions array in address
   50.18  // order. A region's index in the array corresponds to its index in
   50.19 @@ -65,7 +63,7 @@
   50.20    HeapRegion** _regions_biased;
   50.21  
   50.22    // The number of regions committed in the heap.
   50.23 -  size_t _length;
   50.24 +  uint _length;
   50.25  
   50.26    // The address of the first reserved word in the heap.
   50.27    HeapWord* _heap_bottom;
   50.28 @@ -74,32 +72,32 @@
   50.29    HeapWord* _heap_end;
   50.30  
   50.31    // The log of the region byte size.
   50.32 -  size_t _region_shift;
   50.33 +  uint _region_shift;
   50.34  
   50.35    // A hint for which index to start searching from for humongous
   50.36    // allocations.
   50.37 -  size_t _next_search_index;
   50.38 +  uint _next_search_index;
   50.39  
   50.40    // The number of regions for which we have allocated HeapRegions for.
   50.41 -  size_t _allocated_length;
   50.42 +  uint _allocated_length;
   50.43  
   50.44    // The maximum number of regions in the heap.
   50.45 -  size_t _max_length;
   50.46 +  uint _max_length;
   50.47  
   50.48    // Find a contiguous set of empty regions of length num, starting
   50.49    // from the given index.
   50.50 -  size_t find_contiguous_from(size_t from, size_t num);
   50.51 +  uint find_contiguous_from(uint from, uint num);
   50.52  
   50.53    // Map a heap address to a biased region index. Assume that the
   50.54    // address is valid.
   50.55 -  inline size_t addr_to_index_biased(HeapWord* addr) const;
   50.56 +  inline uintx addr_to_index_biased(HeapWord* addr) const;
   50.57  
   50.58 -  void increment_length(size_t* length) {
   50.59 +  void increment_length(uint* length) {
   50.60      assert(*length < _max_length, "pre-condition");
   50.61      *length += 1;
   50.62    }
   50.63  
   50.64 -  void decrement_length(size_t* length) {
   50.65 +  void decrement_length(uint* length) {
   50.66      assert(*length > 0, "pre-condition");
   50.67      *length -= 1;
   50.68    }
   50.69 @@ -108,11 +106,11 @@
   50.70    // Empty contructor, we'll initialize it with the initialize() method.
   50.71    HeapRegionSeq() { }
   50.72  
   50.73 -  void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
   50.74 +  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
   50.75  
   50.76    // Return the HeapRegion at the given index. Assume that the index
   50.77    // is valid.
   50.78 -  inline HeapRegion* at(size_t index) const;
   50.79 +  inline HeapRegion* at(uint index) const;
   50.80  
   50.81    // If addr is within the committed space return its corresponding
   50.82    // HeapRegion, otherwise return NULL.
   50.83 @@ -123,10 +121,10 @@
   50.84    inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
   50.85  
   50.86    // Return the number of regions that have been committed in the heap.
   50.87 -  size_t length() const { return _length; }
   50.88 +  uint length() const { return _length; }
   50.89  
   50.90    // Return the maximum number of regions in the heap.
   50.91 -  size_t max_length() const { return _max_length; }
   50.92 +  uint max_length() const { return _max_length; }
   50.93  
   50.94    // Expand the sequence to reflect that the heap has grown from
   50.95    // old_end to new_end. Either create new HeapRegions, or re-use
   50.96 @@ -139,12 +137,12 @@
   50.97  
   50.98    // Return the number of contiguous regions at the end of the sequence
   50.99    // that are available for allocation.
  50.100 -  size_t free_suffix();
  50.101 +  uint free_suffix();
  50.102  
  50.103    // Find a contiguous set of empty regions of length num and return
  50.104    // the index of the first region or G1_NULL_HRS_INDEX if the
  50.105    // search was unsuccessful.
  50.106 -  size_t find_contiguous(size_t num);
  50.107 +  uint find_contiguous(uint num);
  50.108  
  50.109    // Apply blk->doHeapRegion() on all committed regions in address order,
  50.110    // terminating the iteration early if doHeapRegion() returns true.
  50.111 @@ -159,7 +157,7 @@
  50.112    // sequence. Return a MemRegion that corresponds to the address
  50.113    // range of the uncommitted regions. Assume shrink_bytes is page and
  50.114    // heap region aligned.
  50.115 -  MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
  50.116 +  MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
  50.117  
  50.118    // Do some sanity checking.
  50.119    void verify_optional() PRODUCT_RETURN;
    51.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Thu Apr 19 12:18:46 2012 -0700
    51.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Fri Apr 20 16:23:48 2012 -0700
    51.3 @@ -1,5 +1,5 @@
    51.4  /*
    51.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    51.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    51.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    51.8   *
    51.9   * This code is free software; you can redistribute it and/or modify it
   51.10 @@ -28,11 +28,11 @@
   51.11  #include "gc_implementation/g1/heapRegion.hpp"
   51.12  #include "gc_implementation/g1/heapRegionSeq.hpp"
   51.13  
   51.14 -inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
   51.15 +inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
   51.16    assert(_heap_bottom <= addr && addr < _heap_end,
   51.17           err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   51.18                   addr, _heap_bottom, _heap_end));
   51.19 -  size_t index = (size_t) addr >> _region_shift;
   51.20 +  uintx index = (uintx) addr >> _region_shift;
   51.21    return index;
   51.22  }
   51.23  
   51.24 @@ -40,7 +40,7 @@
   51.25    assert(_heap_bottom <= addr && addr < _heap_end,
   51.26           err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   51.27                   addr, _heap_bottom, _heap_end));
   51.28 -  size_t index_biased = addr_to_index_biased(addr);
   51.29 +  uintx index_biased = addr_to_index_biased(addr);
   51.30    HeapRegion* hr = _regions_biased[index_biased];
   51.31    assert(hr != NULL, "invariant");
   51.32    return hr;
   51.33 @@ -55,7 +55,7 @@
   51.34    return NULL;
   51.35  }
   51.36  
   51.37 -inline HeapRegion* HeapRegionSeq::at(size_t index) const {
   51.38 +inline HeapRegion* HeapRegionSeq::at(uint index) const {
   51.39    assert(index < length(), "pre-condition");
   51.40    HeapRegion* hr = _regions[index];
   51.41    assert(hr != NULL, "sanity");
    52.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Apr 19 12:18:46 2012 -0700
    52.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Apr 20 16:23:48 2012 -0700
    52.3 @@ -1,5 +1,5 @@
    52.4  /*
    52.5 - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    52.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    52.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    52.8   *
    52.9   * This code is free software; you can redistribute it and/or modify it
   52.10 @@ -25,28 +25,26 @@
   52.11  #include "precompiled.hpp"
   52.12  #include "gc_implementation/g1/heapRegionSet.inline.hpp"
   52.13  
   52.14 -size_t HeapRegionSetBase::_unrealistically_long_length = 0;
   52.15 +uint HeapRegionSetBase::_unrealistically_long_length = 0;
   52.16  HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
   52.17  
   52.18  //////////////////// HeapRegionSetBase ////////////////////
   52.19  
   52.20 -void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
   52.21 +void HeapRegionSetBase::set_unrealistically_long_length(uint len) {
   52.22    guarantee(_unrealistically_long_length == 0, "should only be set once");
   52.23    _unrealistically_long_length = len;
   52.24  }
   52.25  
   52.26 -size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
   52.27 +uint HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
   52.28    assert(hr->startsHumongous(), "pre-condition");
   52.29    assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
   52.30 -  size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
   52.31 +  uint region_num = (uint) (hr->capacity() >> HeapRegion::LogOfHRGrainBytes);
   52.32    assert(region_num > 0, "sanity");
   52.33    return region_num;
   52.34  }
   52.35  
   52.36  void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
   52.37 -  msg->append("[%s] %s "
   52.38 -              "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
   52.39 -              "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
   52.40 +  msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
   52.41                name(), message, length(), region_num(),
   52.42                total_capacity_bytes(), total_used_bytes());
   52.43    fill_in_ext_msg_extra(msg);
   52.44 @@ -170,13 +168,11 @@
   52.45           hrs_ext_msg(this, "verification should be in progress"));
   52.46  
   52.47    guarantee(length() == _calc_length,
   52.48 -            hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
   52.49 -                        "calc length: "SIZE_FORMAT,
   52.50 +            hrs_err_msg("[%s] length: %u should be == calc length: %u",
   52.51                          name(), length(), _calc_length));
   52.52  
   52.53    guarantee(region_num() == _calc_region_num,
   52.54 -            hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
   52.55 -                        "calc region num: "SIZE_FORMAT,
   52.56 +            hrs_err_msg("[%s] region num: %u should be == calc region num: %u",
   52.57                          name(), region_num(), _calc_region_num));
   52.58  
   52.59    guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
   52.60 @@ -211,8 +207,8 @@
   52.61    out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
   52.62    out->print_cr("    empty             : %s", BOOL_TO_STR(regions_empty()));
   52.63    out->print_cr("  Attributes");
   52.64 -  out->print_cr("    length            : "SIZE_FORMAT_W(14), length());
   52.65 -  out->print_cr("    region num        : "SIZE_FORMAT_W(14), region_num());
   52.66 +  out->print_cr("    length            : %14u", length());
   52.67 +  out->print_cr("    region num        : %14u", region_num());
   52.68    out->print_cr("    total capacity    : "SIZE_FORMAT_W(14)" bytes",
   52.69                  total_capacity_bytes());
   52.70    out->print_cr("    total used        : "SIZE_FORMAT_W(14)" bytes",
   52.71 @@ -243,14 +239,12 @@
   52.72    if (proxy_set->is_empty()) return;
   52.73  
   52.74    assert(proxy_set->length() <= _length,
   52.75 -         hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
   52.76 -                     "should be <= length: "SIZE_FORMAT,
   52.77 +         hrs_err_msg("[%s] proxy set length: %u should be <= length: %u",
   52.78                       name(), proxy_set->length(), _length));
   52.79    _length -= proxy_set->length();
   52.80  
   52.81    assert(proxy_set->region_num() <= _region_num,
   52.82 -         hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
   52.83 -                     "should be <= region num: "SIZE_FORMAT,
   52.84 +         hrs_err_msg("[%s] proxy set region num: %u should be <= region num: %u",
   52.85                       name(), proxy_set->region_num(), _region_num));
   52.86    _region_num -= proxy_set->region_num();
   52.87  
   52.88 @@ -369,17 +363,17 @@
   52.89    verify_optional();
   52.90  }
   52.91  
   52.92 -void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
   52.93 +void HeapRegionLinkedList::remove_all_pending(uint target_count) {
   52.94    hrs_assert_mt_safety_ok(this);
   52.95    assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
   52.96    assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
   52.97  
   52.98    verify_optional();
   52.99 -  DEBUG_ONLY(size_t old_length = length();)
  52.100 +  DEBUG_ONLY(uint old_length = length();)
  52.101  
  52.102    HeapRegion* curr = _head;
  52.103    HeapRegion* prev = NULL;
  52.104 -  size_t count = 0;
  52.105 +  uint count = 0;
  52.106    while (curr != NULL) {
  52.107      hrs_assert_region_ok(this, curr, this);
  52.108      HeapRegion* next = curr->next();
  52.109 @@ -387,7 +381,7 @@
  52.110      if (curr->pending_removal()) {
  52.111        assert(count < target_count,
  52.112               hrs_err_msg("[%s] should not come across more regions "
  52.113 -                         "pending for removal than target_count: "SIZE_FORMAT,
  52.114 +                         "pending for removal than target_count: %u",
  52.115                           name(), target_count));
  52.116  
  52.117        if (prev == NULL) {
  52.118 @@ -422,12 +416,11 @@
  52.119    }
  52.120  
  52.121    assert(count == target_count,
  52.122 -         hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
  52.123 -                     "target_count: "SIZE_FORMAT, name(), count, target_count));
  52.124 +         hrs_err_msg("[%s] count: %u should be == target_count: %u",
  52.125 +                     name(), count, target_count));
  52.126    assert(length() + target_count == old_length,
  52.127           hrs_err_msg("[%s] new length should be consistent "
  52.128 -                     "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
  52.129 -                     "target_count: "SIZE_FORMAT,
  52.130 +                     "new length: %u old length: %u target_count: %u",
  52.131                       name(), length(), old_length, target_count));
  52.132  
  52.133    verify_optional();
  52.134 @@ -444,16 +437,16 @@
  52.135    HeapRegion* curr  = _head;
  52.136    HeapRegion* prev1 = NULL;
  52.137    HeapRegion* prev0 = NULL;
  52.138 -  size_t      count = 0;
  52.139 +  uint        count = 0;
  52.140    while (curr != NULL) {
  52.141      verify_next_region(curr);
  52.142  
  52.143      count += 1;
  52.144      guarantee(count < _unrealistically_long_length,
  52.145 -              hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
  52.146 +              hrs_err_msg("[%s] the calculated length: %u "
  52.147                            "seems very long, is there maybe a cycle? "
  52.148                            "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
  52.149 -                          "prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
  52.150 +                          "prev1: "PTR_FORMAT" length: %u",
  52.151                            name(), count, curr, prev0, prev1, length()));
  52.152  
  52.153      prev1 = prev0;
    53.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Thu Apr 19 12:18:46 2012 -0700
    53.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Apr 20 16:23:48 2012 -0700
    53.3 @@ -62,20 +62,20 @@
    53.4    friend class VMStructs;
    53.5  
    53.6  protected:
    53.7 -  static size_t calculate_region_num(HeapRegion* hr);
    53.8 +  static uint calculate_region_num(HeapRegion* hr);
    53.9  
   53.10 -  static size_t _unrealistically_long_length;
   53.11 +  static uint _unrealistically_long_length;
   53.12  
   53.13    // The number of regions added to the set. If the set contains
   53.14    // only humongous regions, this reflects only 'starts humongous'
   53.15    // regions and does not include 'continues humongous' ones.
   53.16 -  size_t _length;
   53.17 +  uint _length;
   53.18  
   53.19    // The total number of regions represented by the set. If the set
   53.20    // does not contain humongous regions, this should be the same as
   53.21    // _length. If the set contains only humongous regions, this will
   53.22    // include the 'continues humongous' regions.
   53.23 -  size_t _region_num;
   53.24 +  uint _region_num;
   53.25  
   53.26    // We don't keep track of the total capacity explicitly, we instead
   53.27    // recalculate it based on _region_num and the heap region size.
   53.28 @@ -86,8 +86,8 @@
   53.29    const char* _name;
   53.30  
   53.31    bool        _verify_in_progress;
   53.32 -  size_t      _calc_length;
   53.33 -  size_t      _calc_region_num;
   53.34 +  uint        _calc_length;
   53.35 +  uint        _calc_region_num;
   53.36    size_t      _calc_total_capacity_bytes;
   53.37    size_t      _calc_total_used_bytes;
   53.38  
   53.39 @@ -153,18 +153,18 @@
   53.40    HeapRegionSetBase(const char* name);
   53.41  
   53.42  public:
   53.43 -  static void set_unrealistically_long_length(size_t len);
   53.44 +  static void set_unrealistically_long_length(uint len);
   53.45  
   53.46    const char* name() { return _name; }
   53.47  
   53.48 -  size_t length() { return _length; }
   53.49 +  uint length() { return _length; }
   53.50  
   53.51    bool is_empty() { return _length == 0; }
   53.52  
   53.53 -  size_t region_num() { return _region_num; }
   53.54 +  uint region_num() { return _region_num; }
   53.55  
   53.56    size_t total_capacity_bytes() {
   53.57 -    return region_num() << HeapRegion::LogOfHRGrainBytes;
   53.58 +    return (size_t) region_num() << HeapRegion::LogOfHRGrainBytes;
   53.59    }
   53.60  
   53.61    size_t total_used_bytes() { return _total_used_bytes; }
   53.62 @@ -341,7 +341,7 @@
   53.63    // of regions that are pending for removal in the list, and
   53.64    // target_count should be > 1 (currently, we never need to remove a
   53.65    // single region using this).
   53.66 -  void remove_all_pending(size_t target_count);
   53.67 +  void remove_all_pending(uint target_count);
   53.68  
   53.69    virtual void verify();
   53.70  
    54.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Thu Apr 19 12:18:46 2012 -0700
    54.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Apr 20 16:23:48 2012 -0700
    54.3 @@ -1,5 +1,5 @@
    54.4  /*
    54.5 - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    54.6 + * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    54.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    54.8   *
    54.9   * This code is free software; you can redistribute it and/or modify it
   54.10 @@ -54,15 +54,15 @@
   54.11    assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
   54.12    _length -= 1;
   54.13  
   54.14 -  size_t region_num_diff;
   54.15 +  uint region_num_diff;
   54.16    if (!hr->isHumongous()) {
   54.17      region_num_diff = 1;
   54.18    } else {
   54.19      region_num_diff = calculate_region_num(hr);
   54.20    }
   54.21    assert(region_num_diff <= _region_num,
   54.22 -         hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
   54.23 -                     "should be <= region num: "SIZE_FORMAT,
   54.24 +         hrs_err_msg("[%s] region's region num: %u "
   54.25 +                     "should be <= region num: %u",
   54.26                       name(), region_num_diff, _region_num));
   54.27    _region_num -= region_num_diff;
   54.28  
    55.1 --- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Apr 19 12:18:46 2012 -0700
    55.2 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Fri Apr 20 16:23:48 2012 -0700
    55.3 @@ -1,5 +1,5 @@
    55.4  /*
    55.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    55.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    55.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    55.8   *
    55.9   * This code is free software; you can redistribute it and/or modify it
   55.10 @@ -481,8 +481,7 @@
   55.11  
   55.12  bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
   55.13  #if SPARSE_PRT_VERBOSE
   55.14 -  gclog_or_tty->print_cr("  Adding card %d from region %d to region "
   55.15 -                         SIZE_FORMAT" sparse.",
   55.16 +  gclog_or_tty->print_cr("  Adding card %d from region %d to region %u sparse.",
   55.17                           card_index, region_id, _hr->hrs_index());
   55.18  #endif
   55.19    if (_next->occupied_entries() * 2 > _next->capacity()) {
   55.20 @@ -534,7 +533,7 @@
   55.21    _next = new RSHashTable(last->capacity() * 2);
   55.22  
   55.23  #if SPARSE_PRT_VERBOSE
   55.24 -  gclog_or_tty->print_cr("  Expanded sparse table for "SIZE_FORMAT" to %d.",
   55.25 +  gclog_or_tty->print_cr("  Expanded sparse table for %u to %d.",
   55.26                           _hr->hrs_index(), _next->capacity());
   55.27  #endif
   55.28    for (size_t i = 0; i < last->capacity(); i++) {
    56.1 --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Apr 19 12:18:46 2012 -0700
    56.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Apr 20 16:23:48 2012 -0700
    56.3 @@ -34,7 +34,7 @@
    56.4    static_field(HeapRegion, GrainBytes, size_t)                                \
    56.5                                                                                \
    56.6    nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
    56.7 -  nonstatic_field(HeapRegionSeq,   _length,  size_t)                          \
    56.8 +  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
    56.9                                                                                \
   56.10    nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   56.11    nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   56.12 @@ -50,8 +50,8 @@
   56.13    nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
   56.14    nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
   56.15                                                                                \
   56.16 -  nonstatic_field(HeapRegionSetBase,   _length,             size_t)           \
   56.17 -  nonstatic_field(HeapRegionSetBase,   _region_num,         size_t)           \
   56.18 +  nonstatic_field(HeapRegionSetBase,   _length,             uint)             \
   56.19 +  nonstatic_field(HeapRegionSetBase,   _region_num,         uint)             \
   56.20    nonstatic_field(HeapRegionSetBase,   _total_used_bytes,   size_t)           \
   56.21  
   56.22  
    57.1 --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Apr 19 12:18:46 2012 -0700
    57.2 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Apr 20 16:23:48 2012 -0700
    57.3 @@ -26,6 +26,7 @@
    57.4  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    57.5  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    57.6  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    57.7 +#include "gc_implementation/g1/g1Log.hpp"
    57.8  #include "gc_implementation/g1/vm_operations_g1.hpp"
    57.9  #include "gc_implementation/shared/isGCActiveMark.hpp"
   57.10  #include "gc_implementation/g1/vm_operations_g1.hpp"
   57.11 @@ -223,9 +224,9 @@
   57.12  }
   57.13  
   57.14  void VM_CGC_Operation::doit() {
   57.15 -  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   57.16 -  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   57.17 -  TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty);
   57.18 +  gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   57.19 +  TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   57.20 +  TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
   57.21    SharedHeap* sh = SharedHeap::heap();
   57.22    // This could go away if CollectedHeap gave access to _gc_is_active...
   57.23    if (sh != NULL) {
    58.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Thu Apr 19 12:18:46 2012 -0700
    58.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Apr 20 16:23:48 2012 -0700
    58.3 @@ -42,7 +42,7 @@
    58.4  
    58.5   protected:
    58.6    template <class T> void do_oop_work(T* p) {
    58.7 -    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    58.8 +    oop obj = oopDesc::load_decode_heap_oop(p);
    58.9      if (_young_gen->is_in_reserved(obj) &&
   58.10          !_card_table->addr_is_marked_imprecise(p)) {
   58.11        // Don't overwrite the first missing card mark
    59.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Apr 19 12:18:46 2012 -0700
    59.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Apr 20 16:23:48 2012 -0700
    59.3 @@ -911,23 +911,23 @@
    59.4  }
    59.5  
    59.6  
    59.7 -void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
    59.8 +void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
    59.9    // Why do we need the total_collections()-filter below?
   59.10    if (total_collections() > 0) {
   59.11      if (!silent) {
   59.12        gclog_or_tty->print("permanent ");
   59.13      }
   59.14 -    perm_gen()->verify(allow_dirty);
   59.15 +    perm_gen()->verify();
   59.16  
   59.17      if (!silent) {
   59.18        gclog_or_tty->print("tenured ");
   59.19      }
   59.20 -    old_gen()->verify(allow_dirty);
   59.21 +    old_gen()->verify();
   59.22  
   59.23      if (!silent) {
   59.24        gclog_or_tty->print("eden ");
   59.25      }
   59.26 -    young_gen()->verify(allow_dirty);
   59.27 +    young_gen()->verify();
   59.28    }
   59.29  }
   59.30  
    60.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Apr 19 12:18:46 2012 -0700
    60.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Apr 20 16:23:48 2012 -0700
    60.3 @@ -257,7 +257,7 @@
    60.4    virtual void gc_threads_do(ThreadClosure* tc) const;
    60.5    virtual void print_tracing_info() const;
    60.6  
    60.7 -  void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);
    60.8 +  void verify(bool silent, VerifyOption option /* ignored */);
    60.9  
   60.10    void print_heap_change(size_t prev_used);
   60.11  
    61.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Thu Apr 19 12:18:46 2012 -0700
    61.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Fri Apr 20 16:23:48 2012 -0700
    61.3 @@ -1,5 +1,5 @@
    61.4  /*
    61.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    61.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    61.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    61.8   *
    61.9   * This code is free software; you can redistribute it and/or modify it
   61.10 @@ -477,8 +477,8 @@
   61.11  }
   61.12  #endif
   61.13  
   61.14 -void PSOldGen::verify(bool allow_dirty) {
   61.15 -  object_space()->verify(allow_dirty);
   61.16 +void PSOldGen::verify() {
   61.17 +  object_space()->verify();
   61.18  }
   61.19  class VerifyObjectStartArrayClosure : public ObjectClosure {
   61.20    PSOldGen* _gen;
    62.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Thu Apr 19 12:18:46 2012 -0700
    62.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Fri Apr 20 16:23:48 2012 -0700
    62.3 @@ -1,5 +1,5 @@
    62.4  /*
    62.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    62.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    62.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.8   *
    62.9   * This code is free software; you can redistribute it and/or modify it
   62.10 @@ -174,7 +174,7 @@
   62.11    virtual void print_on(outputStream* st) const;
   62.12    void print_used_change(size_t prev_used) const;
   62.13  
   62.14 -  void verify(bool allow_dirty);
   62.15 +  void verify();
   62.16    void verify_object_start_array();
   62.17  
   62.18    // These should not used
    63.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Thu Apr 19 12:18:46 2012 -0700
    63.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Fri Apr 20 16:23:48 2012 -0700
    63.3 @@ -1,5 +1,5 @@
    63.4  /*
    63.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    63.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    63.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.8   *
    63.9   * This code is free software; you can redistribute it and/or modify it
   63.10 @@ -937,10 +937,10 @@
   63.11    }
   63.12  }
   63.13  
   63.14 -void PSYoungGen::verify(bool allow_dirty) {
   63.15 -  eden_space()->verify(allow_dirty);
   63.16 -  from_space()->verify(allow_dirty);
   63.17 -  to_space()->verify(allow_dirty);
   63.18 +void PSYoungGen::verify() {
   63.19 +  eden_space()->verify();
   63.20 +  from_space()->verify();
   63.21 +  to_space()->verify();
   63.22  }
   63.23  
   63.24  #ifndef PRODUCT
    64.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Thu Apr 19 12:18:46 2012 -0700
    64.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Fri Apr 20 16:23:48 2012 -0700
    64.3 @@ -1,5 +1,5 @@
    64.4  /*
    64.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    64.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    64.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    64.8   *
    64.9   * This code is free software; you can redistribute it and/or modify it
   64.10 @@ -181,7 +181,7 @@
   64.11    void print_used_change(size_t prev_used) const;
   64.12    virtual const char* name() const { return "PSYoungGen"; }
   64.13  
   64.14 -  void verify(bool allow_dirty);
   64.15 +  void verify();
   64.16  
   64.17    // Space boundary invariant checker
   64.18    void space_invariants() PRODUCT_RETURN;
    65.1 --- a/src/share/vm/gc_implementation/shared/immutableSpace.cpp	Thu Apr 19 12:18:46 2012 -0700
    65.2 +++ b/src/share/vm/gc_implementation/shared/immutableSpace.cpp	Fri Apr 20 16:23:48 2012 -0700
    65.3 @@ -1,5 +1,5 @@
    65.4  /*
    65.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    65.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    65.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    65.8   *
    65.9   * This code is free software; you can redistribute it and/or modify it
   65.10 @@ -70,7 +70,7 @@
   65.11  
   65.12  #endif
   65.13  
   65.14 -void ImmutableSpace::verify(bool allow_dirty) {
   65.15 +void ImmutableSpace::verify() {
   65.16    HeapWord* p = bottom();
   65.17    HeapWord* t = end();
   65.18    HeapWord* prev_p = NULL;
    66.1 --- a/src/share/vm/gc_implementation/shared/immutableSpace.hpp	Thu Apr 19 12:18:46 2012 -0700
    66.2 +++ b/src/share/vm/gc_implementation/shared/immutableSpace.hpp	Fri Apr 20 16:23:48 2012 -0700
    66.3 @@ -1,5 +1,5 @@
    66.4  /*
    66.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    66.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    66.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    66.8   *
    66.9   * This code is free software; you can redistribute it and/or modify it
   66.10 @@ -65,7 +65,7 @@
   66.11    // Debugging
   66.12    virtual void print() const            PRODUCT_RETURN;
   66.13    virtual void print_short() const      PRODUCT_RETURN;
   66.14 -  virtual void verify(bool allow_dirty);
   66.15 +  virtual void verify();
   66.16  };
   66.17  
   66.18  #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_IMMUTABLESPACE_HPP
    67.1 --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Thu Apr 19 12:18:46 2012 -0700
    67.2 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Fri Apr 20 16:23:48 2012 -0700
    67.3 @@ -891,12 +891,12 @@
    67.4    }
    67.5  }
    67.6  
    67.7 -void MutableNUMASpace::verify(bool allow_dirty) {
    67.8 +void MutableNUMASpace::verify() {
    67.9    // This can be called after setting an arbitary value to the space's top,
   67.10    // so an object can cross the chunk boundary. We ensure the parsablity
   67.11    // of the space and just walk the objects in linear fashion.
   67.12    ensure_parsability();
   67.13 -  MutableSpace::verify(allow_dirty);
   67.14 +  MutableSpace::verify();
   67.15  }
   67.16  
   67.17  // Scan pages and gather stats about page placement and size.
    68.1 --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Thu Apr 19 12:18:46 2012 -0700
    68.2 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Fri Apr 20 16:23:48 2012 -0700
    68.3 @@ -1,5 +1,5 @@
    68.4  /*
    68.5 - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
    68.6 + * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
    68.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    68.8   *
    68.9   * This code is free software; you can redistribute it and/or modify it
   68.10 @@ -225,7 +225,7 @@
   68.11    // Debugging
   68.12    virtual void print_on(outputStream* st) const;
   68.13    virtual void print_short_on(outputStream* st) const;
   68.14 -  virtual void verify(bool allow_dirty);
   68.15 +  virtual void verify();
   68.16  
   68.17    virtual void set_top(HeapWord* value);
   68.18  };
    69.1 --- a/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Thu Apr 19 12:18:46 2012 -0700
    69.2 +++ b/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Fri Apr 20 16:23:48 2012 -0700
    69.3 @@ -1,5 +1,5 @@
    69.4  /*
    69.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    69.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    69.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.8   *
    69.9   * This code is free software; you can redistribute it and/or modify it
   69.10 @@ -246,7 +246,7 @@
   69.11                   bottom(), top(), end());
   69.12  }
   69.13  
   69.14 -void MutableSpace::verify(bool allow_dirty) {
   69.15 +void MutableSpace::verify() {
   69.16    HeapWord* p = bottom();
   69.17    HeapWord* t = top();
   69.18    HeapWord* prev_p = NULL;
    70.1 --- a/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Thu Apr 19 12:18:46 2012 -0700
    70.2 +++ b/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Fri Apr 20 16:23:48 2012 -0700
    70.3 @@ -1,5 +1,5 @@
    70.4  /*
    70.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    70.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    70.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    70.8   *
    70.9   * This code is free software; you can redistribute it and/or modify it
   70.10 @@ -141,7 +141,7 @@
   70.11    virtual void print_on(outputStream* st) const;
   70.12    virtual void print_short() const;
   70.13    virtual void print_short_on(outputStream* st) const;
   70.14 -  virtual void verify(bool allow_dirty);
   70.15 +  virtual void verify();
   70.16  };
   70.17  
   70.18  #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP
    71.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Thu Apr 19 12:18:46 2012 -0700
    71.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Fri Apr 20 16:23:48 2012 -0700
    71.3 @@ -659,7 +659,7 @@
    71.4    }
    71.5  
    71.6    // Heap verification
    71.7 -  virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
    71.8 +  virtual void verify(bool silent, VerifyOption option) = 0;
    71.9  
   71.10    // Non product verification and debugging.
   71.11  #ifndef PRODUCT
    72.1 --- a/src/share/vm/memory/compactingPermGenGen.cpp	Thu Apr 19 12:18:46 2012 -0700
    72.2 +++ b/src/share/vm/memory/compactingPermGenGen.cpp	Fri Apr 20 16:23:48 2012 -0700
    72.3 @@ -444,11 +444,11 @@
    72.4  }
    72.5  
    72.6  
    72.7 -void CompactingPermGenGen::verify(bool allow_dirty) {
    72.8 -  the_space()->verify(allow_dirty);
    72.9 +void CompactingPermGenGen::verify() {
   72.10 +  the_space()->verify();
   72.11    if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
   72.12 -    ro_space()->verify(allow_dirty);
   72.13 -    rw_space()->verify(allow_dirty);
   72.14 +    ro_space()->verify();
   72.15 +    rw_space()->verify();
   72.16    }
   72.17  }
   72.18  
    73.1 --- a/src/share/vm/memory/compactingPermGenGen.hpp	Thu Apr 19 12:18:46 2012 -0700
    73.2 +++ b/src/share/vm/memory/compactingPermGenGen.hpp	Fri Apr 20 16:23:48 2012 -0700
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
    73.6 + * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -230,7 +230,7 @@
   73.11                                        void* new_vtable_start,
   73.12                                        void* obj);
   73.13  
   73.14 -  void verify(bool allow_dirty);
   73.15 +  void verify();
   73.16  
   73.17    // Serialization
   73.18    static void initialize_oops() KERNEL_RETURN;
    74.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Thu Apr 19 12:18:46 2012 -0700
    74.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Fri Apr 20 16:23:48 2012 -0700
    74.3 @@ -939,10 +939,10 @@
    74.4    }
    74.5  }
    74.6  
    74.7 -void DefNewGeneration::verify(bool allow_dirty) {
    74.8 -  eden()->verify(allow_dirty);
    74.9 -  from()->verify(allow_dirty);
   74.10 -    to()->verify(allow_dirty);
   74.11 +void DefNewGeneration::verify() {
   74.12 +  eden()->verify();
   74.13 +  from()->verify();
   74.14 +    to()->verify();
   74.15  }
   74.16  
   74.17  void DefNewGeneration::print_on(outputStream* st) const {
    75.1 --- a/src/share/vm/memory/defNewGeneration.hpp	Thu Apr 19 12:18:46 2012 -0700
    75.2 +++ b/src/share/vm/memory/defNewGeneration.hpp	Fri Apr 20 16:23:48 2012 -0700
    75.3 @@ -1,5 +1,5 @@
    75.4  /*
    75.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    75.6 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    75.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.8   *
    75.9   * This code is free software; you can redistribute it and/or modify it
   75.10 @@ -340,7 +340,7 @@
   75.11    // PrintHeapAtGC support.
   75.12    void print_on(outputStream* st) const;
   75.13  
   75.14 -  void verify(bool allow_dirty);
   75.15 +  void verify();
   75.16  
   75.17    bool promo_failure_scan_is_complete() const {
   75.18      return _promo_failure_scan_stack.is_empty();
    76.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Thu Apr 19 12:18:46 2012 -0700
    76.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Fri Apr 20 16:23:48 2012 -0700
    76.3 @@ -1247,18 +1247,18 @@
    76.4    return _gens[level]->gc_stats();
    76.5  }
    76.6  
    76.7 -void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
    76.8 +void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
    76.9    if (!silent) {
   76.10      gclog_or_tty->print("permgen ");
   76.11    }
   76.12 -  perm_gen()->verify(allow_dirty);
   76.13 +  perm_gen()->verify();
   76.14    for (int i = _n_gens-1; i >= 0; i--) {
   76.15      Generation* g = _gens[i];
   76.16      if (!silent) {
   76.17        gclog_or_tty->print(g->name());
   76.18        gclog_or_tty->print(" ");
   76.19      }
   76.20 -    g->verify(allow_dirty);
   76.21 +    g->verify();
   76.22    }
   76.23    if (!silent) {
   76.24      gclog_or_tty->print("remset ");
    77.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Thu Apr 19 12:18:46 2012 -0700
    77.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri Apr 20 16:23:48 2012 -0700
    77.3 @@ -1,5 +1,5 @@
    77.4  /*
    77.5 - * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
    77.6 + * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    77.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    77.8   *
    77.9   * This code is free software; you can redistribute it and/or modify it
   77.10 @@ -357,7 +357,7 @@
   77.11    void prepare_for_verify();
   77.12  
   77.13    // Override.
   77.14 -  void verify(bool allow_dirty, bool silent, VerifyOption option);
   77.15 +  void verify(bool silent, VerifyOption option);
   77.16  
   77.17    // Override.
   77.18    virtual void print_on(outputStream* st) const;
    78.1 --- a/src/share/vm/memory/generation.cpp	Thu Apr 19 12:18:46 2012 -0700
    78.2 +++ b/src/share/vm/memory/generation.cpp	Fri Apr 20 16:23:48 2012 -0700
    78.3 @@ -1,5 +1,5 @@
    78.4  /*
    78.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    78.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    78.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    78.8   *
    78.9   * This code is free software; you can redistribute it and/or modify it
   78.10 @@ -696,8 +696,8 @@
   78.11    the_space()->set_top_for_allocations();
   78.12  }
   78.13  
   78.14 -void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
   78.15 -  the_space()->verify(allow_dirty);
   78.16 +void OneContigSpaceCardGeneration::verify() {
   78.17 +  the_space()->verify();
   78.18  }
   78.19  
   78.20  void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
    79.1 --- a/src/share/vm/memory/generation.hpp	Thu Apr 19 12:18:46 2012 -0700
    79.2 +++ b/src/share/vm/memory/generation.hpp	Fri Apr 20 16:23:48 2012 -0700
    79.3 @@ -1,5 +1,5 @@
    79.4  /*
    79.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    79.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    79.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    79.8   *
    79.9   * This code is free software; you can redistribute it and/or modify it
   79.10 @@ -599,7 +599,7 @@
   79.11    virtual void print() const;
   79.12    virtual void print_on(outputStream* st) const;
   79.13  
   79.14 -  virtual void verify(bool allow_dirty) = 0;
   79.15 +  virtual void verify() = 0;
   79.16  
   79.17    struct StatRecord {
   79.18      int invocations;
   79.19 @@ -753,7 +753,7 @@
   79.20  
   79.21    virtual void record_spaces_top();
   79.22  
   79.23 -  virtual void verify(bool allow_dirty);
   79.24 +  virtual void verify();
   79.25    virtual void print_on(outputStream* st) const;
   79.26  };
   79.27  
    80.1 --- a/src/share/vm/memory/oopFactory.cpp	Thu Apr 19 12:18:46 2012 -0700
    80.2 +++ b/src/share/vm/memory/oopFactory.cpp	Fri Apr 20 16:23:48 2012 -0700
    80.3 @@ -1,5 +1,5 @@
    80.4  /*
    80.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    80.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    80.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.8   *
    80.9   * This code is free software; you can redistribute it and/or modify it
   80.10 @@ -127,9 +127,12 @@
   80.11  klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_len,
   80.12                                         int static_field_size,
   80.13                                         unsigned int nonstatic_oop_map_count,
   80.14 +                                       AccessFlags access_flags,
   80.15                                         ReferenceType rt, TRAPS) {
   80.16    instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
   80.17 -  return ikk->allocate_instance_klass(name, vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL);
   80.18 +  return ikk->allocate_instance_klass(name, vtable_len, itable_len,
   80.19 +                                      static_field_size, nonstatic_oop_map_count,
   80.20 +                                      access_flags, rt, CHECK_NULL);
   80.21  }
   80.22  
   80.23  
    81.1 --- a/src/share/vm/memory/oopFactory.hpp	Thu Apr 19 12:18:46 2012 -0700
    81.2 +++ b/src/share/vm/memory/oopFactory.hpp	Fri Apr 20 16:23:48 2012 -0700
    81.3 @@ -1,5 +1,5 @@
    81.4  /*
    81.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    81.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    81.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    81.8   *
    81.9   * This code is free software; you can redistribute it and/or modify it
   81.10 @@ -77,6 +77,7 @@
   81.11                                             int vtable_len, int itable_len,
   81.12                                             int static_field_size,
   81.13                                             unsigned int nonstatic_oop_map_count,
   81.14 +                                           AccessFlags access_flags,
   81.15                                             ReferenceType rt, TRAPS);
   81.16  
   81.17    // Methods
    82.1 --- a/src/share/vm/memory/space.cpp	Thu Apr 19 12:18:46 2012 -0700
    82.2 +++ b/src/share/vm/memory/space.cpp	Fri Apr 20 16:23:48 2012 -0700
    82.3 @@ -1,5 +1,5 @@
    82.4  /*
    82.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    82.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    82.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.8   *
    82.9   * This code is free software; you can redistribute it and/or modify it
   82.10 @@ -531,7 +531,7 @@
   82.11                bottom(), top(), _offsets.threshold(), end());
   82.12  }
   82.13  
   82.14 -void ContiguousSpace::verify(bool allow_dirty) const {
   82.15 +void ContiguousSpace::verify() const {
   82.16    HeapWord* p = bottom();
   82.17    HeapWord* t = top();
   82.18    HeapWord* prev_p = NULL;
   82.19 @@ -965,27 +965,12 @@
   82.20    initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   82.21  }
   82.22  
   82.23 -
   82.24 -class VerifyOldOopClosure : public OopClosure {
   82.25 - public:
   82.26 -  oop  _the_obj;
   82.27 -  bool _allow_dirty;
   82.28 -  void do_oop(oop* p) {
   82.29 -    _the_obj->verify_old_oop(p, _allow_dirty);
   82.30 -  }
   82.31 -  void do_oop(narrowOop* p) {
   82.32 -    _the_obj->verify_old_oop(p, _allow_dirty);
   82.33 -  }
   82.34 -};
   82.35 -
   82.36  #define OBJ_SAMPLE_INTERVAL 0
   82.37  #define BLOCK_SAMPLE_INTERVAL 100
   82.38  
   82.39 -void OffsetTableContigSpace::verify(bool allow_dirty) const {
   82.40 +void OffsetTableContigSpace::verify() const {
   82.41    HeapWord* p = bottom();
   82.42    HeapWord* prev_p = NULL;
   82.43 -  VerifyOldOopClosure blk;      // Does this do anything?
   82.44 -  blk._allow_dirty = allow_dirty;
   82.45    int objs = 0;
   82.46    int blocks = 0;
   82.47  
   82.48 @@ -1007,8 +992,6 @@
   82.49  
   82.50      if (objs == OBJ_SAMPLE_INTERVAL) {
   82.51        oop(p)->verify();
   82.52 -      blk._the_obj = oop(p);
   82.53 -      oop(p)->oop_iterate(&blk);
   82.54        objs = 0;
   82.55      } else {
   82.56        objs++;
    83.1 --- a/src/share/vm/memory/space.hpp	Thu Apr 19 12:18:46 2012 -0700
    83.2 +++ b/src/share/vm/memory/space.hpp	Fri Apr 20 16:23:48 2012 -0700
    83.3 @@ -1,5 +1,5 @@
    83.4  /*
    83.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    83.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    83.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    83.8   *
    83.9   * This code is free software; you can redistribute it and/or modify it
   83.10 @@ -306,7 +306,7 @@
   83.11    }
   83.12  
   83.13    // Debugging
   83.14 -  virtual void verify(bool allow_dirty) const = 0;
   83.15 +  virtual void verify() const = 0;
   83.16  };
   83.17  
   83.18  // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
   83.19 @@ -948,7 +948,7 @@
   83.20    }
   83.21  
   83.22    // Debugging
   83.23 -  virtual void verify(bool allow_dirty) const;
   83.24 +  virtual void verify() const;
   83.25  
   83.26    // Used to increase collection frequency.  "factor" of 0 means entire
   83.27    // space.
   83.28 @@ -1100,7 +1100,7 @@
   83.29    virtual void print_on(outputStream* st) const;
   83.30  
   83.31    // Debugging
   83.32 -  void verify(bool allow_dirty) const;
   83.33 +  void verify() const;
   83.34  
   83.35    // Shared space support
   83.36    void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
    84.1 --- a/src/share/vm/memory/universe.cpp	Thu Apr 19 12:18:46 2012 -0700
    84.2 +++ b/src/share/vm/memory/universe.cpp	Fri Apr 20 16:23:48 2012 -0700
    84.3 @@ -1,5 +1,5 @@
    84.4  /*
    84.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    84.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    84.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    84.8   *
    84.9   * This code is free software; you can redistribute it and/or modify it
   84.10 @@ -1326,7 +1326,7 @@
   84.11    st->print_cr("}");
   84.12  }
   84.13  
   84.14 -void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) {
   84.15 +void Universe::verify(bool silent, VerifyOption option) {
   84.16    if (SharedSkipVerify) {
   84.17      return;
   84.18    }
   84.19 @@ -1350,7 +1350,7 @@
   84.20    if (!silent) gclog_or_tty->print("[Verifying ");
   84.21    if (!silent) gclog_or_tty->print("threads ");
   84.22    Threads::verify();
   84.23 -  heap()->verify(allow_dirty, silent, option);
   84.24 +  heap()->verify(silent, option);
   84.25  
   84.26    if (!silent) gclog_or_tty->print("syms ");
   84.27    SymbolTable::verify();
    85.1 --- a/src/share/vm/memory/universe.hpp	Thu Apr 19 12:18:46 2012 -0700
    85.2 +++ b/src/share/vm/memory/universe.hpp	Fri Apr 20 16:23:48 2012 -0700
    85.3 @@ -1,5 +1,5 @@
    85.4  /*
    85.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    85.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    85.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    85.8   *
    85.9   * This code is free software; you can redistribute it and/or modify it
   85.10 @@ -412,7 +412,7 @@
   85.11  
   85.12    // Debugging
   85.13    static bool verify_in_progress() { return _verify_in_progress; }
   85.14 -  static void verify(bool allow_dirty = true, bool silent = false,
   85.15 +  static void verify(bool silent = false,
   85.16                       VerifyOption option = VerifyOption_Default );
   85.17    static int  verify_count()       { return _verify_count; }
   85.18    // The default behavior is to call print_on() on gclog_or_tty.
    86.1 --- a/src/share/vm/oops/instanceKlass.cpp	Thu Apr 19 12:18:46 2012 -0700
    86.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Fri Apr 20 16:23:48 2012 -0700
    86.3 @@ -567,8 +567,18 @@
    86.4    ol.notify_all(CHECK);
    86.5  }
    86.6  
    86.7 +// The embedded _implementor field can only record one implementor.
    86.8 +// When there are more than one implementors, the _implementor field
    86.9 +// is set to the interface klassOop itself. Following are the possible
   86.10 +// values for the _implementor field:
   86.11 +//   NULL                  - no implementor
   86.12 +//   implementor klassOop  - one implementor
   86.13 +//   self                  - more than one implementor
   86.14 +//
   86.15 +// The _implementor field only exists for interfaces.
   86.16  void instanceKlass::add_implementor(klassOop k) {
   86.17    assert(Compile_lock->owned_by_self(), "");
   86.18 +  assert(is_interface(), "not interface");
   86.19    // Filter out my subinterfaces.
   86.20    // (Note: Interfaces are never on the subklass list.)
   86.21    if (instanceKlass::cast(k)->is_interface()) return;
   86.22 @@ -583,17 +593,13 @@
   86.23      // Any supers of the super have the same (or fewer) transitive_interfaces.
   86.24      return;
   86.25  
   86.26 -  // Update number of implementors
   86.27 -  int i = _nof_implementors++;
   86.28 -
   86.29 -  // Record this implementor, if there are not too many already
   86.30 -  if (i < implementors_limit) {
   86.31 -    assert(_implementors[i] == NULL, "should be exactly one implementor");
   86.32 -    oop_store_without_check((oop*)&_implementors[i], k);
   86.33 -  } else if (i == implementors_limit) {
   86.34 -    // clear out the list on first overflow
   86.35 -    for (int i2 = 0; i2 < implementors_limit; i2++)
   86.36 -      oop_store_without_check((oop*)&_implementors[i2], NULL);
   86.37 +  klassOop ik = implementor();
   86.38 +  if (ik == NULL) {
   86.39 +    set_implementor(k);
   86.40 +  } else if (ik != this->as_klassOop()) {
   86.41 +    // There is already an implementor. Use itself as an indicator of
   86.42 +    // more than one implementors.
   86.43 +    set_implementor(this->as_klassOop());
   86.44    }
   86.45  
   86.46    // The implementor also implements the transitive_interfaces
   86.47 @@ -603,9 +609,9 @@
   86.48  }
   86.49  
   86.50  void instanceKlass::init_implementor() {
   86.51 -  for (int i = 0; i < implementors_limit; i++)
   86.52 -    oop_store_without_check((oop*)&_implementors[i], NULL);
   86.53 -  _nof_implementors = 0;
   86.54 +  if (is_interface()) {
   86.55 +    set_implementor(NULL);
   86.56 +  }
   86.57  }
   86.58  
   86.59  
   86.60 @@ -1849,24 +1855,22 @@
   86.61  void instanceKlass::follow_weak_klass_links(
   86.62    BoolObjectClosure* is_alive, OopClosure* keep_alive) {
   86.63    assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
   86.64 -  if (ClassUnloading) {
   86.65 -    for (int i = 0; i < implementors_limit; i++) {
   86.66 -      klassOop impl = _implementors[i];
   86.67 -      if (impl == NULL)  break;  // no more in the list
   86.68 -      if (!is_alive->do_object_b(impl)) {
   86.69 -        // remove this guy from the list by overwriting him with the tail
   86.70 -        int lasti = --_nof_implementors;
   86.71 -        assert(lasti >= i && lasti < implementors_limit, "just checking");
   86.72 -        _implementors[i] = _implementors[lasti];
   86.73 -        _implementors[lasti] = NULL;
   86.74 -        --i; // rerun the loop at this index
   86.75 +
   86.76 +  if (is_interface()) {
   86.77 +    if (ClassUnloading) {
   86.78 +      klassOop impl = implementor();
   86.79 +      if (impl != NULL) {
   86.80 +        if (!is_alive->do_object_b(impl)) {
   86.81 +          // remove this guy
   86.82 +          *start_of_implementor() = NULL;
   86.83 +        }
   86.84        }
   86.85 -    }
   86.86 -  } else {
   86.87 -    for (int i = 0; i < implementors_limit; i++) {
   86.88 -      keep_alive->do_oop(&adr_implementors()[i]);
   86.89 +    } else {
   86.90 +      assert(adr_implementor() != NULL, "just checking");
   86.91 +      keep_alive->do_oop(adr_implementor());
   86.92      }
   86.93    }
   86.94 +
   86.95    Klass::follow_weak_klass_links(is_alive, keep_alive);
   86.96  }
   86.97  
    87.1 --- a/src/share/vm/oops/instanceKlass.hpp	Thu Apr 19 12:18:46 2012 -0700
    87.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Fri Apr 20 16:23:48 2012 -0700
    87.3 @@ -1,5 +1,5 @@
    87.4  /*
    87.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    87.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    87.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    87.8   *
    87.9   * This code is free software; you can redistribute it and/or modify it
   87.10 @@ -56,8 +56,6 @@
   87.11  //    [methods                    ]
   87.12  //    [local interfaces           ]
   87.13  //    [transitive interfaces      ]
   87.14 -//    [number of implementors     ]
   87.15 -//    [implementors               ] klassOop[2]
   87.16  //    [fields                     ]
   87.17  //    [constants                  ]
   87.18  //    [class loader               ]
   87.19 @@ -77,9 +75,9 @@
   87.20  //    [oop map cache (stack maps) ]
   87.21  //    [EMBEDDED Java vtable             ] size in words = vtable_len
   87.22  //    [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size
   87.23 -//
   87.24 -//    The embedded nonstatic oop-map blocks are short pairs (offset, length) indicating
   87.25 -//    where oops are located in instances of this klass.
   87.26 +//      The embedded nonstatic oop-map blocks are short pairs (offset, length)
   87.27 +//      indicating where oops are located in instances of this klass.
   87.28 +//    [EMBEDDED implementor of the interface] only exist for interface
   87.29  
   87.30  
   87.31  // forward declaration for class -- see below for definition
   87.32 @@ -153,10 +151,6 @@
   87.33    oop* oop_block_beg() const { return adr_array_klasses(); }
   87.34    oop* oop_block_end() const { return adr_methods_default_annotations() + 1; }
   87.35  
   87.36 -  enum {
   87.37 -    implementors_limit = 2              // how many implems can we track?
   87.38 -  };
   87.39 -
   87.40   protected:
   87.41    //
   87.42    // The oop block.  See comment in klass.hpp before making changes.
   87.43 @@ -200,8 +194,6 @@
   87.44    // and EnclosingMethod attributes the _inner_classes array length is
   87.45    // number_of_inner_classes * 4 + enclosing_method_attribute_size.
   87.46    typeArrayOop    _inner_classes;
   87.47 -  // Implementors of this interface (not valid if it overflows)
   87.48 -  klassOop        _implementors[implementors_limit];
   87.49    // Annotations for this class, or null if none.
   87.50    typeArrayOop    _class_annotations;
   87.51    // Annotation objects (byte arrays) for fields, or null if no annotations.
   87.52 @@ -257,7 +249,6 @@
   87.53    nmethodBucket*  _dependencies;         // list of dependent nmethods
   87.54    nmethod*        _osr_nmethods_head;    // Head of list of on-stack replacement nmethods for this class
   87.55    BreakpointInfo* _breakpoints;          // bpt lists, managed by methodOop
   87.56 -  int             _nof_implementors;     // No of implementors of this interface (zero if not an interface)
   87.57    // Array of interesting part(s) of the previous version(s) of this
   87.58    // instanceKlass. See PreviousVersionWalker below.
   87.59    GrowableArray<PreviousVersionNode *>* _previous_versions;
   87.60 @@ -278,6 +269,13 @@
   87.61    // embedded Java itables follows here
   87.62    // embedded static fields follows here
   87.63    // embedded nonstatic oop-map blocks follows here
   87.64 +  // embedded implementor of this interface follows here
   87.65 +  //   The embedded implementor only exists if the current klass is an
   87.66 +  //   iterface. The possible values of the implementor fall into following
   87.67 +  //   three cases:
   87.68 +  //     NULL: no implementor.
   87.69 +  //     A klassOop that's not itself: one implementor.
   87.70 +  //     Itsef: more than one implementors.
   87.71  
   87.72    friend class instanceKlassKlass;
   87.73    friend class SystemDictionary;
   87.74 @@ -644,19 +642,40 @@
   87.75  
   87.76    // support for stub routines
   87.77    static ByteSize init_state_offset()  { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_state)); }
   87.78 +  TRACE_DEFINE_OFFSET;
   87.79    static ByteSize init_thread_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _init_thread)); }
   87.80  
   87.81    // subclass/subinterface checks
   87.82    bool implements_interface(klassOop k) const;
   87.83  
   87.84 -  // Access to implementors of an interface. We only store the count
   87.85 -  // of implementors, and in case, there are only a few
   87.86 -  // implementors, we store them in a short list.
   87.87 -  // This accessor returns NULL if we walk off the end of the list.
   87.88 -  klassOop implementor(int i) const {
   87.89 -    return (i < implementors_limit)? _implementors[i]: (klassOop) NULL;
   87.90 +  // Access to the implementor of an interface.
   87.91 +  klassOop implementor() const
   87.92 +  {
   87.93 +    klassOop* k = start_of_implementor();
   87.94 +    if (k == NULL) {
   87.95 +      return NULL;
   87.96 +    } else {
   87.97 +      return *k;
   87.98 +    }
   87.99    }
  87.100 -  int  nof_implementors() const       { return _nof_implementors; }
  87.101 +
  87.102 +  void set_implementor(klassOop k) {
  87.103 +    assert(is_interface(), "not interface");
  87.104 +    oop* addr = (oop*)start_of_implementor();
  87.105 +    oop_store_without_check(addr, k);
  87.106 +  }
  87.107 +
  87.108 +  int  nof_implementors() const       {
  87.109 +    klassOop k = implementor();
  87.110 +    if (k == NULL) {
  87.111 +      return 0;
  87.112 +    } else if (k != this->as_klassOop()) {
  87.113 +      return 1;
  87.114 +    } else {
  87.115 +      return 2;
  87.116 +    }
  87.117 +  }
  87.118 +
  87.119    void add_implementor(klassOop k);  // k is a new class that implements this interface
  87.120    void init_implementor();           // initialize
  87.121  
  87.122 @@ -693,7 +712,15 @@
  87.123  
  87.124    // Sizing (in words)
  87.125    static int header_size()            { return align_object_offset(oopDesc::header_size() + sizeof(instanceKlass)/HeapWordSize); }
  87.126 -  int object_size() const             { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + nonstatic_oop_map_size()); }
  87.127 +
  87.128 +  int object_size() const
  87.129 +  {
  87.130 +    return object_size(align_object_offset(vtable_length()) +
  87.131 +                       align_object_offset(itable_length()) +
  87.132 +                       (is_interface() ?
  87.133 +                        (align_object_offset(nonstatic_oop_map_size()) + (int)sizeof(klassOop)/HeapWordSize) :
  87.134 +                        nonstatic_oop_map_size()));
  87.135 +  }
  87.136    static int vtable_start_offset()    { return header_size(); }
  87.137    static int vtable_length_offset()   { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; }
  87.138    static int object_size(int extra)   { return align_object_size(header_size() + extra); }
  87.139 @@ -710,6 +737,15 @@
  87.140      return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length()));
  87.141    }
  87.142  
  87.143 +  klassOop* start_of_implementor() const {
  87.144 +    if (is_interface()) {
  87.145 +      return (klassOop*)(start_of_nonstatic_oop_maps() +
  87.146 +                         nonstatic_oop_map_count());
  87.147 +    } else {
  87.148 +      return NULL;
  87.149 +    }
  87.150 +  };
  87.151 +
  87.152    // Allocation profiling support
  87.153    juint alloc_size() const            { return _alloc_count * size_helper(); }
  87.154    void set_alloc_size(juint n)        {}
  87.155 @@ -819,7 +855,7 @@
  87.156    oop* adr_host_klass() const        { return (oop*)&this->_host_klass;}
  87.157    oop* adr_signers() const           { return (oop*)&this->_signers;}
  87.158    oop* adr_inner_classes() const     { return (oop*)&this->_inner_classes;}
  87.159 -  oop* adr_implementors() const      { return (oop*)&this->_implementors[0];}
  87.160 +  oop* adr_implementor() const       { return (oop*)start_of_implementor(); }
  87.161    oop* adr_methods_jmethod_ids() const             { return (oop*)&this->_methods_jmethod_ids;}
  87.162    oop* adr_methods_cached_itable_indices() const   { return (oop*)&this->_methods_cached_itable_indices;}
  87.163    oop* adr_class_annotations() const   { return (oop*)&this->_class_annotations;}
    88.1 --- a/src/share/vm/oops/instanceKlassKlass.cpp	Thu Apr 19 12:18:46 2012 -0700
    88.2 +++ b/src/share/vm/oops/instanceKlassKlass.cpp	Fri Apr 20 16:23:48 2012 -0700
    88.3 @@ -1,5 +1,5 @@
    88.4  /*
    88.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    88.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    88.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    88.8   *
    88.9   * This code is free software; you can redistribute it and/or modify it
   88.10 @@ -111,7 +111,7 @@
   88.11    MarkSweep::mark_and_push(ik->adr_methods_parameter_annotations());
   88.12    MarkSweep::mark_and_push(ik->adr_methods_default_annotations());
   88.13  
   88.14 -  // We do not follow adr_implementors() here. It is followed later
   88.15 +  // We do not follow adr_implementor() here. It is followed later
   88.16    // in instanceKlass::follow_weak_klass_links()
   88.17  
   88.18    klassKlass::oop_follow_contents(obj);
   88.19 @@ -180,8 +180,8 @@
   88.20    blk->do_oop(ik->adr_host_klass());
   88.21    blk->do_oop(ik->adr_signers());
   88.22    blk->do_oop(ik->adr_inner_classes());
   88.23 -  for (int i = 0; i < instanceKlass::implementors_limit; i++) {
   88.24 -    blk->do_oop(&ik->adr_implementors()[i]);
   88.25 +  if (ik->is_interface()) {
   88.26 +    blk->do_oop(ik->adr_implementor());
   88.27    }
   88.28    blk->do_oop(ik->adr_class_annotations());
   88.29    blk->do_oop(ik->adr_fields_annotations());
   88.30 @@ -232,9 +232,9 @@
   88.31    if (mr.contains(adr)) blk->do_oop(adr);
   88.32    adr = ik->adr_inner_classes();
   88.33    if (mr.contains(adr)) blk->do_oop(adr);
   88.34 -  adr = ik->adr_implementors();
   88.35 -  for (int i = 0; i < instanceKlass::implementors_limit; i++) {
   88.36 -    if (mr.contains(&adr[i])) blk->do_oop(&adr[i]);
   88.37 +  if (ik->is_interface()) {
   88.38 +    adr = ik->adr_implementor();
   88.39 +    if (mr.contains(adr)) blk->do_oop(adr);
   88.40    }
   88.41    adr = ik->adr_class_annotations();
   88.42    if (mr.contains(adr)) blk->do_oop(adr);
   88.43 @@ -273,8 +273,8 @@
   88.44    MarkSweep::adjust_pointer(ik->adr_host_klass());
   88.45    MarkSweep::adjust_pointer(ik->adr_signers());
   88.46    MarkSweep::adjust_pointer(ik->adr_inner_classes());
   88.47 -  for (int i = 0; i < instanceKlass::implementors_limit; i++) {
   88.48 -    MarkSweep::adjust_pointer(&ik->adr_implementors()[i]);
   88.49 +  if (ik->is_interface()) {
   88.50 +    MarkSweep::adjust_pointer(ik->adr_implementor());
   88.51    }
   88.52    MarkSweep::adjust_pointer(ik->adr_class_annotations());
   88.53    MarkSweep::adjust_pointer(ik->adr_fields_annotations());
   88.54 @@ -328,6 +328,9 @@
   88.55    for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
   88.56      PSParallelCompact::adjust_pointer(cur_oop);
   88.57    }
   88.58 +  if (ik->is_interface()) {
   88.59 +    PSParallelCompact::adjust_pointer(ik->adr_implementor());
   88.60 +  }
   88.61  
   88.62    OopClosure* closure = PSParallelCompact::adjust_root_pointer_closure();
   88.63    iterate_c_heap_oops(ik, closure);
   88.64 @@ -342,11 +345,18 @@
   88.65  instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int itable_len,
   88.66                                              int static_field_size,
   88.67                                              unsigned nonstatic_oop_map_count,
   88.68 +                                            AccessFlags access_flags,
   88.69                                              ReferenceType rt, TRAPS) {
   88.70  
   88.71    const int nonstatic_oop_map_size =
   88.72      instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count);
   88.73 -  int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + nonstatic_oop_map_size);
   88.74 +  int size = align_object_offset(vtable_len) + align_object_offset(itable_len);
   88.75 +  if (access_flags.is_interface()) {
   88.76 +    size += align_object_offset(nonstatic_oop_map_size) + (int)sizeof(klassOop)/HeapWordSize;
   88.77 +  } else {
   88.78 +    size += nonstatic_oop_map_size;
   88.79 +  }
   88.80 +  size = instanceKlass::object_size(size);
   88.81  
   88.82    // Allocation
   88.83    KlassHandle h_this_klass(THREAD, as_klassOop());
   88.84 @@ -378,6 +388,7 @@
   88.85      ik->set_itable_length(itable_len);
   88.86      ik->set_static_field_size(static_field_size);
   88.87      ik->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
   88.88 +    ik->set_access_flags(access_flags);
   88.89      assert(k()->size() == size, "wrong size for object");
   88.90  
   88.91      ik->set_array_klasses(NULL);
   88.92 @@ -470,16 +481,12 @@
   88.93  
   88.94    if (ik->is_interface()) {
   88.95      st->print_cr(BULLET"nof implementors:  %d", ik->nof_implementors());
   88.96 -    int print_impl = 0;
   88.97 -    for (int i = 0; i < instanceKlass::implementors_limit; i++) {
   88.98 -      if (ik->implementor(i) != NULL) {
   88.99 -        if (++print_impl == 1)
  88.100 -          st->print_cr(BULLET"implementor:    ");
  88.101 -        st->print("   ");
  88.102 -        ik->implementor(i)->print_value_on(st);
  88.103 -      }
  88.104 +    if (ik->nof_implementors() == 1) {
  88.105 +      st->print_cr(BULLET"implementor:    ");
  88.106 +      st->print("   ");
  88.107 +      ik->implementor()->print_value_on(st);
  88.108 +      st->cr();
  88.109      }
  88.110 -    if (print_impl > 0)  st->cr();
  88.111    }
  88.112  
  88.113    st->print(BULLET"arrays:            "); ik->array_klasses()->print_value_on(st);     st->cr();
  88.114 @@ -640,16 +647,12 @@
  88.115      }
  88.116  
  88.117      // Verify implementor fields
  88.118 -    bool saw_null_impl = false;
  88.119 -    for (int i = 0; i < instanceKlass::implementors_limit; i++) {
  88.120 -      klassOop im = ik->implementor(i);
  88.121 -      if (im == NULL) { saw_null_impl = true; continue; }
  88.122 -      guarantee(!saw_null_impl, "non-nulls must preceded all nulls");
  88.123 +    klassOop im = ik->implementor();
  88.124 +    if (im != NULL) {
  88.125        guarantee(ik->is_interface(), "only interfaces should have implementor set");
  88.126 -      guarantee(i < ik->nof_implementors(), "should only have one implementor");
  88.127        guarantee(im->is_perm(),  "should be in permspace");
  88.128        guarantee(im->is_klass(), "should be klass");
  88.129 -      guarantee(!Klass::cast(klassOop(im))->is_interface(), "implementors cannot be interfaces");
  88.130 +      guarantee(!Klass::cast(klassOop(im))->is_interface() || im == ik->as_klassOop(), "implementors cannot be interfaces");
  88.131      }
  88.132  
  88.133      // Verify local interfaces
    89.1 --- a/src/share/vm/oops/instanceKlassKlass.hpp	Thu Apr 19 12:18:46 2012 -0700
    89.2 +++ b/src/share/vm/oops/instanceKlassKlass.hpp	Fri Apr 20 16:23:48 2012 -0700
    89.3 @@ -1,5 +1,5 @@
    89.4  /*
    89.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    89.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    89.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    89.8   *
    89.9   * This code is free software; you can redistribute it and/or modify it
   89.10 @@ -46,6 +46,7 @@
   89.11                                     int itable_len,
   89.12                                     int static_field_size,
   89.13                                     unsigned int nonstatic_oop_map_count,
   89.14 +                                   AccessFlags access_flags,
   89.15                                     ReferenceType rt,
   89.16                                     TRAPS);
   89.17  
    90.1 --- a/src/share/vm/oops/instanceRefKlass.cpp	Thu Apr 19 12:18:46 2012 -0700
    90.2 +++ b/src/share/vm/oops/instanceRefKlass.cpp	Fri Apr 20 16:23:48 2012 -0700
    90.3 @@ -1,5 +1,5 @@
    90.4  /*
    90.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    90.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    90.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    90.8   *
    90.9   * This code is free software; you can redistribute it and/or modify it
   90.10 @@ -497,36 +497,12 @@
   90.11  
   90.12    if (referent != NULL) {
   90.13      guarantee(referent->is_oop(), "referent field heap failed");
   90.14 -    if (gch != NULL && !gch->is_in_young(obj)) {
   90.15 -      // We do a specific remembered set check here since the referent
   90.16 -      // field is not part of the oop mask and therefore skipped by the
   90.17 -      // regular verify code.
   90.18 -      if (UseCompressedOops) {
   90.19 -        narrowOop* referent_addr = (narrowOop*)java_lang_ref_Reference::referent_addr(obj);
   90.20 -        obj->verify_old_oop(referent_addr, true);
   90.21 -      } else {
   90.22 -        oop* referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj);
   90.23 -        obj->verify_old_oop(referent_addr, true);
   90.24 -      }
   90.25 -    }
   90.26    }
   90.27    // Verify next field
   90.28    oop next = java_lang_ref_Reference::next(obj);
   90.29    if (next != NULL) {
   90.30      guarantee(next->is_oop(), "next field verify failed");
   90.31      guarantee(next->is_instanceRef(), "next field verify failed");
   90.32 -    if (gch != NULL && !gch->is_in_young(obj)) {
   90.33 -      // We do a specific remembered set check here since the next field is
   90.34 -      // not part of the oop mask and therefore skipped by the regular
   90.35 -      // verify code.
   90.36 -      if (UseCompressedOops) {
   90.37 -        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
   90.38 -        obj->verify_old_oop(next_addr, true);
   90.39 -      } else {
   90.40 -        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
   90.41 -        obj->verify_old_oop(next_addr, true);
   90.42 -      }
   90.43 -    }
   90.44    }
   90.45  }
   90.46  
    91.1 --- a/src/share/vm/oops/klass.cpp	Thu Apr 19 12:18:46 2012 -0700
    91.2 +++ b/src/share/vm/oops/klass.cpp	Fri Apr 20 16:23:48 2012 -0700
    91.3 @@ -581,14 +581,6 @@
    91.4    guarantee(obj->klass()->is_klass(), "klass field is not a klass");
    91.5  }
    91.6  
    91.7 -
    91.8 -void Klass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
    91.9 -  /* $$$ I think this functionality should be handled by verification of
   91.10 -  RememberedSet::verify_old_oop(obj, p, allow_dirty, false);
   91.11 -  the card table. */
   91.12 -}
   91.13 -void Klass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) { }
   91.14 -
   91.15  #ifndef PRODUCT
   91.16  
   91.17  void Klass::verify_vtable_index(int i) {
    92.1 --- a/src/share/vm/oops/klass.hpp	Thu Apr 19 12:18:46 2012 -0700
    92.2 +++ b/src/share/vm/oops/klass.hpp	Fri Apr 20 16:23:48 2012 -0700
    92.3 @@ -805,8 +805,6 @@
    92.4    // Verification
    92.5    virtual const char* internal_name() const = 0;
    92.6    virtual void oop_verify_on(oop obj, outputStream* st);
    92.7 -  virtual void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
    92.8 -  virtual void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
    92.9    // tells whether obj is partially constructed (gc during class loading)
   92.10    virtual bool oop_partially_loaded(oop obj) const { return false; }
   92.11    virtual void oop_set_partially_loaded(oop obj) {};
    93.1 --- a/src/share/vm/oops/objArrayKlass.cpp	Thu Apr 19 12:18:46 2012 -0700
    93.2 +++ b/src/share/vm/oops/objArrayKlass.cpp	Fri Apr 20 16:23:48 2012 -0700
    93.3 @@ -545,10 +545,3 @@
    93.4      guarantee(oa->obj_at(index)->is_oop_or_null(), "should be oop");
    93.5    }
    93.6  }
    93.7 -
    93.8 -void objArrayKlass::oop_verify_old_oop(oop obj, oop* p, bool allow_dirty) {
    93.9 -  /* $$$ move into remembered set verification?
   93.10 -  RememberedSet::verify_old_oop(obj, p, allow_dirty, true);
   93.11 -  */
   93.12 -}
   93.13 -void objArrayKlass::oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty) {}
    94.1 --- a/src/share/vm/oops/objArrayKlass.hpp	Thu Apr 19 12:18:46 2012 -0700
    94.2 +++ b/src/share/vm/oops/objArrayKlass.hpp	Fri Apr 20 16:23:48 2012 -0700
    94.3 @@ -1,5 +1,5 @@
    94.4  /*
    94.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    94.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    94.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    94.8   *
    94.9   * This code is free software; you can redistribute it and/or modify it
   94.10 @@ -144,8 +144,6 @@
   94.11    // Verification
   94.12    const char* internal_name() const;
   94.13    void oop_verify_on(oop obj, outputStream* st);
   94.14 -  void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
   94.15 -  void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
   94.16  };
   94.17  
   94.18  #endif // SHARE_VM_OOPS_OBJARRAYKLASS_HPP
    95.1 --- a/src/share/vm/oops/oop.cpp	Thu Apr 19 12:18:46 2012 -0700
    95.2 +++ b/src/share/vm/oops/oop.cpp	Fri Apr 20 16:23:48 2012 -0700
    95.3 @@ -1,5 +1,5 @@
    95.4  /*
    95.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    95.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    95.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    95.8   *
    95.9   * This code is free software; you can redistribute it and/or modify it
   95.10 @@ -107,16 +107,6 @@
   95.11    verify_on(tty);
   95.12  }
   95.13  
   95.14 -
   95.15 -// XXX verify_old_oop doesn't do anything (should we remove?)
   95.16 -void oopDesc::verify_old_oop(oop* p, bool allow_dirty) {
   95.17 -  blueprint()->oop_verify_old_oop(this, p, allow_dirty);
   95.18 -}
   95.19 -
   95.20 -void oopDesc::verify_old_oop(narrowOop* p, bool allow_dirty) {
   95.21 -  blueprint()->oop_verify_old_oop(this, p, allow_dirty);
   95.22 -}
   95.23 -
   95.24  bool oopDesc::partially_loaded() {
   95.25    return blueprint()->oop_partially_loaded(this);
   95.26  }
    96.1 --- a/src/share/vm/oops/oop.hpp	Thu Apr 19 12:18:46 2012 -0700
    96.2 +++ b/src/share/vm/oops/oop.hpp	Fri Apr 20 16:23:48 2012 -0700
    96.3 @@ -1,5 +1,5 @@
    96.4  /*
    96.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
    96.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    96.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    96.8   *
    96.9   * This code is free software; you can redistribute it and/or modify it
   96.10 @@ -293,8 +293,6 @@
   96.11    // verification operations
   96.12    void verify_on(outputStream* st);
   96.13    void verify();
   96.14 -  void verify_old_oop(oop* p, bool allow_dirty);
   96.15 -  void verify_old_oop(narrowOop* p, bool allow_dirty);
   96.16  
   96.17    // tells whether this oop is partially constructed (gc during class loading)
   96.18    bool partially_loaded();
    97.1 --- a/src/share/vm/opto/library_call.cpp	Thu Apr 19 12:18:46 2012 -0700
    97.2 +++ b/src/share/vm/opto/library_call.cpp	Fri Apr 20 16:23:48 2012 -0700
    97.3 @@ -1,5 +1,5 @@
    97.4  /*
    97.5 - * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
    97.6 + * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    97.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    97.8   *
    97.9   * This code is free software; you can redistribute it and/or modify it
   97.10 @@ -175,7 +175,11 @@
   97.11    bool inline_unsafe_allocate();
   97.12    bool inline_unsafe_copyMemory();
   97.13    bool inline_native_currentThread();
   97.14 -  bool inline_native_time_funcs(bool isNano);
   97.15 +#ifdef TRACE_HAVE_INTRINSICS
   97.16 +  bool inline_native_classID();
   97.17 +  bool inline_native_threadID();
   97.18 +#endif
   97.19 +  bool inline_native_time_funcs(address method, const char* funcName);
   97.20    bool inline_native_isInterrupted();
   97.21    bool inline_native_Class_query(vmIntrinsics::ID id);
   97.22    bool inline_native_subtype_check();
   97.23 @@ -638,10 +642,18 @@
   97.24    case vmIntrinsics::_isInterrupted:
   97.25      return inline_native_isInterrupted();
   97.26  
   97.27 +#ifdef TRACE_HAVE_INTRINSICS
   97.28 +  case vmIntrinsics::_classID:
   97.29 +    return inline_native_classID();
   97.30 +  case vmIntrinsics::_threadID:
   97.31 +    return inline_native_threadID();
   97.32 +  case vmIntrinsics::_counterTime:
   97.33 +    return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
   97.34 +#endif
   97.35    case vmIntrinsics::_currentTimeMillis:
   97.36 -    return inline_native_time_funcs(false);
   97.37 +    return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
   97.38    case vmIntrinsics::_nanoTime:
   97.39 -    return inline_native_time_funcs(true);
   97.40 +    return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
   97.41    case vmIntrinsics::_allocateInstance:
   97.42      return inline_unsafe_allocate();
   97.43    case vmIntrinsics::_copyMemory:
   97.44 @@ -2840,14 +2852,63 @@
   97.45    return true;
   97.46  }
   97.47  
   97.48 +#ifdef TRACE_HAVE_INTRINSICS
   97.49 +/*
   97.50 + * oop -> myklass
   97.51 + * myklass->trace_id |= USED
   97.52 + * return myklass->trace_id & ~0x3
   97.53 + */
   97.54 +bool LibraryCallKit::inline_native_classID() {
   97.55 +  int nargs = 1 + 1;
   97.56 +  null_check_receiver(callee());  // check then ignore argument(0)
   97.57 +  _sp += nargs;
   97.58 +  Node* cls = do_null_check(argument(1), T_OBJECT);
   97.59 +  _sp -= nargs;
   97.60 +  Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
   97.61 +  _sp += nargs;
   97.62 +  kls = do_null_check(kls, T_OBJECT);
   97.63 +  _sp -= nargs;
   97.64 +  ByteSize offset = TRACE_ID_OFFSET;
   97.65 +  Node* insp = basic_plus_adr(kls, in_bytes(offset));
   97.66 +  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
   97.67 +  Node* bits = longcon(~0x03l); // ignore bit 0 & 1
   97.68 +  Node* andl = _gvn.transform(new (C, 3) AndLNode(tvalue, bits));
   97.69 +  Node* clsused = longcon(0x01l); // set the class bit
   97.70 +  Node* orl = _gvn.transform(new (C, 3) OrLNode(tvalue, clsused));
   97.71 +
   97.72 +  const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
   97.73 +  store_to_memory(control(), insp, orl, T_LONG, adr_type);
   97.74 +  push_pair(andl);
   97.75 +  return true;
   97.76 +}
   97.77 +
   97.78 +bool LibraryCallKit::inline_native_threadID() {
   97.79 +  Node* tls_ptr = NULL;
   97.80 +  Node* cur_thr = generate_current_thread(tls_ptr);
   97.81 +  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
   97.82 +  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
   97.83 +  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
   97.84 +
   97.85 +  Node* threadid = NULL;
   97.86 +  size_t thread_id_size = OSThread::thread_id_size();
   97.87 +  if (thread_id_size == (size_t) BytesPerLong) {
   97.88 +    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG));
   97.89 +    push(threadid);
   97.90 +  } else if (thread_id_size == (size_t) BytesPerInt) {
   97.91 +    threadid = make_load(control(), p, TypeInt::INT, T_INT);
   97.92 +    push(threadid);
   97.93 +  } else {
   97.94 +    ShouldNotReachHere();
   97.95 +  }
   97.96 +  return true;
   97.97 +}
   97.98 +#endif
   97.99 +
  97.100  //------------------------inline_native_time_funcs--------------
  97.101  // inline code for System.currentTimeMillis() and System.nanoTime()
  97.102  // these have the same type and signature
  97.103 -bool LibraryCallKit::inline_native_time_funcs(bool isNano) {
  97.104 -  address funcAddr = isNano ? CAST_FROM_FN_PTR(address, os::javaTimeNanos) :
  97.105 -                              CAST_FROM_FN_PTR(address, os::javaTimeMillis);
  97.106 -  const char * funcName = isNano ? "nanoTime" : "currentTimeMillis";
  97.107 -  const TypeFunc *tf = OptoRuntime::current_time_millis_Type();
  97.108 +bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
  97.109 +  const TypeFunc *tf = OptoRuntime::void_long_Type();
  97.110    const TypePtr* no_memory_effects = NULL;
  97.111    Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
  97.112    Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0));
    98.1 --- a/src/share/vm/opto/runtime.cpp	Thu Apr 19 12:18:46 2012 -0700
    98.2 +++ b/src/share/vm/opto/runtime.cpp	Fri Apr 20 16:23:48 2012 -0700
    98.3 @@ -1,5 +1,5 @@
    98.4  /*
    98.5 - * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
    98.6 + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    98.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    98.8   *
    98.9   * This code is free software; you can redistribute it and/or modify it
   98.10 @@ -709,9 +709,9 @@
   98.11    return TypeFunc::make(domain, range);
   98.12  }
   98.13  
   98.14 -//-------------- currentTimeMillis
   98.15 +//-------------- currentTimeMillis, currentTimeNanos, etc
   98.16  
   98.17 -const TypeFunc* OptoRuntime::current_time_millis_Type() {
   98.18 +const TypeFunc* OptoRuntime::void_long_Type() {
   98.19    // create input type (domain)
   98.20    const Type **fields = TypeTuple::fields(0);
   98.21    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);
    99.1 --- a/src/share/vm/opto/runtime.hpp	Thu Apr 19 12:18:46 2012 -0700
    99.2 +++ b/src/share/vm/opto/runtime.hpp	Fri Apr 20 16:23:48 2012 -0700
    99.3 @@ -1,5 +1,5 @@
    99.4  /*
    99.5 - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    99.6 + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    99.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    99.8   *
    99.9   * This code is free software; you can redistribute it and/or modify it
   99.10 @@ -268,7 +268,7 @@
   99.11    static const TypeFunc* Math_DD_D_Type(); // mod,pow & friends
   99.12    static const TypeFunc* modf_Type();
   99.13    static const TypeFunc* l2f_Type();
   99.14 -  static const TypeFunc* current_time_millis_Type();
   99.15 +  static const TypeFunc* void_long_Type();
   99.16  
   99.17    static const TypeFunc* flush_windows_Type();
   99.18  
   100.1 --- a/src/share/vm/runtime/osThread.hpp	Thu Apr 19 12:18:46 2012 -0700
   100.2 +++ b/src/share/vm/runtime/osThread.hpp	Fri Apr 20 16:23:48 2012 -0700
   100.3 @@ -1,5 +1,5 @@
   100.4  /*
   100.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   100.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   100.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   100.8   *
   100.9   * This code is free software; you can redistribute it and/or modify it
  100.10 @@ -98,6 +98,7 @@
  100.11  
  100.12    // For java intrinsics:
  100.13    static ByteSize interrupted_offset()            { return byte_offset_of(OSThread, _interrupted); }
  100.14 +  static ByteSize thread_id_offset()              { return byte_offset_of(OSThread, _thread_id); }
  100.15  
  100.16    // Platform dependent stuff
  100.17  #ifdef TARGET_OS_FAMILY_linux
   101.1 --- a/src/share/vm/runtime/thread.cpp	Thu Apr 19 12:18:46 2012 -0700
   101.2 +++ b/src/share/vm/runtime/thread.cpp	Fri Apr 20 16:23:48 2012 -0700
   101.3 @@ -3468,13 +3468,13 @@
   101.4      create_vm_init_libraries();
   101.5    }
   101.6  
   101.7 +  // Notify JVMTI agents that VM initialization is complete - nop if no agents.
   101.8 +  JvmtiExport::post_vm_initialized();
   101.9 +
  101.10    if (!TRACE_START()) {
  101.11      vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
  101.12    }
  101.13  
  101.14 -  // Notify JVMTI agents that VM initialization is complete - nop if no agents.
  101.15 -  JvmtiExport::post_vm_initialized();
  101.16 -
  101.17    if (CleanChunkPoolAsync) {
  101.18      Chunk::start_chunk_pool_cleaner_task();
  101.19    }
   102.1 --- a/src/share/vm/runtime/vmStructs.cpp	Thu Apr 19 12:18:46 2012 -0700
   102.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Apr 20 16:23:48 2012 -0700
   102.3 @@ -292,8 +292,6 @@
   102.4    nonstatic_field(instanceKlass,               _method_ordering,                              typeArrayOop)                          \
   102.5    nonstatic_field(instanceKlass,               _local_interfaces,                             objArrayOop)                           \
   102.6    nonstatic_field(instanceKlass,               _transitive_interfaces,                        objArrayOop)                           \
   102.7 -  nonstatic_field(instanceKlass,               _nof_implementors,                             int)                                   \
   102.8 -  nonstatic_field(instanceKlass,               _implementors[0],                              klassOop)                              \
   102.9    nonstatic_field(instanceKlass,               _fields,                                       typeArrayOop)                          \
  102.10    nonstatic_field(instanceKlass,               _java_fields_count,                            u2)                                    \
  102.11    nonstatic_field(instanceKlass,               _constants,                                    constantPoolOop)                       \
  102.12 @@ -2343,7 +2341,6 @@
  102.13    /* instanceKlass enum                */                                 \
  102.14    /*************************************/                                 \
  102.15                                                                            \
  102.16 -  declare_constant(instanceKlass::implementors_limit)                     \
  102.17                                                                            \
  102.18    /*************************************/                                 \
  102.19    /* FieldInfo FieldOffset enum        */                                 \
   103.1 --- a/src/share/vm/runtime/vmThread.cpp	Thu Apr 19 12:18:46 2012 -0700
   103.2 +++ b/src/share/vm/runtime/vmThread.cpp	Fri Apr 20 16:23:48 2012 -0700
   103.3 @@ -1,5 +1,5 @@
   103.4  /*
   103.5 - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
   103.6 + * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
   103.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   103.8   *
   103.9   * This code is free software; you can redistribute it and/or modify it
  103.10 @@ -304,7 +304,7 @@
  103.11      os::check_heap();
  103.12      // Silent verification so as not to pollute normal output,
  103.13      // unless we really asked for it.
  103.14 -    Universe::verify(true, !(PrintGCDetails || Verbose));
  103.15 +    Universe::verify(!(PrintGCDetails || Verbose));
  103.16    }
  103.17  
  103.18    CompileBroker::set_should_block();
   104.1 --- a/src/share/vm/trace/traceMacros.hpp	Thu Apr 19 12:18:46 2012 -0700
   104.2 +++ b/src/share/vm/trace/traceMacros.hpp	Fri Apr 20 16:23:48 2012 -0700
   104.3 @@ -1,5 +1,5 @@
   104.4  /*
   104.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   104.6 + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   104.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   104.8   *
   104.9   * This code is free software; you can redistribute it and/or modify it
  104.10 @@ -43,5 +43,9 @@
  104.11  #define TRACE_SET_KLASS_TRACE_ID(x1, x2) do { } while (0)
  104.12  #define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1
  104.13  #define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2
  104.14 +#define TRACE_DEFINE_OFFSET typedef int ___IGNORED_hs_trace_type3
  104.15 +#define TRACE_ID_OFFSET in_ByteSize(0); ShouldNotReachHere()
  104.16 +#define TRACE_TEMPLATES(template)
  104.17 +#define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)
  104.18  
  104.19  #endif
   105.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   105.2 +++ b/test/runtime/7158988/FieldMonitor.java	Fri Apr 20 16:23:48 2012 -0700
   105.3 @@ -0,0 +1,249 @@
   105.4 +/*
   105.5 + * Copyright 2012 SAP AG.  All Rights Reserved.
   105.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   105.7 + *
   105.8 + * This code is free software; you can redistribute it and/or modify it
   105.9 + * under the terms of the GNU General Public License version 2 only, as
  105.10 + * published by the Free Software Foundation.
  105.11 + *
  105.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  105.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  105.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  105.15 + * version 2 for more details (a copy is included in the LICENSE file that
  105.16 + * accompanied this code).
  105.17 + *
  105.18 + * You should have received a copy of the GNU General Public License version
  105.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  105.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  105.21 + *
  105.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  105.23 + * or visit www.oracle.com if you need additional information or have any
  105.24 + * questions.
  105.25 + */
  105.26 +
  105.27 +/*
  105.28 + * @test FieldMonitor.java
  105.29 + * @bug 7158988
  105.30 + * @summary verify jvm does not crash while debugging
  105.31 + * @run shell TestFieldMonitor.sh
  105.32 + * @author axel.siebenborn@sap.com
  105.33 + */
  105.34 +import java.io.BufferedReader;
  105.35 +import java.io.IOException;
  105.36 +import java.io.InputStream;
  105.37 +import java.io.InputStreamReader;
  105.38 +import java.io.OutputStream;
  105.39 +import java.io.OutputStreamWriter;
  105.40 +import java.io.Reader;
  105.41 +import java.io.Writer;
  105.42 +import java.util.Iterator;
  105.43 +import java.util.List;
  105.44 +import java.util.Map;
  105.45 +
  105.46 +import com.sun.jdi.Bootstrap;
  105.47 +import com.sun.jdi.Field;
  105.48 +import com.sun.jdi.ReferenceType;
  105.49 +import com.sun.jdi.VirtualMachine;
  105.50 +import com.sun.jdi.connect.Connector;
  105.51 +import com.sun.jdi.connect.IllegalConnectorArgumentsException;
  105.52 +import com.sun.jdi.connect.LaunchingConnector;
  105.53 +import com.sun.jdi.connect.VMStartException;
  105.54 +import com.sun.jdi.event.ClassPrepareEvent;
  105.55 +import com.sun.jdi.event.Event;
  105.56 +import com.sun.jdi.event.EventQueue;
  105.57 +import com.sun.jdi.event.EventSet;
  105.58 +import com.sun.jdi.event.ModificationWatchpointEvent;
  105.59 +import com.sun.jdi.event.VMDeathEvent;
  105.60 +import com.sun.jdi.event.VMDisconnectEvent;
  105.61 +import com.sun.jdi.request.ClassPrepareRequest;
  105.62 +import com.sun.jdi.request.EventRequest;
  105.63 +import com.sun.jdi.request.EventRequestManager;
  105.64 +import com.sun.jdi.request.ModificationWatchpointRequest;
  105.65 +
  105.66 +public class FieldMonitor {
  105.67 +
  105.68 +  public static final String CLASS_NAME = "TestPostFieldModification";
  105.69 +  public static final String FIELD_NAME = "value";
  105.70 +  public static final String ARGUMENTS = "-Xshare:off -XX:+PrintGC";
  105.71 +
  105.72 +  public static void main(String[] args)
  105.73 +      throws IOException, InterruptedException {
  105.74 +
  105.75 +    StringBuffer sb = new StringBuffer();
  105.76 +
  105.77 +    for (int i=0; i < args.length; i++) {
  105.78 +        sb.append(' ');
  105.79 +        sb.append(args[i]);
  105.80 +    }
  105.81 +    //VirtualMachine vm = launchTarget(sb.toString());
  105.82 +    VirtualMachine vm = launchTarget(CLASS_NAME);
  105.83 +
  105.84 +    System.out.println("Vm launched");
  105.85 +    // set watch field on already loaded classes
  105.86 +    List<ReferenceType> referenceTypes = vm
  105.87 +        .classesByName(CLASS_NAME);
  105.88 +    for (ReferenceType refType : referenceTypes) {
  105.89 +      addFieldWatch(vm, refType);
  105.90 +    }
  105.91 +    // watch for loaded classes
  105.92 +    addClassWatch(vm);
  105.93 +
  105.94 +    // process events
  105.95 +    EventQueue eventQueue = vm.eventQueue();
  105.96 +    // resume the vm
  105.97 +
  105.98 +    Process process = vm.process();
  105.99 +
 105.100 +
 105.101 +    // Copy target's output and error to our output and error.
 105.102 +    Thread outThread = new StreamRedirectThread("out reader", process.getInputStream());
 105.103 +    Thread errThread = new StreamRedirectThread("error reader", process.getErrorStream());
 105.104 +
 105.105 +    errThread.start();
 105.106 +    outThread.start();
 105.107 +
 105.108 +
 105.109 +    vm.resume();
 105.110 +    boolean connected = true;
 105.111 +    while (connected) {
 105.112 +      EventSet eventSet = eventQueue.remove();
 105.113 +      for (Event event : eventSet) {
 105.114 +        if (event instanceof VMDeathEvent
 105.115 +            || event instanceof VMDisconnectEvent) {
 105.116 +          // exit
 105.117 +          connected = false;
 105.118 +        } else if (event instanceof ClassPrepareEvent) {
 105.119 +          // watch field on loaded class
 105.120 +          System.out.println("ClassPrepareEvent");
 105.121 +          ClassPrepareEvent classPrepEvent = (ClassPrepareEvent) event;
 105.122 +          ReferenceType refType = classPrepEvent
 105.123 +              .referenceType();
 105.124 +          addFieldWatch(vm, refType);
 105.125 +        } else if (event instanceof ModificationWatchpointEvent) {
 105.126 +          System.out.println("sleep for 500 ms");
 105.127 +          Thread.sleep(500);
 105.128 +          System.out.println("resume...");
 105.129 +
 105.130 +          ModificationWatchpointEvent modEvent = (ModificationWatchpointEvent) event;
 105.131 +          System.out.println("old="
 105.132 +              + modEvent.valueCurrent());
 105.133 +          System.out.println("new=" + modEvent.valueToBe());
 105.134 +          System.out.println();
 105.135 +        }
 105.136 +      }
 105.137 +      eventSet.resume();
 105.138 +    }
 105.139 +    // Shutdown begins when event thread terminates
 105.140 +    try {
 105.141 +        errThread.join(); // Make sure output is forwarded
 105.142 +        outThread.join();
 105.143 +    } catch (InterruptedException exc) {
 105.144 +        // we don't interrupt
 105.145 +    }
 105.146 +  }
 105.147 +
 105.148 +  /**
 105.149 +   * Find a com.sun.jdi.CommandLineLaunch connector
 105.150 +   */
 105.151 +  static LaunchingConnector findLaunchingConnector() {
 105.152 +    List <Connector> connectors = Bootstrap.virtualMachineManager().allConnectors();
 105.153 +    Iterator <Connector> iter = connectors.iterator();
 105.154 +    while (iter.hasNext()) {
 105.155 +      Connector connector = iter.next();
 105.156 +      if (connector.name().equals("com.sun.jdi.CommandLineLaunch")) {
 105.157 +        return (LaunchingConnector)connector;
 105.158 +      }
 105.159 +    }
 105.160 +    throw new Error("No launching connector");
 105.161 +  }
 105.162 +  /**
 105.163 +   * Return the launching connector's arguments.
 105.164 +   */
 105.165 + static Map <String,Connector.Argument> connectorArguments(LaunchingConnector connector, String mainArgs) {
 105.166 +      Map<String,Connector.Argument> arguments = connector.defaultArguments();
 105.167 +      for (String key : arguments.keySet()) {
 105.168 +        System.out.println(key);
 105.169 +      }
 105.170 +
 105.171 +      Connector.Argument mainArg = (Connector.Argument)arguments.get("main");
 105.172 +      if (mainArg == null) {
 105.173 +          throw new Error("Bad launching connector");
 105.174 +      }
 105.175 +      mainArg.setValue(mainArgs);
 105.176 +
 105.177 +      Connector.Argument optionsArg = (Connector.Argument)arguments.get("options");
 105.178 +      if (optionsArg == null) {
 105.179 +        throw new Error("Bad launching connector");
 105.180 +      }
 105.181 +      optionsArg.setValue(ARGUMENTS);
 105.182 +      return arguments;
 105.183 +  }
 105.184 +
 105.185 + static VirtualMachine launchTarget(String mainArgs) {
 105.186 +    LaunchingConnector connector = findLaunchingConnector();
 105.187 +    Map  arguments = connectorArguments(connector, mainArgs);
 105.188 +    try {
 105.189 +        return (VirtualMachine) connector.launch(arguments);
 105.190 +    } catch (IOException exc) {
 105.191 +        throw new Error("Unable to launch target VM: " + exc);
 105.192 +    } catch (IllegalConnectorArgumentsException exc) {
 105.193 +        throw new Error("Internal error: " + exc);
 105.194 +    } catch (VMStartException exc) {
 105.195 +        throw new Error("Target VM failed to initialize: " +
 105.196 +                        exc.getMessage());
 105.197 +    }
 105.198 +}
 105.199 +
 105.200 +
 105.201 +  private static void addClassWatch(VirtualMachine vm) {
 105.202 +    EventRequestManager erm = vm.eventRequestManager();
 105.203 +    ClassPrepareRequest classPrepareRequest = erm
 105.204 +        .createClassPrepareRequest();
 105.205 +    classPrepareRequest.addClassFilter(CLASS_NAME);
 105.206 +    classPrepareRequest.setEnabled(true);
 105.207 +  }
 105.208 +
 105.209 +
 105.210 +  private static void addFieldWatch(VirtualMachine vm,
 105.211 +      ReferenceType refType) {
 105.212 +    EventRequestManager erm = vm.eventRequestManager();
 105.213 +    Field field = refType.fieldByName(FIELD_NAME);
 105.214 +    ModificationWatchpointRequest modificationWatchpointRequest = erm
 105.215 +        .createModificationWatchpointRequest(field);
 105.216 +    modificationWatchpointRequest.setSuspendPolicy(EventRequest.SUSPEND_EVENT_THREAD);
 105.217 +    modificationWatchpointRequest.setEnabled(true);
 105.218 +  }
 105.219 +}
 105.220 +
 105.221 +class StreamRedirectThread extends Thread {
 105.222 +
 105.223 +  private final BufferedReader in;
 105.224 +
 105.225 +  private static final int BUFFER_SIZE = 2048;
 105.226 +
 105.227 +  /**
 105.228 +   * Set up for copy.
 105.229 +   * @param name  Name of the thread
 105.230 +   * @param in    Stream to copy from
 105.231 +   * @param out   Stream to copy to
 105.232 +   */
 105.233 +  StreamRedirectThread(String name, InputStream in) {
 105.234 +    super(name);
 105.235 +    this.in = new BufferedReader(new InputStreamReader(in));
 105.236 +  }
 105.237 +
 105.238 +  /**
 105.239 +   * Copy.
 105.240 +   */
 105.241 +  public void run() {
 105.242 +    try {
 105.243 +      String line;
 105.244 +        while ((line = in.readLine ()) != null) {
 105.245 +          System.out.println ("testvm: " + line);
 105.246 +      }
 105.247 +     System.out.flush();
 105.248 +    } catch(IOException exc) {
 105.249 +      System.err.println("Child I/O Transfer - " + exc);
 105.250 +    }
 105.251 +  }
 105.252 +}
   106.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   106.2 +++ b/test/runtime/7158988/TestFieldMonitor.sh	Fri Apr 20 16:23:48 2012 -0700
   106.3 @@ -0,0 +1,94 @@
   106.4 +#!/bin/sh
   106.5 +
   106.6 +if [ "${TESTSRC}" = "" ]
   106.7 +then TESTSRC=.
   106.8 +fi
   106.9 +
  106.10 +if [ "${TESTJAVA}" = "" ]
  106.11 +then
  106.12 +  PARENT=`dirname \`which java\``
  106.13 +  TESTJAVA=`dirname ${PARENT}`
  106.14 +  echo "TESTJAVA not set, selecting " ${TESTJAVA}
  106.15 +  echo "If this is incorrect, try setting the variable manually."
  106.16 +fi
  106.17 +
  106.18 +if [ "${TESTCLASSES}" = "" ]
  106.19 +then
  106.20 +  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
  106.21 +  exit 1
  106.22 +fi
  106.23 +
  106.24 +BIT_FLAG=""
  106.25 +
  106.26 +# set platform-dependent variables
  106.27 +OS=`uname -s`
  106.28 +case "$OS" in
  106.29 +  SunOS | Linux )
  106.30 +    NULL=/dev/null
  106.31 +    PS=":"
  106.32 +    FS="/"
  106.33 +    ## for solaris, linux it's HOME
  106.34 +    FILE_LOCATION=$HOME
  106.35 +    if [ -f ${FILE_LOCATION}${FS}JDK64BIT -a ${OS} = "SunOS" -a `uname -p`='sparc' ]
  106.36 +    then
  106.37 +        BIT_FLAG="-d64"
  106.38 +    fi
  106.39 +    ;;
  106.40 +  Windows_95 | Windows_98 | Windows_ME )
  106.41 +    NULL=NUL
  106.42 +    PS=";"
  106.43 +    FS="\\"
  106.44 +    echo "Test skipped, only for WinNT"
  106.45 +    exit 0
  106.46 +    ;;
  106.47 +  Windows_NT )
  106.48 +    NULL=NUL
  106.49 +    PS=";"
  106.50 +    FS="\\"
  106.51 +    ;;
  106.52 +  * )
  106.53 +    echo "Unrecognized system!"
  106.54 +    exit 1;
  106.55 +    ;;
  106.56 +esac
  106.57 +
  106.58 +#CLASSPATH=.${PS}${TESTCLASSES} ; export CLASSPATH
  106.59 +
  106.60 +cp ${TESTSRC}${FS}*.java .
  106.61 +
  106.62 +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -fullversion
  106.63 +
  106.64 +${TESTJAVA}${FS}bin${FS}javac -classpath .${PS}$TESTJAVA${FS}lib${FS}tools.jar *.java
  106.65 +
  106.66 +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -classpath .${PS}$TESTJAVA${FS}lib${FS}tools.jar FieldMonitor > test.out 2>&1 &
  106.67 +
  106.68 +P_PID=$!
  106.69 +
  106.70 +sleep 60
  106.71 +STATUS=0
  106.72 +
  106.73 +case "$OS" in
  106.74 +    SunOS | Linux )
  106.75 +        ps -ef | grep $P_PID | grep -v grep > ${NULL}
  106.76 +        if [ $? = 0 ]; then
  106.77 +            kill -9 $P_PID
  106.78 +            STATUS=1
  106.79 +        fi
  106.80 +        ;;
  106.81 +      * )
  106.82 +        ps | grep -i "FieldMonitor" | grep -v grep > ${NULL}
  106.83 +        if [ $? = 0 ]; then
  106.84 +            C_PID=`ps | grep -i "FieldMonitor" | awk '{print $1}'`
  106.85 +            kill -s 9 $C_PID
  106.86 +            STATUS=1
  106.87 +        fi
  106.88 +        ;;
  106.89 +esac
  106.90 +
  106.91 +grep "A fatal error has been detected" test.out > ${NULL}
  106.92 +if [ $? = 0 ]; then
  106.93 +    cat test.out
  106.94 +    STATUS=1
  106.95 +fi
  106.96 +
  106.97 +exit $STATUS
   107.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   107.2 +++ b/test/runtime/7158988/TestPostFieldModification.java	Fri Apr 20 16:23:48 2012 -0700
   107.3 @@ -0,0 +1,58 @@
   107.4 +/*
   107.5 + * Copyright 2012 SAP AG.  All Rights Reserved.
   107.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   107.7 + *
   107.8 + * This code is free software; you can redistribute it and/or modify it
   107.9 + * under the terms of the GNU General Public License version 2 only, as
  107.10 + * published by the Free Software Foundation.
  107.11 + *
  107.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  107.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  107.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  107.15 + * version 2 for more details (a copy is included in the LICENSE file that
  107.16 + * accompanied this code).
  107.17 + *
  107.18 + * You should have received a copy of the GNU General Public License version
  107.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  107.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  107.21 + *
  107.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  107.23 + * or visit www.oracle.com if you need additional information or have any
  107.24 + * questions.
  107.25 + */
  107.26 +
  107.27 +public class TestPostFieldModification {
  107.28 +
  107.29 +  public String value;  // watch modification of value
  107.30 +
  107.31 +  public static void main(String[] args){
  107.32 +
  107.33 +    System.out.println("Start threads");
  107.34 +    // this thread modifies the field 'value'
  107.35 +    new Thread() {
  107.36 +      TestPostFieldModification test = new TestPostFieldModification();
  107.37 +      public void run() {
  107.38 +        test.value="test";
  107.39 +        for(int i = 0; i < 10; i++) {
  107.40 +          test.value += new String("_test");
  107.41 +        }
  107.42 +      }
  107.43 +    }.start();
  107.44 +
  107.45 +    // this thread is used to trigger a gc
  107.46 +    Thread d = new Thread() {
  107.47 +      public void run() {
  107.48 +        while(true) {
  107.49 +          try {
  107.50 +            Thread.sleep(100);
  107.51 +          } catch (InterruptedException e) {
  107.52 +
  107.53 +          }
  107.54 +          System.gc();
  107.55 +        }
  107.56 +      }
  107.57 +    };
  107.58 +    d.setDaemon(true);
  107.59 +    d.start();
  107.60 +  }
  107.61 +}

mercurial