changeset:   4985:3c9b7ef92c61
parent:      4978:57ac6a688ae6
parent:      4984:c115fac239eb
child:       4986:d1644a010f52
child:       4987:f258c5828eb8
child:       4990:9f96b7a853bc
author:      dcubed
date:        Fri, 26 Apr 2013 08:40:24 -0700
summary:     Merge

     1.1 --- a/src/share/vm/ci/ciEnv.cpp	Fri Apr 26 00:40:22 2013 -0700
     1.2 +++ b/src/share/vm/ci/ciEnv.cpp	Fri Apr 26 08:40:24 2013 -0700
     1.3 @@ -483,7 +483,8 @@
     1.4      {
     1.5        // We have to lock the cpool to keep the oop from being resolved
     1.6        // while we are accessing it.
     1.7 -        MonitorLockerEx ml(cpool->lock());
     1.8 +      oop cplock = cpool->lock();
     1.9 +      ObjectLocker ol(cplock, THREAD, cplock != NULL);
    1.10        constantTag tag = cpool->tag_at(index);
    1.11        if (tag.is_klass()) {
    1.12          // The klass has been inserted into the constant pool
     2.1 --- a/src/share/vm/classfile/classFileParser.hpp	Fri Apr 26 00:40:22 2013 -0700
     2.2 +++ b/src/share/vm/classfile/classFileParser.hpp	Fri Apr 26 08:40:24 2013 -0700
     2.3 @@ -304,7 +304,19 @@
     2.4  
     2.5    inline void assert_property(bool b, const char* msg, TRAPS) {
     2.6  #ifdef ASSERT
     2.7 -    if (!b) { fatal(msg); }
     2.8 +    if (!b) {
     2.9 +      ResourceMark rm(THREAD);
    2.10 +      fatal(err_msg(msg, _class_name->as_C_string()));
    2.11 +    }
    2.12 +#endif
    2.13 +  }
    2.14 +
    2.15 +  inline void assert_property(bool b, const char* msg, int index, TRAPS) {
    2.16 +#ifdef ASSERT
    2.17 +    if (!b) {
    2.18 +      ResourceMark rm(THREAD);
    2.19 +      fatal(err_msg(msg, index, _class_name->as_C_string()));
    2.20 +    }
    2.21  #endif
    2.22    }
    2.23  
    2.24 @@ -312,7 +324,7 @@
    2.25      if (_need_verify) {
    2.26        guarantee_property(property, msg, index, CHECK);
    2.27      } else {
    2.28 -      assert_property(property, msg, CHECK);
    2.29 +      assert_property(property, msg, index, CHECK);
    2.30      }
    2.31    }
    2.32  
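
The new assert_property() overloads above fold the failing class's name (and, in the second overload, an index) into the fatal message via err_msg(). Below is a minimal, self-contained sketch of that printf-style formatting; report_fatal() and the sample format string are hypothetical stand-ins, not HotSpot's real err_msg()/fatal().

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for fatal(err_msg(...)); only the printf-style
    // formatting shown in the hunk above is assumed.
    static void report_fatal(const char* fmt, ...) {
      char buf[256];
      va_list ap;
      va_start(ap, fmt);
      vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      fprintf(stderr, "fatal: %s\n", buf);
      abort();
    }

    // Mirrors the new overload: the message template takes the index, then the class name.
    static void assert_property(bool b, const char* msg, int index, const char* class_name) {
      if (!b) {
        report_fatal(msg, index, class_name);
      }
    }

    int main() {
      assert_property(false, "Bad index %d in class file %s", 42, "com/example/Foo");
      return 0;
    }
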
     3.1 --- a/src/share/vm/classfile/classLoaderData.cpp	Fri Apr 26 00:40:22 2013 -0700
     3.2 +++ b/src/share/vm/classfile/classLoaderData.cpp	Fri Apr 26 08:40:24 2013 -0700
     3.3 @@ -277,6 +277,9 @@
     3.4  void ClassLoaderData::unload() {
     3.5    _unloading = true;
     3.6  
     3.7 +  // Tell serviceability tools these classes are unloading
     3.8 +  classes_do(InstanceKlass::notify_unload_class);
     3.9 +
    3.10    if (TraceClassLoaderData) {
    3.11      ResourceMark rm;
    3.12      tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
    3.13 @@ -300,6 +303,9 @@
    3.14  
    3.15  
    3.16  ClassLoaderData::~ClassLoaderData() {
    3.17 +  // Release C heap structures for all the classes.
    3.18 +  classes_do(InstanceKlass::release_C_heap_structures);
    3.19 +
    3.20    Metaspace *m = _metaspace;
    3.21    if (m != NULL) {
    3.22      _metaspace = NULL;
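
ClassLoaderData::unload() and the destructor above now delegate per-class work to classes_do() with static InstanceKlass callbacks (defined later in this changeset, in instanceKlass.cpp). The sketch below is a minimal model of that callback shape; the Klass/LoaderData types and the assumption that classes_do() simply walks the loader's class list are illustrative, not the real implementation.

    #include <vector>

    struct Klass { const char* name; };   // hypothetical stand-in

    struct LoaderData {
      std::vector<Klass*> klasses;
      // Apply the callback to every class, as classes_do() is used above.
      void classes_do(void (*f)(Klass*)) {
        for (Klass* k : klasses) f(k);
      }
    };

    static void notify_unload_class(Klass* k)       { (void)k; /* post JVMTI / service events */ }
    static void release_C_heap_structures(Klass* k) { (void)k; /* free native side data       */ }

    int main() {
      LoaderData ld;
      Klass a{"A"}, b{"B"};
      ld.klasses = {&a, &b};
      ld.classes_do(notify_unload_class);        // as in ClassLoaderData::unload()
      ld.classes_do(release_C_heap_structures);  // as in ~ClassLoaderData()
      return 0;
    }
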
     4.1 --- a/src/share/vm/classfile/dictionary.cpp	Fri Apr 26 00:40:22 2013 -0700
     4.2 +++ b/src/share/vm/classfile/dictionary.cpp	Fri Apr 26 08:40:24 2013 -0700
     4.3 @@ -27,7 +27,6 @@
     4.4  #include "classfile/systemDictionary.hpp"
     4.5  #include "oops/oop.inline.hpp"
     4.6  #include "prims/jvmtiRedefineClassesTrace.hpp"
     4.7 -#include "services/classLoadingService.hpp"
     4.8  #include "utilities/hashtable.inline.hpp"
     4.9  
    4.10  
    4.11 @@ -156,19 +155,7 @@
    4.12            if (k_def_class_loader_data == loader_data) {
    4.13              // This is the defining entry, so the referred class is about
    4.14              // to be unloaded.
    4.15 -            // Notify the debugger and clean up the class.
    4.16              class_was_unloaded = true;
    4.17 -            // notify the debugger
    4.18 -            if (JvmtiExport::should_post_class_unload()) {
    4.19 -              JvmtiExport::post_class_unload(ik);
    4.20 -            }
    4.21 -
    4.22 -            // notify ClassLoadingService of class unload
    4.23 -            ClassLoadingService::notify_class_unloaded(ik);
    4.24 -
    4.25 -            // Clean up C heap
    4.26 -            ik->release_C_heap_structures();
    4.27 -            ik->constants()->release_C_heap_structures();
    4.28            }
    4.29            // Also remove this system dictionary entry.
    4.30            purge_entry = true;
     5.1 --- a/src/share/vm/oops/constantPool.cpp	Fri Apr 26 00:40:22 2013 -0700
     5.2 +++ b/src/share/vm/oops/constantPool.cpp	Fri Apr 26 08:40:24 2013 -0700
     5.3 @@ -40,6 +40,7 @@
     5.4  #include "runtime/init.hpp"
     5.5  #include "runtime/javaCalls.hpp"
     5.6  #include "runtime/signature.hpp"
     5.7 +#include "runtime/synchronizer.hpp"
     5.8  #include "runtime/vframe.hpp"
     5.9  
    5.10  ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
    5.11 @@ -69,7 +70,6 @@
    5.12  
    5.13    // only set to non-zero if constant pool is merged by RedefineClasses
    5.14    set_version(0);
    5.15 -  set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
    5.16  
    5.17    // initialize tag array
    5.18    int length = tags->length();
    5.19 @@ -95,9 +95,6 @@
    5.20  void ConstantPool::release_C_heap_structures() {
    5.21    // walk constant pool and decrement symbol reference counts
    5.22    unreference_symbols();
    5.23 -
    5.24 -  delete _lock;
    5.25 -  set_lock(NULL);
    5.26  }
    5.27  
    5.28  objArrayOop ConstantPool::resolved_references() const {
    5.29 @@ -154,9 +151,6 @@
    5.30        ClassLoaderData* loader_data = pool_holder()->class_loader_data();
    5.31        set_resolved_references(loader_data->add_handle(refs_handle));
    5.32      }
    5.33 -
    5.34 -    // Also need to recreate the mutex.  Make sure this matches the constructor
    5.35 -    set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
    5.36    }
    5.37  }
    5.38  
    5.39 @@ -167,7 +161,23 @@
    5.40    set_resolved_reference_length(
    5.41      resolved_references() != NULL ? resolved_references()->length() : 0);
    5.42    set_resolved_references(NULL);
    5.43 -  set_lock(NULL);
    5.44 +}
    5.45 +
    5.46 +oop ConstantPool::lock() {
    5.47 +  if (_pool_holder) {
    5.48 +    // We re-use the _pool_holder's init_lock to reduce footprint.
    5.49 +    // Notes on deadlocks:
    5.50 +    // [1] This lock is a Java oop, so it can be recursively locked by
    5.51 +    //     the same thread without self-deadlocks.
    5.52 +    // [2] Deadlock will happen if there is circular dependency between
    5.53 +    //     the <clinit> of two Java classes. However, in this case,
    5.54 +    //     the deadlock would have happened long before we reach
    5.55 +    //     ConstantPool::lock(), so reusing init_lock does not
    5.56 +    //     increase the possibility of deadlock.
    5.57 +    return _pool_holder->init_lock();
    5.58 +  } else {
    5.59 +    return NULL;
    5.60 +  }
    5.61  }
    5.62  
    5.63  int ConstantPool::cp_to_object_index(int cp_index) {
    5.64 @@ -208,7 +218,9 @@
    5.65  
    5.66    Symbol* name = NULL;
    5.67    Handle       loader;
    5.68 -  {  MonitorLockerEx ml(this_oop->lock());
    5.69 +  {
    5.70 +    oop cplock = this_oop->lock();
    5.71 +    ObjectLocker ol(cplock , THREAD, cplock != NULL);
    5.72  
    5.73      if (this_oop->tag_at(which).is_unresolved_klass()) {
    5.74        if (this_oop->tag_at(which).is_unresolved_klass_in_error()) {
    5.75 @@ -255,7 +267,8 @@
    5.76  
    5.77        bool throw_orig_error = false;
    5.78        {
    5.79 -        MonitorLockerEx ml(this_oop->lock());
    5.80 +        oop cplock = this_oop->lock();
    5.81 +        ObjectLocker ol(cplock, THREAD, cplock != NULL);
    5.82  
    5.83          // some other thread has beaten us and has resolved the class.
    5.84          if (this_oop->tag_at(which).is_klass()) {
    5.85 @@ -323,7 +336,8 @@
    5.86        }
    5.87        return k();
    5.88      } else {
    5.89 -      MonitorLockerEx ml(this_oop->lock());
    5.90 +      oop cplock = this_oop->lock();
    5.91 +      ObjectLocker ol(cplock, THREAD, cplock != NULL);
    5.92        // Only updated constant pool - if it is resolved.
    5.93        do_resolve = this_oop->tag_at(which).is_unresolved_klass();
    5.94        if (do_resolve) {
    5.95 @@ -619,7 +633,8 @@
    5.96                                       int tag, TRAPS) {
    5.97    ResourceMark rm;
    5.98    Symbol* error = PENDING_EXCEPTION->klass()->name();
    5.99 -  MonitorLockerEx ml(this_oop->lock());  // lock cpool to change tag.
   5.100 +  oop cplock = this_oop->lock();
   5.101 +  ObjectLocker ol(cplock, THREAD, cplock != NULL);  // lock cpool to change tag.
   5.102  
   5.103    int error_tag = (tag == JVM_CONSTANT_MethodHandle) ?
   5.104             JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError;
   5.105 @@ -780,7 +795,8 @@
   5.106    if (cache_index >= 0) {
   5.107      // Cache the oop here also.
   5.108      Handle result_handle(THREAD, result_oop);
   5.109 -    MonitorLockerEx ml(this_oop->lock());  // don't know if we really need this
   5.110 +    oop cplock = this_oop->lock();
   5.111 +    ObjectLocker ol(cplock, THREAD, cplock != NULL);  // don't know if we really need this
   5.112      oop result = this_oop->resolved_references()->obj_at(cache_index);
   5.113      // Benign race condition:  resolved_references may already be filled in while we were trying to lock.
   5.114      // The important thing here is that all threads pick up the same result.
   5.115 @@ -1043,24 +1059,13 @@
   5.116  
   5.117    case JVM_CONSTANT_InvokeDynamic:
   5.118    {
   5.119 -    int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1);
   5.120 -    int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2);
   5.121 -    bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
   5.122 -    if (!match)  return false;
   5.123 -    k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
   5.124 -    k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
   5.125 -    match = compare_entry_to(k1, cp2, k2, CHECK_false);
   5.126 -    if (!match)  return false;
   5.127 -    int argc = invoke_dynamic_argument_count_at(index1);
   5.128 -    if (argc == cp2->invoke_dynamic_argument_count_at(index2)) {
   5.129 -      for (int j = 0; j < argc; j++) {
   5.130 -        k1 = invoke_dynamic_argument_index_at(index1, j);
   5.131 -        k2 = cp2->invoke_dynamic_argument_index_at(index2, j);
   5.132 -        match = compare_entry_to(k1, cp2, k2, CHECK_false);
   5.133 -        if (!match)  return false;
   5.134 -      }
   5.135 -      return true;           // got through loop; all elements equal
   5.136 -    }
   5.137 +    int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
   5.138 +    int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
   5.139 +    int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
   5.140 +    int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
   5.141 +    bool match = compare_entry_to(k1, cp2, k2, CHECK_false) &&
   5.142 +                 compare_operand_to(i1, cp2, i2, CHECK_false);
   5.143 +    return match;
   5.144    } break;
   5.145  
   5.146    case JVM_CONSTANT_String:
   5.147 @@ -1095,6 +1100,80 @@
   5.148  } // end compare_entry_to()
   5.149  
   5.150  
   5.151 +// Resize the operands array with delta_len and delta_size.
   5.152 +// Used in RedefineClasses for CP merge.
   5.153 +void ConstantPool::resize_operands(int delta_len, int delta_size, TRAPS) {
   5.154 +  int old_len  = operand_array_length(operands());
   5.155 +  int new_len  = old_len + delta_len;
   5.156 +  int min_len  = (delta_len > 0) ? old_len : new_len;
   5.157 +
   5.158 +  int old_size = operands()->length();
   5.159 +  int new_size = old_size + delta_size;
   5.160 +  int min_size = (delta_size > 0) ? old_size : new_size;
   5.161 +
   5.162 +  ClassLoaderData* loader_data = pool_holder()->class_loader_data();
   5.163 +  Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, new_size, CHECK);
   5.164 +
   5.165 +  // Set index in the resized array for existing elements only
   5.166 +  for (int idx = 0; idx < min_len; idx++) {
   5.167 +    int offset = operand_offset_at(idx);                       // offset in original array
   5.168 +    operand_offset_at_put(new_ops, idx, offset + 2*delta_len); // offset in resized array
   5.169 +  }
   5.170 +  // Copy the bootstrap specifiers only
   5.171 +  Copy::conjoint_memory_atomic(operands()->adr_at(2*old_len),
   5.172 +                               new_ops->adr_at(2*new_len),
   5.173 +                               (min_size - 2*min_len) * sizeof(u2));
   5.174 +  // Explicitly deallocate old operands array.
   5.175 +  // Note, it is not needed for 7u backport.
   5.176 +  if ( operands() != NULL) { // the safety check
   5.177 +    MetadataFactory::free_array<u2>(loader_data, operands());
   5.178 +  }
   5.179 +  set_operands(new_ops);
   5.180 +} // end resize_operands()
   5.181 +
   5.182 +
   5.183 +// Extend the operands array with the length and size of the ext_cp operands.
   5.184 +// Used in RedefineClasses for CP merge.
   5.185 +void ConstantPool::extend_operands(constantPoolHandle ext_cp, TRAPS) {
   5.186 +  int delta_len = operand_array_length(ext_cp->operands());
   5.187 +  if (delta_len == 0) {
   5.188 +    return; // nothing to do
   5.189 +  }
   5.190 +  int delta_size = ext_cp->operands()->length();
   5.191 +
   5.192 +  assert(delta_len  > 0 && delta_size > 0, "extended operands array must be bigger");
   5.193 +
   5.194 +  if (operand_array_length(operands()) == 0) {
   5.195 +    ClassLoaderData* loader_data = pool_holder()->class_loader_data();
   5.196 +    Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, delta_size, CHECK);
   5.197 +    // The first element index defines the offset of second part
   5.198 +    operand_offset_at_put(new_ops, 0, 2*delta_len); // offset in new array
   5.199 +    set_operands(new_ops);
   5.200 +  } else {
   5.201 +    resize_operands(delta_len, delta_size, CHECK);
   5.202 +  }
   5.203 +
   5.204 +} // end extend_operands()
   5.205 +
   5.206 +
   5.207 +// Shrink the operands array to a smaller array with new_len length.
   5.208 +// Used in RedefineClasses for CP merge.
   5.209 +void ConstantPool::shrink_operands(int new_len, TRAPS) {
   5.210 +  int old_len = operand_array_length(operands());
   5.211 +  if (new_len == old_len) {
   5.212 +    return; // nothing to do
   5.213 +  }
   5.214 +  assert(new_len < old_len, "shrunken operands array must be smaller");
   5.215 +
   5.216 +  int free_base  = operand_next_offset_at(new_len - 1);
   5.217 +  int delta_len  = new_len - old_len;
   5.218 +  int delta_size = 2*delta_len + free_base - operands()->length();
   5.219 +
   5.220 +  resize_operands(delta_len, delta_size, CHECK);
   5.221 +
   5.222 +} // end shrink_operands()
   5.223 +
   5.224 +
   5.225  void ConstantPool::copy_operands(constantPoolHandle from_cp,
   5.226                                   constantPoolHandle to_cp,
   5.227                                   TRAPS) {
   5.228 @@ -1357,6 +1436,46 @@
   5.229  } // end find_matching_entry()
   5.230  
   5.231  
   5.232 +// Compare this constant pool's bootstrap specifier at idx1 to the constant pool
   5.233 +// cp2's bootstrap specifier at idx2.
   5.234 +bool ConstantPool::compare_operand_to(int idx1, constantPoolHandle cp2, int idx2, TRAPS) {
   5.235 +  int k1 = operand_bootstrap_method_ref_index_at(idx1);
   5.236 +  int k2 = cp2->operand_bootstrap_method_ref_index_at(idx2);
   5.237 +  bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
   5.238 +
   5.239 +  if (!match) {
   5.240 +    return false;
   5.241 +  }
   5.242 +  int argc = operand_argument_count_at(idx1);
   5.243 +  if (argc == cp2->operand_argument_count_at(idx2)) {
   5.244 +    for (int j = 0; j < argc; j++) {
   5.245 +      k1 = operand_argument_index_at(idx1, j);
   5.246 +      k2 = cp2->operand_argument_index_at(idx2, j);
   5.247 +      match = compare_entry_to(k1, cp2, k2, CHECK_false);
   5.248 +      if (!match) {
   5.249 +        return false;
   5.250 +      }
   5.251 +    }
   5.252 +    return true;           // got through loop; all elements equal
   5.253 +  }
   5.254 +  return false;
   5.255 +} // end compare_operand_to()
   5.256 +
   5.257 +// Search constant pool search_cp for a bootstrap specifier that matches
   5.258 +// this constant pool's bootstrap specifier at pattern_i index.
   5.259 +// Return the index of a matching bootstrap specifier or (-1) if there is no match.
   5.260 +int ConstantPool::find_matching_operand(int pattern_i,
   5.261 +                    constantPoolHandle search_cp, int search_len, TRAPS) {
   5.262 +  for (int i = 0; i < search_len; i++) {
   5.263 +    bool found = compare_operand_to(pattern_i, search_cp, i, CHECK_(-1));
   5.264 +    if (found) {
   5.265 +      return i;
   5.266 +    }
   5.267 +  }
   5.268 +  return -1;  // bootstrap specifier not found; return unused index (-1)
   5.269 +} // end find_matching_operand()
   5.270 +
   5.271 +
   5.272  #ifndef PRODUCT
   5.273  
   5.274  const char* ConstantPool::printable_name_at(int which) {
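
The new resize/extend/shrink helpers above treat the operands array as a single Array<u2> whose first 2*len slots form an offset table (two u2 slots per bootstrap specifier, which is why offsets shift by 2*delta_len and the copy starts at adr_at(2*old_len)), followed by the packed specifiers [bsm_ref_index, argc, argv...]. The sketch below models only that layout arithmetic with a std::vector<uint16_t>; it keeps each offset in one slot for simplicity, whereas the real array packs it across both slots, and all names are hypothetical.

    #include <cstdint>
    #include <vector>

    typedef std::vector<uint16_t> U2Array;  // stand-in for HotSpot's Array<u2>

    static U2Array resize_operands(const U2Array& ops, int old_len, int delta_len, int delta_size) {
      int new_len  = old_len + delta_len;
      int min_len  = (delta_len > 0) ? old_len : new_len;
      int old_size = (int)ops.size();
      int new_size = old_size + delta_size;
      int min_size = (delta_size > 0) ? old_size : new_size;

      U2Array new_ops(new_size, 0);
      // Shift surviving offsets by the two slots each added specifier contributes to the table.
      for (int idx = 0; idx < min_len; idx++) {
        new_ops[2 * idx] = (uint16_t)(ops[2 * idx] + 2 * delta_len);
      }
      // Copy only the bootstrap specifiers that survive the resize.
      for (int i = 0; i < min_size - 2 * min_len; i++) {
        new_ops[2 * new_len + i] = ops[2 * old_len + i];
      }
      return new_ops;
    }

    int main() {
      // One existing specifier: offset table {2, 0}, specifier {bsm=7, argc=1, arg=9}.
      U2Array ops = {2, 0, 7, 1, 9};
      // Make room for one more specifier (2 offset slots + 3 specifier slots).
      U2Array grown = resize_operands(ops, /*old_len=*/1, /*delta_len=*/1, /*delta_size=*/5);
      // The old specifier's offset moves from 2 to 4 because the offset table grew by two slots.
      (void)grown;
      return 0;
    }
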
     6.1 --- a/src/share/vm/oops/constantPool.hpp	Fri Apr 26 00:40:22 2013 -0700
     6.2 +++ b/src/share/vm/oops/constantPool.hpp	Fri Apr 26 08:40:24 2013 -0700
     6.3 @@ -111,7 +111,6 @@
     6.4      int                _version;
     6.5    } _saved;
     6.6  
     6.7 -  Monitor*             _lock;
     6.8  
     6.9    void set_tags(Array<u1>* tags)               { _tags = tags; }
    6.10    void tag_at_put(int which, jbyte t)          { tags()->at_put(which, t); }
    6.11 @@ -567,6 +566,47 @@
    6.12           _indy_argc_offset = 1,  // u2 argc
    6.13           _indy_argv_offset = 2   // u2 argv[argc]
    6.14    };
    6.15 +
    6.16 +  // These functions are used in RedefineClasses for CP merge
    6.17 +
    6.18 +  int operand_offset_at(int bootstrap_specifier_index) {
    6.19 +    assert(0 <= bootstrap_specifier_index &&
    6.20 +           bootstrap_specifier_index < operand_array_length(operands()),
    6.21 +           "Corrupted CP operands");
    6.22 +    return operand_offset_at(operands(), bootstrap_specifier_index);
    6.23 +  }
    6.24 +  int operand_bootstrap_method_ref_index_at(int bootstrap_specifier_index) {
    6.25 +    int offset = operand_offset_at(bootstrap_specifier_index);
    6.26 +    return operands()->at(offset + _indy_bsm_offset);
    6.27 +  }
    6.28 +  int operand_argument_count_at(int bootstrap_specifier_index) {
    6.29 +    int offset = operand_offset_at(bootstrap_specifier_index);
    6.30 +    int argc = operands()->at(offset + _indy_argc_offset);
    6.31 +    return argc;
    6.32 +  }
    6.33 +  int operand_argument_index_at(int bootstrap_specifier_index, int j) {
    6.34 +    int offset = operand_offset_at(bootstrap_specifier_index);
    6.35 +    return operands()->at(offset + _indy_argv_offset + j);
    6.36 +  }
    6.37 +  int operand_next_offset_at(int bootstrap_specifier_index) {
    6.38 +    int offset = operand_offset_at(bootstrap_specifier_index) + _indy_argv_offset
    6.39 +                   + operand_argument_count_at(bootstrap_specifier_index);
    6.40 +    return offset;
    6.41 +  }
     6.42 +  // Compare a bootstrap specifier in the operands arrays
    6.43 +  bool compare_operand_to(int bootstrap_specifier_index1, constantPoolHandle cp2,
    6.44 +                          int bootstrap_specifier_index2, TRAPS);
     6.45 +  // Find a bootstrap specifier in the operands array
    6.46 +  int find_matching_operand(int bootstrap_specifier_index, constantPoolHandle search_cp,
    6.47 +                            int operands_cur_len, TRAPS);
    6.48 +  // Resize the operands array with delta_len and delta_size
    6.49 +  void resize_operands(int delta_len, int delta_size, TRAPS);
    6.50 +  // Extend the operands array with the length and size of the ext_cp operands
    6.51 +  void extend_operands(constantPoolHandle ext_cp, TRAPS);
    6.52 +  // Shrink the operands array to a smaller array with new_len length
    6.53 +  void shrink_operands(int new_len, TRAPS);
    6.54 +
    6.55 +
    6.56    int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
    6.57      assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
    6.58      int op_base = invoke_dynamic_operand_base(which);
    6.59 @@ -782,8 +822,17 @@
    6.60  
    6.61    void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; }
    6.62    int  resolved_reference_length() const  { return _saved._resolved_reference_length; }
    6.63 -  void set_lock(Monitor* lock)            { _lock = lock; }
    6.64 -  Monitor* lock()                         { return _lock; }
    6.65 +
    6.66 +  // lock() may return null -- constant pool updates may happen before this lock is
    6.67 +  // initialized, because the _pool_holder has not been fully initialized and
    6.68 +  // has not been registered into the system dictionary. In this case, no other
    6.69 +  // thread can be modifying this constantpool, so no synchronization is
    6.70 +  // necessary.
    6.71 +  //
     6.72 +  // Use lock() like this:
    6.73 +  //    oop cplock = cp->lock();
    6.74 +  //    ObjectLocker ol(cplock , THREAD, cplock != NULL);
    6.75 +  oop lock();
    6.76  
    6.77    // Decrease ref counts of symbols that are in the constant pool
    6.78    // when the holder class is unloaded
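
The header comment above documents the new locking contract: lock() may return null, and callers pair it with an ObjectLocker whose third argument suppresses locking in that case. The sketch below models that conditional RAII idiom with a hypothetical MaybeLocker over std::mutex; it is not the real ObjectLocker, which locks a Java object and participates in safepoints.

    #include <mutex>

    // Hypothetical guard mirroring the "lock only if the handle is non-null" idiom.
    class MaybeLocker {
      std::mutex* _m;
     public:
      MaybeLocker(std::mutex* m, bool do_lock) : _m(do_lock ? m : nullptr) {
        if (_m != nullptr) _m->lock();
      }
      ~MaybeLocker() {
        if (_m != nullptr) _m->unlock();
      }
    };

    std::mutex pool_mutex;  // stand-in for the pool holder's init_lock
    std::mutex* cp_lock(bool holder_set) { return holder_set ? &pool_mutex : nullptr; }

    void resolve_entry(bool holder_set) {
      std::mutex* cplock = cp_lock(holder_set);
      MaybeLocker ol(cplock, cplock != nullptr);  // no-op when the pool has no holder yet
      // ... mutate the constant pool entry under the (possibly absent) lock ...
    }

    int main() {
      resolve_entry(true);
      resolve_entry(false);
      return 0;
    }
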
     7.1 --- a/src/share/vm/oops/cpCache.cpp	Fri Apr 26 00:40:22 2013 -0700
     7.2 +++ b/src/share/vm/oops/cpCache.cpp	Fri Apr 26 08:40:24 2013 -0700
     7.3 @@ -266,7 +266,8 @@
     7.4    // the lock, so that when the losing writer returns, he can use the linked
     7.5    // cache entry.
     7.6  
     7.7 -  MonitorLockerEx ml(cpool->lock());
     7.8 +  oop cplock = cpool->lock();
     7.9 +  ObjectLocker ol(cplock, Thread::current(), cplock != NULL);
    7.10    if (!is_f1_null()) {
    7.11      return;
    7.12    }
     8.1 --- a/src/share/vm/oops/instanceKlass.cpp	Fri Apr 26 00:40:22 2013 -0700
     8.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Fri Apr 26 08:40:24 2013 -0700
     8.3 @@ -54,6 +54,7 @@
     8.4  #include "runtime/javaCalls.hpp"
     8.5  #include "runtime/mutexLocker.hpp"
     8.6  #include "runtime/thread.inline.hpp"
     8.7 +#include "services/classLoadingService.hpp"
     8.8  #include "services/threadService.hpp"
     8.9  #include "utilities/dtrace.hpp"
    8.10  #include "utilities/macros.hpp"
    8.11 @@ -418,25 +419,6 @@
    8.12    set_annotations(NULL);
    8.13  }
    8.14  
    8.15 -volatile oop InstanceKlass::init_lock() const {
    8.16 -  volatile oop lock = _init_lock;  // read once
    8.17 -  assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state
    8.18 -         "only fully initialized state can have a null lock");
    8.19 -  return lock;
    8.20 -}
    8.21 -
    8.22 -// Set the initialization lock to null so the object can be GC'ed.  Any racing
    8.23 -// threads to get this lock will see a null lock and will not lock.
    8.24 -// That's okay because they all check for initialized state after getting
    8.25 -// the lock and return.
    8.26 -void InstanceKlass::fence_and_clear_init_lock() {
    8.27 -  // make sure previous stores are all done, notably the init_state.
    8.28 -  OrderAccess::storestore();
    8.29 -  klass_oop_store(&_init_lock, NULL);
    8.30 -  assert(!is_not_initialized(), "class must be initialized now");
    8.31 -}
    8.32 -
    8.33 -
    8.34  bool InstanceKlass::should_be_initialized() const {
    8.35    return !is_initialized();
    8.36  }
    8.37 @@ -473,7 +455,7 @@
    8.38  void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
    8.39    EXCEPTION_MARK;
    8.40    volatile oop init_lock = this_oop->init_lock();
    8.41 -  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
    8.42 +  ObjectLocker ol(init_lock, THREAD);
    8.43  
    8.44    // abort if someone beat us to the initialization
    8.45    if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()
    8.46 @@ -492,7 +474,6 @@
    8.47    } else {
    8.48      // linking successfull, mark class as initialized
    8.49      this_oop->set_init_state (fully_initialized);
    8.50 -    this_oop->fence_and_clear_init_lock();
    8.51      // trace
    8.52      if (TraceClassInitialization) {
    8.53        ResourceMark rm(THREAD);
    8.54 @@ -619,7 +600,7 @@
    8.55    // verification & rewriting
    8.56    {
    8.57      volatile oop init_lock = this_oop->init_lock();
    8.58 -    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
    8.59 +    ObjectLocker ol(init_lock, THREAD);
    8.60      // rewritten will have been set if loader constraint error found
    8.61      // on an earlier link attempt
    8.62      // don't verify or rewrite if already rewritten
    8.63 @@ -742,7 +723,7 @@
    8.64    // Step 1
    8.65    {
    8.66      volatile oop init_lock = this_oop->init_lock();
    8.67 -    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
    8.68 +    ObjectLocker ol(init_lock, THREAD);
    8.69  
    8.70      Thread *self = THREAD; // it's passed the current thread
    8.71  
    8.72 @@ -890,9 +871,8 @@
    8.73  
    8.74  void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
    8.75    volatile oop init_lock = this_oop->init_lock();
    8.76 -  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
    8.77 +  ObjectLocker ol(init_lock, THREAD);
    8.78    this_oop->set_init_state(state);
    8.79 -  this_oop->fence_and_clear_init_lock();
    8.80    ol.notify_all(CHECK);
    8.81  }
    8.82  
    8.83 @@ -2312,7 +2292,29 @@
    8.84    m->clear_all_breakpoints();
    8.85  }
    8.86  
    8.87 +
    8.88 +void InstanceKlass::notify_unload_class(InstanceKlass* ik) {
    8.89 +  // notify the debugger
    8.90 +  if (JvmtiExport::should_post_class_unload()) {
    8.91 +    JvmtiExport::post_class_unload(ik);
    8.92 +  }
    8.93 +
    8.94 +  // notify ClassLoadingService of class unload
    8.95 +  ClassLoadingService::notify_class_unloaded(ik);
    8.96 +}
    8.97 +
    8.98 +void InstanceKlass::release_C_heap_structures(InstanceKlass* ik) {
    8.99 +  // Clean up C heap
   8.100 +  ik->release_C_heap_structures();
   8.101 +  ik->constants()->release_C_heap_structures();
   8.102 +}
   8.103 +
   8.104  void InstanceKlass::release_C_heap_structures() {
   8.105 +
   8.106 +  // Can't release the constant pool here because the constant pool can be
   8.107 +  // deallocated separately from the InstanceKlass for default methods and
   8.108 +  // redefine classes.
   8.109 +
   8.110    // Deallocate oop map cache
   8.111    if (_oop_map_cache != NULL) {
   8.112      delete _oop_map_cache;
   8.113 @@ -2837,7 +2839,7 @@
   8.114    st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr();
   8.115    st->print(BULLET"host class:        "); host_klass()->print_value_on_maybe_null(st); st->cr();
   8.116    st->print(BULLET"signers:           "); signers()->print_value_on(st);               st->cr();
   8.117 -  st->print(BULLET"init_lock:         "); ((oop)_init_lock)->print_value_on(st);             st->cr();
   8.118 +  st->print(BULLET"init_lock:         "); ((oop)_init_lock)->print_value_on(st);       st->cr();
   8.119    if (source_file_name() != NULL) {
   8.120      st->print(BULLET"source file:       ");
   8.121      source_file_name()->print_value_on(st);
     9.1 --- a/src/share/vm/oops/instanceKlass.hpp	Fri Apr 26 00:40:22 2013 -0700
     9.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Fri Apr 26 08:40:24 2013 -0700
     9.3 @@ -184,8 +184,9 @@
     9.4    oop             _protection_domain;
     9.5    // Class signers.
     9.6    objArrayOop     _signers;
     9.7 -  // Initialization lock.  Must be one per class and it has to be a VM internal
     9.8 -  // object so java code cannot lock it (like the mirror)
     9.9 +  // Lock for (1) initialization; (2) access to the ConstantPool of this class.
    9.10 +  // Must be one per class and it has to be a VM internal object so java code
    9.11 +  // cannot lock it (like the mirror).
    9.12    // It has to be an object not a Mutex because it's held through java calls.
    9.13    volatile oop    _init_lock;
    9.14  
    9.15 @@ -236,7 +237,7 @@
    9.16      _misc_rewritten            = 1 << 0, // methods rewritten.
    9.17      _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
    9.18      _misc_should_verify_class  = 1 << 2, // allow caching of preverification
    9.19 -    _misc_is_anonymous         = 1 << 3, // has embedded _inner_classes field
    9.20 +    _misc_is_anonymous         = 1 << 3, // has embedded _host_klass field
    9.21      _misc_is_contended         = 1 << 4, // marked with contended annotation
    9.22      _misc_has_default_methods  = 1 << 5  // class/superclass/implemented interfaces has default methods
    9.23    };
    9.24 @@ -934,7 +935,9 @@
    9.25    // referenced by handles.
    9.26    bool on_stack() const { return _constants->on_stack(); }
    9.27  
    9.28 -  void release_C_heap_structures();
    9.29 +  // callbacks for actions during class unloading
    9.30 +  static void notify_unload_class(InstanceKlass* ik);
    9.31 +  static void release_C_heap_structures(InstanceKlass* ik);
    9.32  
    9.33    // Parallel Scavenge and Parallel Old
    9.34    PARALLEL_GC_DECLS
    9.35 @@ -968,6 +971,7 @@
    9.36  #endif // INCLUDE_ALL_GCS
    9.37  
    9.38    u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
    9.39 +
    9.40  private:
    9.41    // initialization state
    9.42  #ifdef ASSERT
    9.43 @@ -994,9 +998,10 @@
    9.44           { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
    9.45  
    9.46    // Lock during initialization
    9.47 -  volatile oop init_lock() const;
    9.48 -  void set_init_lock(oop value)      { klass_oop_store(&_init_lock, value); }
    9.49 -  void fence_and_clear_init_lock();  // after fully_initialized
    9.50 +public:
    9.51 +  volatile oop init_lock() const     {return _init_lock; }
    9.52 +private:
    9.53 +  void set_init_lock(oop value) { klass_oop_store(&_init_lock, value); }
    9.54  
    9.55    // Offsets for memory management
    9.56    oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;}
    9.57 @@ -1022,6 +1027,8 @@
    9.58    // Returns the array class with this class as element type
    9.59    Klass* array_klass_impl(bool or_null, TRAPS);
    9.60  
    9.61 +  // Free CHeap allocated fields.
    9.62 +  void release_C_heap_structures();
    9.63  public:
    9.64    // CDS support - remove and restore oops from metadata. Oops are not shared.
    9.65    virtual void remove_unshareable_info();
    10.1 --- a/src/share/vm/prims/jvmtiEnv.cpp	Fri Apr 26 00:40:22 2013 -0700
    10.2 +++ b/src/share/vm/prims/jvmtiEnv.cpp	Fri Apr 26 08:40:24 2013 -0700
    10.3 @@ -259,7 +259,8 @@
    10.4        // bytes to the InstanceKlass here because they have not been
    10.5        // validated and we're not at a safepoint.
    10.6        constantPoolHandle  constants(current_thread, ikh->constants());
    10.7 -      MonitorLockerEx ml(constants->lock());    // lock constant pool while we query it
    10.8 +      oop cplock = constants->lock();
    10.9 +      ObjectLocker ol(cplock, current_thread, cplock != NULL);    // lock constant pool while we query it
   10.10  
   10.11        JvmtiClassFileReconstituter reconstituter(ikh);
   10.12        if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
   10.13 @@ -2417,7 +2418,8 @@
   10.14  
   10.15    instanceKlassHandle ikh(thread, k_oop);
   10.16    constantPoolHandle  constants(thread, ikh->constants());
   10.17 -  MonitorLockerEx ml(constants->lock());    // lock constant pool while we query it
   10.18 +  oop cplock = constants->lock();
   10.19 +  ObjectLocker ol(cplock, thread, cplock != NULL);    // lock constant pool while we query it
   10.20  
   10.21    JvmtiConstantPoolReconstituter reconstituter(ikh);
   10.22    if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
    11.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Apr 26 00:40:22 2013 -0700
    11.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Apr 26 08:40:24 2013 -0700
    11.3 @@ -415,20 +415,26 @@
    11.4      // this is an indirect CP entry so it needs special handling
    11.5      case JVM_CONSTANT_InvokeDynamic:
    11.6      {
    11.7 -      // TBD: cross-checks and possible extra appends into CP and bsm operands
    11.8 -      // are needed as well. This issue is tracked by a separate bug 8007037.
    11.9 -      int bss_idx = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i);
   11.10 -
   11.11 -      int ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i);
   11.12 -      int new_ref_i = find_or_append_indirect_entry(scratch_cp, ref_i, merge_cp_p,
   11.13 +      // Index of the bootstrap specifier in the operands array
   11.14 +      int old_bs_i = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i);
   11.15 +      int new_bs_i = find_or_append_operand(scratch_cp, old_bs_i, merge_cp_p,
   11.16 +                                            merge_cp_length_p, THREAD);
   11.17 +      // The bootstrap method NameAndType_info index
   11.18 +      int old_ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i);
   11.19 +      int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p,
   11.20                                                      merge_cp_length_p, THREAD);
   11.21 -      if (new_ref_i != ref_i) {
   11.22 +      if (new_bs_i != old_bs_i) {
   11.23          RC_TRACE(0x00080000,
   11.24 -                 ("InvokeDynamic entry@%d name_and_type ref_index change: %d to %d",
   11.25 -                  *merge_cp_length_p, ref_i, new_ref_i));
   11.26 +                 ("InvokeDynamic entry@%d bootstrap_method_attr_index change: %d to %d",
   11.27 +                  *merge_cp_length_p, old_bs_i, new_bs_i));
   11.28        }
   11.29 -
   11.30 -      (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, bss_idx, new_ref_i);
   11.31 +      if (new_ref_i != old_ref_i) {
   11.32 +        RC_TRACE(0x00080000,
   11.33 +                 ("InvokeDynamic entry@%d name_and_type_index change: %d to %d",
   11.34 +                  *merge_cp_length_p, old_ref_i, new_ref_i));
   11.35 +      }
   11.36 +
   11.37 +      (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, new_bs_i, new_ref_i);
   11.38        if (scratch_i != *merge_cp_length_p) {
   11.39          // The new entry in *merge_cp_p is at a different index than
   11.40          // the new entry in scratch_cp so we need to map the index values.
   11.41 @@ -492,6 +498,105 @@
   11.42  } // end find_or_append_indirect_entry()
   11.43  
   11.44  
   11.45 +// Append a bootstrap specifier into the merge_cp operands that is semantically equal
   11.46 +// to the scratch_cp operands bootstrap specifier passed by the old_bs_i index.
   11.47 +// Recursively append new merge_cp entries referenced by the new bootstrap specifier.
   11.48 +void VM_RedefineClasses::append_operand(constantPoolHandle scratch_cp, int old_bs_i,
   11.49 +       constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
   11.50 +
   11.51 +  int old_ref_i = scratch_cp->operand_bootstrap_method_ref_index_at(old_bs_i);
   11.52 +  int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p,
   11.53 +                                                merge_cp_length_p, THREAD);
   11.54 +  if (new_ref_i != old_ref_i) {
   11.55 +    RC_TRACE(0x00080000,
   11.56 +             ("operands entry@%d bootstrap method ref_index change: %d to %d",
   11.57 +              _operands_cur_length, old_ref_i, new_ref_i));
   11.58 +  }
   11.59 +
   11.60 +  Array<u2>* merge_ops = (*merge_cp_p)->operands();
   11.61 +  int new_bs_i = _operands_cur_length;
   11.62 +  // We have _operands_cur_length == 0 when the merge_cp operands is empty yet.
   11.63 +  // However, the operand_offset_at(0) was set in the extend_operands() call.
   11.64 +  int new_base = (new_bs_i == 0) ? (*merge_cp_p)->operand_offset_at(0)
   11.65 +                                 : (*merge_cp_p)->operand_next_offset_at(new_bs_i - 1);
   11.66 +  int argc     = scratch_cp->operand_argument_count_at(old_bs_i);
   11.67 +
   11.68 +  ConstantPool::operand_offset_at_put(merge_ops, _operands_cur_length, new_base);
   11.69 +  merge_ops->at_put(new_base++, new_ref_i);
   11.70 +  merge_ops->at_put(new_base++, argc);
   11.71 +
   11.72 +  for (int i = 0; i < argc; i++) {
   11.73 +    int old_arg_ref_i = scratch_cp->operand_argument_index_at(old_bs_i, i);
   11.74 +    int new_arg_ref_i = find_or_append_indirect_entry(scratch_cp, old_arg_ref_i, merge_cp_p,
   11.75 +                                                      merge_cp_length_p, THREAD);
   11.76 +    merge_ops->at_put(new_base++, new_arg_ref_i);
   11.77 +    if (new_arg_ref_i != old_arg_ref_i) {
   11.78 +      RC_TRACE(0x00080000,
   11.79 +               ("operands entry@%d bootstrap method argument ref_index change: %d to %d",
   11.80 +                _operands_cur_length, old_arg_ref_i, new_arg_ref_i));
   11.81 +    }
   11.82 +  }
   11.83 +  if (old_bs_i != _operands_cur_length) {
   11.84 +    // The bootstrap specifier in *merge_cp_p is at a different index than
   11.85 +    // that in scratch_cp so we need to map the index values.
   11.86 +    map_operand_index(old_bs_i, new_bs_i);
   11.87 +  }
   11.88 +  _operands_cur_length++;
   11.89 +} // end append_operand()
   11.90 +
   11.91 +
   11.92 +int VM_RedefineClasses::find_or_append_operand(constantPoolHandle scratch_cp,
   11.93 +      int old_bs_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
   11.94 +
   11.95 +  int new_bs_i = old_bs_i; // bootstrap specifier index
   11.96 +  bool match = (old_bs_i < _operands_cur_length) &&
   11.97 +               scratch_cp->compare_operand_to(old_bs_i, *merge_cp_p, old_bs_i, THREAD);
   11.98 +
   11.99 +  if (!match) {
  11.100 +    // forward reference in *merge_cp_p or not a direct match
  11.101 +    int found_i = scratch_cp->find_matching_operand(old_bs_i, *merge_cp_p,
  11.102 +                                                    _operands_cur_length, THREAD);
  11.103 +    if (found_i != -1) {
  11.104 +      guarantee(found_i != old_bs_i, "compare_operand_to() and find_matching_operand() disagree");
  11.105 +      // found a matching operand somewhere else in *merge_cp_p so just need a mapping
  11.106 +      new_bs_i = found_i;
  11.107 +      map_operand_index(old_bs_i, found_i);
  11.108 +    } else {
  11.109 +      // no match found so we have to append this bootstrap specifier to *merge_cp_p
  11.110 +      append_operand(scratch_cp, old_bs_i, merge_cp_p, merge_cp_length_p, THREAD);
  11.111 +      new_bs_i = _operands_cur_length - 1;
  11.112 +    }
  11.113 +  }
  11.114 +  return new_bs_i;
  11.115 +} // end find_or_append_operand()
  11.116 +
  11.117 +
  11.118 +void VM_RedefineClasses::finalize_operands_merge(constantPoolHandle merge_cp, TRAPS) {
  11.119 +  if (merge_cp->operands() == NULL) {
  11.120 +    return;
  11.121 +  }
  11.122 +  // Shrink the merge_cp operands
  11.123 +  merge_cp->shrink_operands(_operands_cur_length, CHECK);
  11.124 +
  11.125 +  if (RC_TRACE_ENABLED(0x00040000)) {
  11.126 +    // don't want to loop unless we are tracing
  11.127 +    int count = 0;
  11.128 +    for (int i = 1; i < _operands_index_map_p->length(); i++) {
  11.129 +      int value = _operands_index_map_p->at(i);
  11.130 +      if (value != -1) {
  11.131 +        RC_TRACE_WITH_THREAD(0x00040000, THREAD,
  11.132 +          ("operands_index_map[%d]: old=%d new=%d", count, i, value));
  11.133 +        count++;
  11.134 +      }
  11.135 +    }
  11.136 +  }
  11.137 +  // Clean-up
  11.138 +  _operands_index_map_p = NULL;
  11.139 +  _operands_cur_length = 0;
  11.140 +  _operands_index_map_count = 0;
  11.141 +} // end finalize_operands_merge()
  11.142 +
  11.143 +
  11.144  jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions(
  11.145               instanceKlassHandle the_class,
  11.146               instanceKlassHandle scratch_class) {
  11.147 @@ -765,6 +870,31 @@
  11.148  } // end find_new_index()
  11.149  
  11.150  
  11.151 +// Find new bootstrap specifier index value for old bootstrap specifier index
   11.152 +// value by searching the index map. Returns unused index (-1) if there is
  11.153 +// no mapped value for the old bootstrap specifier index.
  11.154 +int VM_RedefineClasses::find_new_operand_index(int old_index) {
  11.155 +  if (_operands_index_map_count == 0) {
  11.156 +    // map is empty so nothing can be found
  11.157 +    return -1;
  11.158 +  }
  11.159 +
  11.160 +  if (old_index == -1 || old_index >= _operands_index_map_p->length()) {
  11.161 +    // The old_index is out of range so it is not mapped.
  11.162 +    // This should not happen in regular constant pool merging use.
  11.163 +    return -1;
  11.164 +  }
  11.165 +
  11.166 +  int value = _operands_index_map_p->at(old_index);
  11.167 +  if (value == -1) {
  11.168 +    // the old_index is not mapped
  11.169 +    return -1;
  11.170 +  }
  11.171 +
  11.172 +  return value;
  11.173 +} // end find_new_operand_index()
  11.174 +
  11.175 +
  11.176  // Returns true if the current mismatch is due to a resolved/unresolved
  11.177  // class pair. Otherwise, returns false.
  11.178  bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1,
  11.179 @@ -1014,6 +1144,25 @@
  11.180  } // end map_index()
  11.181  
  11.182  
  11.183 +// Map old_index to new_index as needed.
  11.184 +void VM_RedefineClasses::map_operand_index(int old_index, int new_index) {
  11.185 +  if (find_new_operand_index(old_index) != -1) {
  11.186 +    // old_index is already mapped
  11.187 +    return;
  11.188 +  }
  11.189 +
  11.190 +  if (old_index == new_index) {
  11.191 +    // no mapping is needed
  11.192 +    return;
  11.193 +  }
  11.194 +
  11.195 +  _operands_index_map_p->at_put(old_index, new_index);
  11.196 +  _operands_index_map_count++;
  11.197 +
  11.198 +  RC_TRACE(0x00040000, ("mapped bootstrap specifier at index %d to %d", old_index, new_index));
   11.199 +} // end map_operand_index()
  11.200 +
  11.201 +
  11.202  // Merge old_cp and scratch_cp and return the results of the merge via
  11.203  // merge_cp_p. The number of entries in *merge_cp_p is returned via
  11.204  // merge_cp_length_p. The entries in old_cp occupy the same locations
  11.205 @@ -1086,6 +1235,7 @@
  11.206      } // end for each old_cp entry
  11.207  
  11.208      ConstantPool::copy_operands(old_cp, *merge_cp_p, CHECK_0);
  11.209 +    (*merge_cp_p)->extend_operands(scratch_cp, CHECK_0);
  11.210  
  11.211      // We don't need to sanity check that *merge_cp_length_p is within
  11.212      // *merge_cp_p bounds since we have the minimum on-entry check above.
  11.213 @@ -1198,6 +1348,8 @@
  11.214          CHECK_0);
  11.215      }
  11.216  
  11.217 +    finalize_operands_merge(*merge_cp_p, THREAD);
  11.218 +
  11.219      RC_TRACE_WITH_THREAD(0x00020000, THREAD,
  11.220        ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d",
  11.221        *merge_cp_length_p, scratch_i, _index_map_count));
  11.222 @@ -1270,6 +1422,11 @@
  11.223    _index_map_count = 0;
  11.224    _index_map_p = new intArray(scratch_cp->length(), -1);
  11.225  
  11.226 +  _operands_cur_length = ConstantPool::operand_array_length(old_cp->operands());
  11.227 +  _operands_index_map_count = 0;
  11.228 +  _operands_index_map_p = new intArray(
  11.229 +    ConstantPool::operand_array_length(scratch_cp->operands()), -1);
  11.230 +
  11.231    // reference to the cp holder is needed for copy_operands()
  11.232    merge_cp->set_pool_holder(scratch_class());
  11.233    bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp,
  11.234 @@ -1400,7 +1557,6 @@
  11.235    return true;
  11.236  } // end rewrite_cp_refs()
  11.237  
  11.238 -
  11.239  // Rewrite constant pool references in the methods.
  11.240  bool VM_RedefineClasses::rewrite_cp_refs_in_methods(
  11.241         instanceKlassHandle scratch_class, TRAPS) {
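
find_new_operand_index() and map_operand_index() above maintain a scratch-index to merged-index map that starts out filled with -1 and records an entry only when a bootstrap specifier lands at a different index in the merged pool. The sketch below is a standalone model of that bookkeeping; OperandIndexMap and its method names are illustrative only.

    #include <cassert>
    #include <vector>

    // Minimal model of the operand index map: initialized to -1, an entry is
    // recorded only when a bootstrap specifier moves to a new index.
    struct OperandIndexMap {
      std::vector<int> map;
      int count = 0;

      explicit OperandIndexMap(int scratch_len) : map(scratch_len, -1) {}

      int find_new_index(int old_index) const {
        if (count == 0) return -1;                          // map is empty
        if (old_index < 0 || old_index >= (int)map.size()) return -1;
        return map[old_index];                              // -1 if unmapped
      }

      void map_index(int old_index, int new_index) {
        if (find_new_index(old_index) != -1) return;        // already mapped
        if (old_index == new_index) return;                 // no mapping needed
        map[old_index] = new_index;
        count++;
      }
    };

    int main() {
      OperandIndexMap m(4);
      m.map_index(3, 1);             // specifier 3 in scratch_cp merged at index 1
      m.map_index(2, 2);             // same index: nothing recorded
      assert(m.find_new_index(3) == 1);
      assert(m.find_new_index(2) == -1);
      return 0;
    }
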
    12.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.hpp	Fri Apr 26 00:40:22 2013 -0700
    12.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp	Fri Apr 26 08:40:24 2013 -0700
    12.3 @@ -359,8 +359,15 @@
    12.4    // _index_map_p contains any entries.
    12.5    int                         _index_map_count;
    12.6    intArray *                  _index_map_p;
    12.7 +
    12.8 +  // _operands_index_map_count is just an optimization for knowing if
    12.9 +  // _operands_index_map_p contains any entries.
   12.10 +  int                         _operands_cur_length;
   12.11 +  int                         _operands_index_map_count;
   12.12 +  intArray *                  _operands_index_map_p;
   12.13 +
   12.14    // ptr to _class_count scratch_classes
   12.15 -  Klass**                   _scratch_classes;
   12.16 +  Klass**                     _scratch_classes;
   12.17    jvmtiError                  _res;
   12.18  
   12.19    // Performance measurement support. These timers do not cover all
   12.20 @@ -422,12 +429,19 @@
   12.21    // Support for constant pool merging (these routines are in alpha order):
   12.22    void append_entry(constantPoolHandle scratch_cp, int scratch_i,
   12.23      constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
   12.24 +  void append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index,
   12.25 +    constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
   12.26 +  void finalize_operands_merge(constantPoolHandle merge_cp, TRAPS);
   12.27    int find_or_append_indirect_entry(constantPoolHandle scratch_cp, int scratch_i,
   12.28      constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
   12.29 +  int find_or_append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index,
   12.30 +    constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
   12.31    int find_new_index(int old_index);
   12.32 +  int find_new_operand_index(int old_bootstrap_spec_index);
   12.33    bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1,
   12.34      constantPoolHandle cp2, int index2);
   12.35    void map_index(constantPoolHandle scratch_cp, int old_index, int new_index);
   12.36 +  void map_operand_index(int old_bootstrap_spec_index, int new_bootstrap_spec_index);
   12.37    bool merge_constant_pools(constantPoolHandle old_cp,
   12.38      constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p,
   12.39      int *merge_cp_length_p, TRAPS);
    13.1 --- a/src/share/vm/services/memBaseline.cpp	Fri Apr 26 00:40:22 2013 -0700
    13.2 +++ b/src/share/vm/services/memBaseline.cpp	Fri Apr 26 08:40:24 2013 -0700
    13.3 @@ -1,5 +1,5 @@
    13.4  /*
    13.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    13.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    13.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8   *
    13.9   * This code is free software; you can redistribute it and/or modify it
   13.10 @@ -23,9 +23,12 @@
   13.11   */
   13.12  #include "precompiled.hpp"
   13.13  #include "memory/allocation.hpp"
   13.14 +#include "runtime/safepoint.hpp"
   13.15 +#include "runtime/thread.inline.hpp"
   13.16  #include "services/memBaseline.hpp"
   13.17  #include "services/memTracker.hpp"
   13.18  
   13.19 +
   13.20  MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
   13.21    {mtJavaHeap,   "Java Heap"},
   13.22    {mtClass,      "Class"},
   13.23 @@ -149,6 +152,14 @@
   13.24    return true;
   13.25  }
   13.26  
   13.27 +// check if there is a safepoint in progress, if so, block the thread
   13.28 +// for the safepoint
   13.29 +void MemBaseline::check_safepoint(JavaThread* thr) {
   13.30 +  if (SafepointSynchronize::is_synchronizing()) {
   13.31 +    SafepointSynchronize::block(thr);
   13.32 +  }
   13.33 +}
   13.34 +
   13.35  // baseline mmap'd memory records, generate overall summary and summaries by
   13.36  // memory types
   13.37  bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
   13.38 @@ -306,7 +317,7 @@
   13.39          committed_rec->pc() != vm_ptr->pc()) {
   13.40          if (!_vm_map->append(vm_ptr)) {
   13.41            return false;
   13.42 -  }
   13.43 +        }
   13.44          committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
   13.45      } else {
   13.46          committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
   13.47 @@ -344,16 +355,27 @@
   13.48  
   13.49  // baseline a snapshot. If summary_only = false, memory usages aggregated by
   13.50  // callsites are also baselined.
   13.51 +// The method call can be lengthy, especially when detail tracking info is
   13.52 +// requested. So the method checks for safepoint explicitly.
   13.53  bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
   13.54 -  MutexLockerEx snapshot_locker(snapshot._lock, true);
   13.55 +  Thread* THREAD = Thread::current();
   13.56 +  assert(THREAD->is_Java_thread(), "must be a JavaThread");
   13.57 +  MutexLocker snapshot_locker(snapshot._lock);
   13.58    reset();
   13.59 -  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
   13.60 -               baseline_vm_summary(snapshot._vm_ptrs);
   13.61 +  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
   13.62 +  if (_baselined) {
   13.63 +    check_safepoint((JavaThread*)THREAD);
   13.64 +    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
   13.65 +  }
   13.66    _number_of_classes = snapshot.number_of_classes();
   13.67  
   13.68    if (!summary_only && MemTracker::track_callsite() && _baselined) {
   13.69 -    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs) &&
   13.70 -      baseline_vm_details(snapshot._vm_ptrs);
   13.71 +    check_safepoint((JavaThread*)THREAD);
   13.72 +    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs);
   13.73 +    if (_baselined) {
   13.74 +      check_safepoint((JavaThread*)THREAD);
   13.75 +      _baselined =  baseline_vm_details(snapshot._vm_ptrs);
   13.76 +    }
   13.77    }
   13.78    return _baselined;
   13.79  }
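
MemBaseline::baseline() above is now split into phases with an explicit check_safepoint() between them, so a long-running baseline does not stall a pending safepoint while holding the snapshot lock. The sketch below models that check-between-phases control flow with hypothetical stand-ins; the real check uses SafepointSynchronize::is_synchronizing()/block() as shown in the hunk.

    #include <atomic>
    #include <cstdio>

    // Hypothetical stand-ins: a flag a VM operation would set, and a block()
    // that would park the thread until the safepoint completes.
    static std::atomic<bool> safepoint_pending{false};
    static void block_for_safepoint() { /* wait until the safepoint is over */ }

    static void check_safepoint() {
      if (safepoint_pending.load()) {
        block_for_safepoint();
      }
    }

    static bool baseline_phase(const char* name) {
      printf("baselining %s\n", name);
      return true;  // pretend the phase succeeded
    }

    // Mirrors the control flow added above: yield between the lengthy phases.
    static bool baseline(bool summary_only) {
      bool ok = baseline_phase("malloc summary");
      if (ok) {
        check_safepoint();
        ok = baseline_phase("vm summary");
      }
      if (!summary_only && ok) {
        check_safepoint();
        ok = baseline_phase("malloc details");
        if (ok) {
          check_safepoint();
          ok = baseline_phase("vm details");
        }
      }
      return ok;
    }

    int main() { return baseline(false) ? 0 : 1; }
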
    14.1 --- a/src/share/vm/services/memBaseline.hpp	Fri Apr 26 00:40:22 2013 -0700
    14.2 +++ b/src/share/vm/services/memBaseline.hpp	Fri Apr 26 08:40:24 2013 -0700
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    14.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -330,6 +330,9 @@
   14.11    // should not use copy constructor
   14.12    MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
   14.13  
   14.14 +  // check and block at a safepoint
   14.15 +  static inline void check_safepoint(JavaThread* thr);
   14.16 +
   14.17   public:
   14.18    // create a memory baseline
   14.19    MemBaseline();
    15.1 --- a/src/share/vm/services/memTracker.cpp	Fri Apr 26 00:40:22 2013 -0700
    15.2 +++ b/src/share/vm/services/memTracker.cpp	Fri Apr 26 08:40:24 2013 -0700
    15.3 @@ -573,7 +573,7 @@
    15.4  
    15.5  // baseline current memory snapshot
    15.6  bool MemTracker::baseline() {
    15.7 -  MutexLockerEx lock(_query_lock, true);
    15.8 +  MutexLocker lock(_query_lock);
    15.9    MemSnapshot* snapshot = get_snapshot();
   15.10    if (snapshot != NULL) {
   15.11      return _baseline.baseline(*snapshot, false);
   15.12 @@ -584,7 +584,7 @@
   15.13  // print memory usage from current snapshot
   15.14  bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   15.15    MemBaseline  baseline;
   15.16 -  MutexLockerEx lock(_query_lock, true);
   15.17 +  MutexLocker  lock(_query_lock);
   15.18    MemSnapshot* snapshot = get_snapshot();
   15.19    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
   15.20      BaselineReporter reporter(out, unit);
   15.21 @@ -597,7 +597,7 @@
   15.22  // Whitebox API for blocking until the current generation of NMT data has been merged
   15.23  bool MemTracker::wbtest_wait_for_data_merge() {
   15.24    // NMT can't be shutdown while we're holding _query_lock
   15.25 -  MutexLockerEx lock(_query_lock, true);
   15.26 +  MutexLocker lock(_query_lock);
   15.27    assert(_worker_thread != NULL, "Invalid query");
   15.28    // the generation at query time, so NMT will spin till this generation is processed
   15.29    unsigned long generation_at_query_time = SequenceGenerator::current_generation();
   15.30 @@ -641,7 +641,7 @@
   15.31  
   15.32  // compare memory usage between current snapshot and baseline
   15.33  bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   15.34 -  MutexLockerEx lock(_query_lock, true);
   15.35 +  MutexLocker lock(_query_lock);
   15.36    if (_baseline.baselined()) {
   15.37      MemBaseline baseline;
   15.38      MemSnapshot* snapshot = get_snapshot();
