src/share/vm/oops/instanceKlass.cpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2658:c7f3d0b4570f
child:       2690:2cd0180da6e1
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
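
For context, a minimal sketch of the read barrier the summary describes. This is illustrative only, not code from this changeset; the helper names (load_referent_field, concurrent_marking_in_progress, satb_enqueue) are hypothetical stand-ins for the intrinsified Reference.get() and the JNI/reflection/Unsafe interception points:

// Conceptual shape of the referent read barrier (hypothetical names).
oop reference_get_barrier(oop reference) {
  // Load java.lang.ref.Reference::referent.
  oop referent = load_referent_field(reference);
  if (referent != NULL && concurrent_marking_in_progress()) {
    // Record the referent in the current thread's SATB buffer so that
    // concurrent marking treats it as live, even if it was only weakly
    // reachable when marking started.
    satb_enqueue(referent);
  }
  return referent;
}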

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/oopMapCache.hpp"
#include "interpreter/rewriter.hpp"
#include "jvmtifiles/jvmti.h"
#include "memory/genOopClosures.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/permGen.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlassKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif

#ifdef DTRACE_ENABLED

HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  char*, intptr_t, oop, intptr_t);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
  char*, intptr_t, oop, intptr_t, int);

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
  {                                                              \
    char* data = NULL;                                           \
    int len = 0;                                                 \
    Symbol* name = (clss)->name();                               \
    if (name != NULL) {                                          \
      data = (char*)name->bytes();                               \
      len = name->utf8_length();                                 \
    }                                                            \
    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
      data, len, (clss)->class_loader(), thread_type);           \
  }

#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
  {                                                              \
    char* data = NULL;                                           \
    int len = 0;                                                 \
    Symbol* name = (clss)->name();                               \
    if (name != NULL) {                                          \
      data = (char*)name->bytes();                               \
      len = name->utf8_length();                                 \
    }                                                            \
    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
      data, len, (clss)->class_loader(), thread_type, wait);     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)

#endif //  ndef DTRACE_ENABLED

bool instanceKlass::should_be_initialized() const {
  return !is_initialized();
}

klassVtable* instanceKlass::vtable() const {
  return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
}

klassItable* instanceKlass::itable() const {
  return new klassItable(as_klassOop());
}

void instanceKlass::eager_initialize(Thread *thread) {
  if (!EagerInitialization) return;

  if (this->is_not_initialized()) {
    // abort if the class has a class initializer
    if (this->class_initializer() != NULL) return;

    // abort if it is java.lang.Object (initialization is handled in genesis)
    klassOop super = this->super();
    if (super == NULL) return;

    // abort if the super class should be initialized
    if (!instanceKlass::cast(super)->is_initialized()) return;

    // call body to expose the this pointer
    instanceKlassHandle this_oop(thread, this->as_klassOop());
    eager_initialize_impl(this_oop);
  }
}

void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  ObjectLocker ol(this_oop, THREAD);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()

  ClassState old_state = this_oop->_init_state;
  link_class_impl(this_oop, true, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // Abort if linking the class throws an exception.

    // Use a test to avoid redundantly resetting the state if there's
    // no change.  set_init_state() asserts that state changes make
    // progress, whereas here we might just be spinning in place.
    if (old_state != this_oop->_init_state)
      this_oop->set_init_state(old_state);
  } else {
    // linking successful, mark class as initialized
    this_oop->set_init_state(fully_initialized);
    // trace
    if (TraceClassInitialization) {
      ResourceMark rm(THREAD);
      tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
    }
  }
}

// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refer to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::initialize(TRAPS) {
  if (this->should_be_initialized()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    initialize_impl(this_oop, CHECK);
    // Note: at this point the class may be initialized
    //       OR it may be in the state of being initialized
    //       in case of recursive initialization!
  } else {
    assert(is_initialized(), "sanity check");
  }
}

bool instanceKlass::verify_code(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // 1) Verify the bytecodes
  Verifier::Mode mode =
    throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
  return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
}

// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.

void instanceKlass::unlink_class() {
  assert(is_linked(), "must be linked");
  _init_state = loaded;
}

void instanceKlass::link_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, true, CHECK);
  }
}

// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
bool instanceKlass::link_class_or_fail(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, false, CHECK_false);
  }
  return is_linked();
}

bool instanceKlass::link_class_impl(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // check for error state
  if (this_oop->is_in_error_state()) {
    ResourceMark rm(THREAD);
    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
               this_oop->external_name(), false);
  }
  // return if already verified
  if (this_oop->is_linked()) {
    return true;
  }

  // Timing
  // timer handles recursion
  assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
  JavaThread* jt = (JavaThread*)THREAD;

  // link super class before linking this class
  instanceKlassHandle super(THREAD, this_oop->super());
  if (super.not_null()) {
    if (super->is_interface()) {  // check if super class is an interface
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbols::java_lang_IncompatibleClassChangeError(),
        "class %s has interface %s as super class",
        this_oop->external_name(),
        super->external_name()
      );
      return false;
    }

    link_class_impl(super, throw_verifyerror, CHECK_false);
  }

  // link all interfaces implemented by this class before linking this class
  objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
  int num_interfaces = interfaces->length();
  for (int index = 0; index < num_interfaces; index++) {
    HandleMark hm(THREAD);
    instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
    link_class_impl(ih, throw_verifyerror, CHECK_false);
  }

  // in case the class is linked in the process of linking its superclasses
  if (this_oop->is_linked()) {
    return true;
  }

  // trace only the link time for this klass that includes
  // the verification time
  PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
                             ClassLoader::perf_class_link_selftime(),
                             ClassLoader::perf_classes_linked(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_LINK);

  // verification & rewriting
  {
    ObjectLocker ol(this_oop, THREAD);
    // rewritten will have been set if loader constraint error found
    // on an earlier link attempt
    // don't verify or rewrite if already rewritten
    if (!this_oop->is_linked()) {
      if (!this_oop->is_rewritten()) {
        {
          // Timer includes any side effects of class verification (resolution,
          // etc), but not recursive entry into verify_code().
          PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
                                   ClassLoader::perf_class_verify_selftime(),
                                   ClassLoader::perf_classes_verified(),
                                   jt->get_thread_stat()->perf_recursion_counts_addr(),
                                   jt->get_thread_stat()->perf_timers_addr(),
                                   PerfClassTraceTime::CLASS_VERIFY);
          bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
          if (!verify_ok) {
            return false;
          }
        }

        // Just in case a side-effect of verify linked this class already
        // (which can sometimes happen since the verifier loads classes
        // using custom class loaders, which are free to initialize things)
        if (this_oop->is_linked()) {
          return true;
        }

        // also sets rewritten
        this_oop->rewrite_class(CHECK_false);
      }

      // Initialize the vtable and interface table after
      // methods have been rewritten since rewrite may
      // fabricate new methodOops.
      // also does loader constraint checking
      if (!this_oop()->is_shared()) {
        ResourceMark rm(THREAD);
        this_oop->vtable()->initialize_vtable(true, CHECK_false);
        this_oop->itable()->initialize_itable(true, CHECK_false);
      }
#ifdef ASSERT
      else {
        ResourceMark rm(THREAD);
        this_oop->vtable()->verify(tty, true);
        // In case itable verification is ever added.
        // this_oop->itable()->verify(tty, true);
      }
#endif
      this_oop->set_init_state(linked);
      if (JvmtiExport::should_post_class_prepare()) {
        Thread *thread = THREAD;
        assert(thread->is_Java_thread(), "thread->is_Java_thread()");
        JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
      }
    }
  }
  return true;
}

// Rewrite the byte codes of all of the methods of a class.
// Three cases:
//    During the link of a newly loaded class.
//    During the preloading of classes to be written to the shared spaces.
//      - Rewrite the methods and update the method entry points.
//
//    During the link of a class in the shared spaces.
//      - The methods were already rewritten, update the method entry points.
//
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.

void instanceKlass::rewrite_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
  if (this_oop->is_rewritten()) {
    assert(this_oop()->is_shared(), "rewriting an unshared class?");
    return;
  }
  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
  this_oop->set_rewritten();
}

void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Make sure klass is linked (verified) before initialization
  // A class could already be verified, since it has been reflected upon.
  this_oop->link_class(CHECK);

  DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);

  bool wait = false;

  // refer to the JVM book page 47 for description of steps
  // Step 1
  { ObjectLocker ol(this_oop, THREAD);

    Thread *self = THREAD; // THREAD is the current thread

    // Step 2
    // If we were to use wait() instead of waitInterruptibly() then
    // we might end up throwing IE from link/symbol resolution sites
    // that aren't expected to throw.  This would wreak havoc.  See 6320309.
    while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
      wait = true;
      ol.waitUninterruptibly(CHECK);
    }

    // Step 3
    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
      DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 4
    if (this_oop->is_initialized()) {
      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 5
    if (this_oop->is_in_error_state()) {
      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1, wait);
      ResourceMark rm(THREAD);
      const char* desc = "Could not initialize class ";
      const char* className = this_oop->external_name();
      size_t msglen = strlen(desc) + strlen(className) + 1;
      char* message = NEW_RESOURCE_ARRAY(char, msglen);
      if (NULL == message) {
        // Out of memory: can't create detailed error message
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
      } else {
        jio_snprintf(message, msglen, "%s%s", desc, className);
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
      }
    }

    // Step 6
    this_oop->set_init_state(being_initialized);
    this_oop->set_init_thread(self);
  }

  // Step 7
  klassOop super_klass = this_oop->super();
  if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
    Klass::cast(super_klass)->initialize(THREAD);

    if (HAS_PENDING_EXCEPTION) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      {
        EXCEPTION_MARK;
        this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
        CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
      }
      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1, wait);
      THROW_OOP(e());
    }
  }

  // Step 8
  {
    assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
    JavaThread* jt = (JavaThread*)THREAD;
    DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1, wait);
    // Timer includes any side effects of class initialization (resolution,
    // etc), but not recursive entry into call_class_initializer().
    PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
                             ClassLoader::perf_class_init_selftime(),
                             ClassLoader::perf_classes_inited(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_CLINIT);
    this_oop->call_class_initializer(THREAD);
  }

  // Step 9
  if (!HAS_PENDING_EXCEPTION) {
    this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
    { ResourceMark rm(THREAD);
      debug_only(this_oop->vtable()->verify(tty, true);)
    }
  }
  else {
    // Step 10 and 11
    Handle e(THREAD, PENDING_EXCEPTION);
    CLEAR_PENDING_EXCEPTION;
    {
      EXCEPTION_MARK;
      this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
      CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
    }
    DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1, wait);
    if (e->is_a(SystemDictionary::Error_klass())) {
      THROW_OOP(e());
    } else {
      JavaCallArguments args(e);
      THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
                vmSymbols::throwable_void_signature(),
                &args);
    }
  }
  DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1, wait);
}

// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
  instanceKlassHandle kh(THREAD, this->as_klassOop());
  set_initialization_state_and_notify_impl(kh, state, CHECK);
}

void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  ObjectLocker ol(this_oop, THREAD);
  this_oop->set_init_state(state);
  ol.notify_all(CHECK);
}

void instanceKlass::add_implementor(klassOop k) {
  assert(Compile_lock->owned_by_self(), "");
  // Filter out my subinterfaces.
  // (Note: Interfaces are never on the subklass list.)
  if (instanceKlass::cast(k)->is_interface()) return;

  // Filter out subclasses whose supers already implement me.
  // (Note: CHA must walk subclasses of direct implementors
  // in order to locate indirect implementors.)
  klassOop sk = instanceKlass::cast(k)->super();
  if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
    // We only need to check one immediate superclass, since the
    // implements_interface query looks at transitive_interfaces.
    // Any supers of the super have the same (or fewer) transitive_interfaces.
    return;

  // Update number of implementors
  int i = _nof_implementors++;

  // Record this implementor, if there are not too many already
  if (i < implementors_limit) {
    assert(_implementors[i] == NULL, "should be exactly one implementor");
    oop_store_without_check((oop*)&_implementors[i], k);
  } else if (i == implementors_limit) {
    // clear out the list on first overflow
    for (int i2 = 0; i2 < implementors_limit; i2++)
      oop_store_without_check((oop*)&_implementors[i2], NULL);
  }

  // The implementor also implements the transitive_interfaces
  for (int index = 0; index < local_interfaces()->length(); index++) {
    instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
  }
}

void instanceKlass::init_implementor() {
  for (int i = 0; i < implementors_limit; i++)
    oop_store_without_check((oop*)&_implementors[i], NULL);
  _nof_implementors = 0;
}

void instanceKlass::process_interfaces(Thread *thread) {
  // link this class into the implementors list of every interface it implements
  KlassHandle this_as_oop (thread, this->as_klassOop());
  for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
    assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
    instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
    assert(interf->is_interface(), "expected interface");
    interf->add_implementor(this_as_oop());
  }
}

bool instanceKlass::can_be_primary_super_slow() const {
  if (is_interface())
    return false;
  else
    return Klass::can_be_primary_super_slow();
}

objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
  // The secondaries are the implemented interfaces.
  instanceKlass* ik = instanceKlass::cast(as_klassOop());
  objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
  int num_secondaries = num_extra_slots + interfaces->length();
  if (num_secondaries == 0) {
    return Universe::the_empty_system_obj_array();
  } else if (num_extra_slots == 0) {
    return interfaces();
  } else {
    // a mix of both
    objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
    for (int i = 0; i < interfaces->length(); i++) {
      secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
    }
    return secondaries;
  }
}

bool instanceKlass::compute_is_subtype_of(klassOop k) {
  if (Klass::cast(k)->is_interface()) {
    return implements_interface(k);
  } else {
    return Klass::compute_is_subtype_of(k);
  }
}

bool instanceKlass::implements_interface(klassOop k) const {
  if (as_klassOop() == k) return true;
  assert(Klass::cast(k)->is_interface(), "should be an interface class");
  for (int i = 0; i < transitive_interfaces()->length(); i++) {
    if (transitive_interfaces()->obj_at(i) == k) {
      return true;
    }
  }
  return false;
}

objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
  if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
    report_java_out_of_memory("Requested array size exceeds VM limit");
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  int size = objArrayOopDesc::object_size(length);
  klassOop ak = array_klass(n, CHECK_NULL);
  KlassHandle h_ak (THREAD, ak);
  objArrayOop o =
    (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
  return o;
}

instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
  if (TraceFinalizerRegistration) {
    tty->print("Registered ");
    i->print_value_on(tty);
    tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
  }
  instanceHandle h_i(THREAD, i);
  // Pass the handle as argument, JavaCalls::call expects oop as jobjects
  JavaValue result(T_VOID);
  JavaCallArguments args(h_i);
  methodHandle mh (THREAD, Universe::finalizer_register_method());
  JavaCalls::call(&result, mh, &args, CHECK_NULL);
  return h_i();
}

instanceOop instanceKlass::allocate_instance(TRAPS) {
  assert(!oop_is_instanceMirror(), "wrong allocation path");
  bool has_finalizer_flag = has_finalizer(); // Query before possible GC
  int size = size_helper();  // Query before forming handle.

  KlassHandle h_k(THREAD, as_klassOop());

  instanceOop i;

  i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  if (has_finalizer_flag && !RegisterFinalizersAtInit) {
    i = register_finalizer(i, CHECK_NULL);
  }
  return i;
}

instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
  // Finalizer registration occurs in the Object.<init> constructor
  // and constructors normally aren't run when allocating perm
  // instances so simply disallow finalizable perm objects.  This can
  // be relaxed if a need for it is found.
  assert(!has_finalizer(), "perm objects not allowed to have finalizers");
  assert(!oop_is_instanceMirror(), "wrong allocation path");
  int size = size_helper();  // Query before forming handle.
  KlassHandle h_k(THREAD, as_klassOop());
  instanceOop i = (instanceOop)
    CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
  return i;
}

void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
  if (is_interface() || is_abstract()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
              : vmSymbols::java_lang_InstantiationException(), external_name());
  }
  if (as_klassOop() == SystemDictionary::Class_klass()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
              : vmSymbols::java_lang_IllegalAccessException(), external_name());
  }
}

klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_oop(THREAD, as_klassOop());
  return array_klass_impl(this_oop, or_null, n, THREAD);
}

klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
  if (this_oop->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_oop->array_klasses() == NULL) {
        objArrayKlassKlass* oakk =
          (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();

        klassOop  k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
        this_oop->set_array_klasses(k);
      }
    }
  }
  // array_klasses() will always be set at this point
  objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
  if (or_null) {
    return oak->array_klass_or_null(n);
  }
  return oak->array_klass(n, CHECK_NULL);
}

klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void instanceKlass::call_class_initializer(TRAPS) {
  instanceKlassHandle ik (THREAD, as_klassOop());
  call_class_initializer_impl(ik, THREAD);
}

static int call_class_initializer_impl_counter = 0;   // for debugging

methodOop instanceKlass::class_initializer() {
  methodOop clinit = find_method(
      vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
  if (clinit != NULL && clinit->has_valid_initializer_flags()) {
    return clinit;
  }
  return NULL;
}

void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
  methodHandle h_method(THREAD, this_oop->class_initializer());
  assert(!this_oop->is_initialized(), "we cannot initialize twice");
  if (TraceClassInitialization) {
    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
    this_oop->name()->print_value();
    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
  }
  if (h_method() != NULL) {
    JavaCallArguments args; // No arguments
    JavaValue result(T_VOID);
    JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
  }
}

void instanceKlass::mask_for(methodHandle method, int bci,
  InterpreterOopMap* entry_for) {
  // Dirty read, then double-check under a lock.
  if (_oop_map_cache == NULL) {
    // Otherwise, allocate a new one.
    MutexLocker x(OopMapCacheAlloc_lock);
    // First time use. Allocate a cache in C heap
    if (_oop_map_cache == NULL) {
      _oop_map_cache = new OopMapCache();
    }
  }
  // _oop_map_cache is constant after init; lookup below does its own locking.
  _oop_map_cache->lookup(method, bci, entry_for);
}

bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    Symbol* f_name = constants()->symbol_at(name_index);
    Symbol* f_sig  = constants()->symbol_at(sig_index);
    if (f_name == name && f_sig == sig) {
      fd->initialize(as_klassOop(), i);
      return true;
    }
  }
  return false;
}

void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
  Klass::shared_symbols_iterate(closure);
  closure->do_symbol(&_generic_signature);
  closure->do_symbol(&_source_file_name);
  closure->do_symbol(&_source_debug_extension);

  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    closure->do_symbol(constants()->symbol_at_addr(name_index));
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    closure->do_symbol(constants()->symbol_at_addr(sig_index));
  }
}

klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  const int n = local_interfaces()->length();
  for (int i = 0; i < n; i++) {
    klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
    assert(Klass::cast(intf1)->is_interface(), "just checking type");
    // search for field in current interface
    if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
      assert(fd->is_static(), "interface field must be static");
      return intf1;
    }
    // search for field in direct superinterfaces
    klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
    if (intf2 != NULL) return intf2;
  }
  // otherwise field lookup fails
  return NULL;
}

klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  { klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}

klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    if (fd->is_static() == is_static) return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  if (is_static) {
    klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}

bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    if (offset_from_fields( i ) == offset) {
      fd->initialize(as_klassOop(), i);
      if (fd->is_static() == is_static) return true;
    }
  }
  return false;
}

bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
      return true;
    }
    klass = Klass::cast(klass)->super();
  }
  return false;
}

void instanceKlass::methods_do(void f(methodOop method)) {
  int len = methods()->length();
  for (int index = 0; index < len; index++) {
    methodOop m = methodOop(methods()->obj_at(index));
    assert(m->is_method(), "must be method");
    f(m);
  }
}

void instanceKlass::do_local_static_fields(FieldClosure* cl) {
  fieldDescriptor fd;
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (fd.is_static()) cl->do_field(&fd);
  }
}

void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}

void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}

static int compare_fields_by_offset(int* a, int* b) {
  return a[0] - b[0];
}

void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  instanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = fields()->length();
  // In DebugInfo nonstatic fields are sorted by offset.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
  int j = 0;
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(as_klassOop(), fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}

void instanceKlass::array_klasses_do(void f(klassOop k)) {
  if (array_klasses() != NULL)
    arrayKlass::cast(array_klasses())->array_klasses_do(f);
}

void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
  f(as_klassOop());
  array_klasses_do(f);
}

#ifdef ASSERT
static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) {
  int len = methods->length();
  for (int index = 0; index < len; index++) {
    methodOop m = (methodOop)(methods->obj_at(index));
    assert(m->is_method(), "must be method");
    if (m->signature() == signature && m->name() == name) {
       return index;
    }
  }
  return -1;
}
#endif

methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const {
  return instanceKlass::find_method(methods(), name, signature);
}

methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) {
  int len = methods->length();
  // methods are sorted, so do binary search
  int l = 0;
  int h = len - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    methodOop m = (methodOop)methods->obj_at(mid);
    assert(m->is_method(), "must be method");
    int res = m->name()->fast_compare(name);
    if (res == 0) {
      // found matching name; do linear search to find matching signature
      // first, quick check for common case
      if (m->signature() == signature) return m;
      // search downwards through overloaded methods
      int i;
      for (i = mid - 1; i >= l; i--) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // search upwards
      for (i = mid + 1; i <= h; i++) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // not found
#ifdef ASSERT
      int index = linear_search(methods, name, signature);
      assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
      return NULL;
    } else if (res < 0) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
#ifdef ASSERT
  int index = linear_search(methods, name, signature);
  assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
  return NULL;
}

methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
    if (method != NULL) return method;
    klass = instanceKlass::cast(klass)->super();
  }
  return NULL;
}

// lookup a method in all the interfaces that this class implements
methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name,
                                                         Symbol* signature) const {
  objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
  int num_ifs = all_ifs->length();
  instanceKlass *ik = NULL;
  for (int i = 0; i < num_ifs; i++) {
    ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
    methodOop m = ik->lookup_method(name, signature);
    if (m != NULL) {
      return m;
    }
  }
  return NULL;
}

/* jni_id_for_impl for jfieldIds only */
JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
  MutexLocker ml(JfieldIdCreation_lock);
  // Retry lookup after we got the lock
  JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
  if (probe == NULL) {
    // Slow case, allocate new static field identifier
    probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
    this_oop->set_jni_ids(probe);
  }
  return probe;
}

/* jni_id_for for jfieldIds only */
JNIid* instanceKlass::jni_id_for(int offset) {
  JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
  if (probe == NULL) {
    probe = jni_id_for_impl(this->as_klassOop(), offset);
  }
  return probe;
}

// Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the
// locking has to be done very carefully to avoid deadlocks
// and/or other cache consistency problems.
//
jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_jmethod_ids() to advertise the new cache.
  // A partially constructed cache should never be seen by a racing
  // thread. We also use release_store_ptr() to save a new jmethodID
  // in the cache so a partially constructed jmethodID should never be
  // seen either. Cache reads of existing jmethodIDs proceed without a
  // lock, but cache writes of a new jmethodID require uniqueness and
  // creation of the cache itself requires no leaks so a lock is
  // generally acquired in those two cases.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.

  if (jmeths != NULL) {
    // the cache already exists
    if (!ik_h->idnum_can_increment()) {
      // the cache can't grow so we can just get the current values
      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
    } else {
      // cache can grow so we have to be more careful
      if (Threads::number_of_threads() == 0 ||
          SafepointSynchronize::is_at_safepoint()) {
        // we're single threaded or at a safepoint - no locking needed
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      } else {
        MutexLocker ml(JmethodIdCreation_lock);
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      }
    }
  }
  // implied else:
  // we need to allocate a cache so default length and id values are good

  if (jmeths == NULL ||   // no cache yet
      length <= idnum ||  // cache is too short
      id == NULL) {       // cache doesn't contain entry

    // This function can be called by the VMThread so we have to do all
    // things that might block on a safepoint before grabbing the lock.
    // Otherwise, we can deadlock with the VMThread or have a cache
    // consistency issue. These vars keep track of what we might have
    // to free after the lock is dropped.
    jmethodID  to_dealloc_id     = NULL;
    jmethodID* to_dealloc_jmeths = NULL;

    // may not allocate new_jmeths or use it if we allocate it
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // allocate a new cache that might be used
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      // cache size is stored in element[0], other elements offset by one
      new_jmeths[0] = (jmethodID)size;
    }

    // allocate a new jmethodID that might be used
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old but not obsolete, so should exist");
      methodHandle current_method_h(current_method == NULL? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    if (Threads::number_of_threads() == 0 ||
        SafepointSynchronize::is_at_safepoint()) {
      // we're single threaded or at a safepoint - no locking needed
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    } else {
      MutexLocker ml(JmethodIdCreation_lock);
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    }

    // The lock has been dropped so we can free resources.
    // Free up either the old cache or the new cache if we allocated one.
    if (to_dealloc_jmeths != NULL) {
      FreeHeap(to_dealloc_jmeths);
    }
    // free up the new ID since it wasn't needed
    if (to_dealloc_id != NULL) {
      JNIHandles::destroy_jmethod_id(to_dealloc_id);
    }
  }
  return id;
}

// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
// that causes the caller to go to a safepoint or we can deadlock with
// the VMThread or have cache consistency issues.
//
jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
            instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
            jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
            jmethodID** to_dealloc_jmeths_p) {
  assert(new_id != NULL, "sanity check");
  assert(to_dealloc_id_p != NULL, "sanity check");
  assert(to_dealloc_jmeths_p != NULL, "sanity check");
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JmethodIdCreation_lock->owned_by_self(), "sanity check");

  // reacquire the cache - we are locked, single threaded or at a safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID  id     = NULL;
  size_t     length = 0;

  if (jmeths == NULL ||                         // no cache yet
      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
    if (jmeths != NULL) {
      // copy any existing entries from the old cache
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
    }
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // fetch jmethodID (if any) from the existing cache
    id = jmeths[idnum+1];
    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
  }
  if (id == NULL) {
    // No matching jmethodID in the existing cache or we have a new
    // cache or we just grew the cache. This cache write is done here
    // by the first thread to win the foot race because a jmethodID
    // needs to be unique once it is generally available.
    id = new_id;

    // The jmethodID cache can be read while unlocked so we have to
    // make sure the new jmethodID is complete before installing it
    // in the cache.
    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
  } else {
    *to_dealloc_id_p = new_id; // save new id for later delete
  }
  return id;
}

// Common code to get the jmethodID cache length and the jmethodID
// value at index idnum if there is one.
//
void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
       size_t idnum, size_t *length_p, jmethodID* id_p) {
  assert(cache != NULL, "sanity check");
  assert(length_p != NULL, "sanity check");
  assert(id_p != NULL, "sanity check");

  // cache size is stored in element[0], other elements offset by one
  *length_p = (size_t)cache[0];
  if (*length_p <= idnum) {  // cache is too short
    *id_p = NULL;
  } else {
    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
  }
}

// Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
  size_t idnum = (size_t)method->method_idnum();
  jmethodID* jmeths = methods_jmethod_ids_acquire();
  size_t length;                                // length assigned as debugging crumb
  jmethodID id = NULL;
  if (jmeths != NULL &&                         // If there is a cache
      (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
    id = jmeths[idnum+1];                       // Look up the id (may be NULL)
  }
  return id;
}
  1292 // Cache an itable index
  1293 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
  1294   int* indices = methods_cached_itable_indices_acquire();
  1295   int* to_dealloc_indices = NULL;
  1297   // We use a double-check locking idiom here because this cache is
  1298   // performance sensitive. In the normal system, this cache only
  1299   // transitions from NULL to non-NULL which is safe because we use
  1300   // release_set_methods_cached_itable_indices() to advertise the
  1301   // new cache. A partially constructed cache should never be seen
  1302   // by a racing thread. Cache reads and writes proceed without a
  1303   // lock, but creation of the cache itself must not leak memory, so a
  1304   // lock is generally acquired in that case.
  1305   //
  1306   // If the RedefineClasses() API has been used, then this cache can
  1307   // grow and we'll have transitions from non-NULL to bigger non-NULL.
  1308   // Cache creation must not leak memory, and we require safe ordering
  1309   // between all cache accesses and freeing of the old cache, so a lock
  1310   // is generally acquired when the RedefineClasses() API has been used.
  1312   if (indices == NULL || idnum_can_increment()) {
  1313     // we need a cache or the cache can grow
  1314     MutexLocker ml(JNICachedItableIndex_lock);
  1315     // reacquire the cache to see if another thread already did the work
  1316     indices = methods_cached_itable_indices_acquire();
  1317     size_t length = 0;
  1318     // cache size is stored in element[0], other elements offset by one
  1319     if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
  1320       size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
  1321       int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
  1322       new_indices[0] = (int)size;
  1323       // copy any existing entries
  1324       size_t i;
  1325       for (i = 0; i < length; i++) {
  1326         new_indices[i+1] = indices[i+1];
  1327       }
  1328       // Set all the rest to -1
  1329       for (i = length; i < size; i++) {
  1330         new_indices[i+1] = -1;
  1331       }
  1332       if (indices != NULL) {
  1333         // We have an old cache to delete so save it for after we
  1334         // drop the lock.
  1335         to_dealloc_indices = indices;
  1336       }
  1337       release_set_methods_cached_itable_indices(indices = new_indices);
  1338     }
  1340     if (idnum_can_increment()) {
  1341       // this cache can grow so we have to write to it safely
  1342       indices[idnum+1] = index;
  1343     }
  1344   } else {
  1345     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  1346   }
  1348   if (!idnum_can_increment()) {
  1349     // The cache cannot grow and this JNI itable index value does not
  1350     // have to be unique like a jmethodID. If there is a race to set it,
  1351     // it doesn't matter.
  1352     indices[idnum+1] = index;
  1353   }
  1355   if (to_dealloc_indices != NULL) {
  1356     // we allocated a new cache so free the old one
  1357     FreeHeap(to_dealloc_indices);
  1358   }
  1359 }
  1362 // Retrieve a cached itable index
  1363 int instanceKlass::cached_itable_index(size_t idnum) {
  1364   int* indices = methods_cached_itable_indices_acquire();
  1365   if (indices != NULL && ((size_t)indices[0]) > idnum) {
  1366     // indices exist and are long enough, retrieve the possibly cached index
  1367     return indices[idnum+1];
  1368   }
  1369   return -1;
  1370 }
  1373 //
  1374 // nmethodBucket is used to record dependent nmethods for
  1375 // deoptimization.  nmethod dependencies are actually <klass, method>
  1376 // pairs but we really only care about the klass part for purposes of
  1377 // finding nmethods which might need to be deoptimized.  Instead of
  1378 // recording the method, a count of how many times a particular nmethod
  1379 // was recorded is kept.  This ensures that any recording errors are
  1380 // noticed since an nmethod should be removed as many times as it is
  1381 // added.
  1382 //
  1383 class nmethodBucket {
  1384  private:
  1385   nmethod*       _nmethod;
  1386   int            _count;
  1387   nmethodBucket* _next;
  1389  public:
  1390   nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
  1391     _nmethod = nmethod;
  1392     _next = next;
  1393     _count = 1;
  1394   }
  1395   int count()                             { return _count; }
  1396   int increment()                         { _count += 1; return _count; }
  1397   int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
  1398   nmethodBucket* next()                   { return _next; }
  1399   void set_next(nmethodBucket* b)         { _next = b; }
  1400   nmethod* get_nmethod()                  { return _nmethod; }
  1401 };
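       // (Editorial example, not part of the upstream source.) If an nmethod
       // records two dependencies on this klass, add_dependent_nmethod() runs
       // twice and the single bucket's count reaches 2; remove_dependent_nmethod()
       // must then also run twice before the bucket is freed, which is how a
       // mismatched add/remove would be noticed.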
  1404 //
  1405 // Walk the list of dependent nmethods searching for nmethods which
  1406 // are dependent on the klassOop that was passed in and mark them for
  1407 // deoptimization.  Returns the number of nmethods found.
  1408 //
  1409 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
  1410   assert_locked_or_safepoint(CodeCache_lock);
  1411   int found = 0;
  1412   nmethodBucket* b = _dependencies;
  1413   while (b != NULL) {
  1414     nmethod* nm = b->get_nmethod();
  1415     // since dependencies aren't removed until an nmethod becomes a zombie,
  1416     // the dependency list may contain nmethods which aren't alive.
  1417     if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
  1418       if (TraceDependencies) {
  1419         ResourceMark rm;
  1420         tty->print_cr("Marked for deoptimization");
  1421         tty->print_cr("  context = %s", this->external_name());
  1422         changes.print();
  1423         nm->print();
  1424         nm->print_dependencies();
  1426       nm->mark_for_deoptimization();
  1427       found++;
  1428     }
  1429     b = b->next();
  1430   }
  1431   return found;
  1432 }
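       // (Editorial note, not part of the upstream source.) This routine only
       // sets the mark; the caller is expected to perform the actual
       // deoptimization of the marked nmethods afterwards, which is why the
       // count is returned.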
  1435 //
  1436 // Add an nmethodBucket to the list of dependencies for this nmethod.
  1437 // It's possible that an nmethod has multiple dependencies on this klass
  1438 // so a count is kept for each bucket to guarantee that creation and
  1439 // deletion of dependencies is consistent.
  1440 //
  1441 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
  1442   assert_locked_or_safepoint(CodeCache_lock);
  1443   nmethodBucket* b = _dependencies;
  1444   nmethodBucket* last = NULL;
  1445   while (b != NULL) {
  1446     if (nm == b->get_nmethod()) {
  1447       b->increment();
  1448       return;
  1449     }
  1450     b = b->next();
  1451   }
  1452   _dependencies = new nmethodBucket(nm, _dependencies);
  1453 }
  1456 //
  1457 // Decrement count of the nmethod in the dependency list and remove
  1458 // the bucket completely when the count goes to 0.  This method must
  1459 // find a corresponding bucket otherwise there's a bug in the
  1460 // recording of dependencies.
  1461 //
  1462 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
  1463   assert_locked_or_safepoint(CodeCache_lock);
  1464   nmethodBucket* b = _dependencies;
  1465   nmethodBucket* last = NULL;
  1466   while (b != NULL) {
  1467     if (nm == b->get_nmethod()) {
  1468       if (b->decrement() == 0) {
  1469         if (last == NULL) {
  1470           _dependencies = b->next();
  1471         } else {
  1472           last->set_next(b->next());
  1473         }
  1474         delete b;
  1475       }
  1476       return;
  1477     }
  1478     last = b;
  1479     b = b->next();
  1480   }
  1481 #ifdef ASSERT
  1482   tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
  1483   nm->print();
  1484 #endif // ASSERT
  1485   ShouldNotReachHere();
  1486 }
  1489 #ifndef PRODUCT
  1490 void instanceKlass::print_dependent_nmethods(bool verbose) {
  1491   nmethodBucket* b = _dependencies;
  1492   int idx = 0;
  1493   while (b != NULL) {
  1494     nmethod* nm = b->get_nmethod();
  1495     tty->print("[%d] count=%d { ", idx++, b->count());
  1496     if (!verbose) {
  1497       nm->print_on(tty, "nmethod");
  1498       tty->print_cr(" } ");
  1499     } else {
  1500       nm->print();
  1501       nm->print_dependencies();
  1502       tty->print_cr("--- } ");
  1503     }
  1504     b = b->next();
  1505   }
  1506 }
  1509 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
  1510   nmethodBucket* b = _dependencies;
  1511   while (b != NULL) {
  1512     if (nm == b->get_nmethod()) {
  1513       return true;
  1514     }
  1515     b = b->next();
  1516   }
  1517   return false;
  1518 }
  1519 #endif //PRODUCT
  1522 #ifdef ASSERT
  1523 template <class T> void assert_is_in(T *p) {
  1524   T heap_oop = oopDesc::load_heap_oop(p);
  1525   if (!oopDesc::is_null(heap_oop)) {
  1526     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
  1527     assert(Universe::heap()->is_in(o), "should be in heap");
  1528   }
  1529 }
  1530 template <class T> void assert_is_in_closed_subset(T *p) {
  1531   T heap_oop = oopDesc::load_heap_oop(p);
  1532   if (!oopDesc::is_null(heap_oop)) {
  1533     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
  1534     assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
  1535   }
  1536 }
  1537 template <class T> void assert_is_in_reserved(T *p) {
  1538   T heap_oop = oopDesc::load_heap_oop(p);
  1539   if (!oopDesc::is_null(heap_oop)) {
  1540     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
  1541     assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
  1542   }
  1543 }
  1544 template <class T> void assert_nothing(T *p) {}
  1546 #else
  1547 template <class T> void assert_is_in(T *p) {}
  1548 template <class T> void assert_is_in_closed_subset(T *p) {}
  1549 template <class T> void assert_is_in_reserved(T *p) {}
  1550 template <class T> void assert_nothing(T *p) {}
  1551 #endif // ASSERT
  1553 //
  1554 // Macros that iterate over areas of oops which are specialized on type of
  1555 // oop pointer either narrow or wide, depending on UseCompressedOops
  1556 //
  1557 // Parameters are:
  1558 //   T         - type of oop to point to (either oop or narrowOop)
  1559 //   start_p   - starting pointer for region to iterate over
  1560 //   count     - number of oops or narrowOops to iterate over
  1561 //   do_oop    - action to perform on each oop (it's arbitrary C code which
  1562 //               makes it more efficient to put in a macro rather than making
  1563 //               it a template function)
  1564 //   assert_fn - assert function, made a template function because
  1565 //               performance doesn't matter when asserts are enabled.
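       // (Editorial example, not part of the upstream source; 'base', 'n' and
       // 'closure' are hypothetical.)  A use such as
       //   InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, base, n,
       //                                         closure->do_oop(p), assert_is_in)
       // expands to a while loop that applies assert_is_in(p) and then
       // closure->do_oop(p) to each of the n oop slots starting at base.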
  1566 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
  1567   T, start_p, count, do_oop,                \
  1568   assert_fn)                                \
  1569 {                                           \
  1570   T* p         = (T*)(start_p);             \
  1571   T* const end = p + (count);               \
  1572   while (p < end) {                         \
  1573     (assert_fn)(p);                         \
  1574     do_oop;                                 \
  1575     ++p;                                    \
  1576   }                                         \
  1577 }
  1579 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
  1580   T, start_p, count, do_oop,                \
  1581   assert_fn)                                \
  1582 {                                           \
  1583   T* const start = (T*)(start_p);           \
  1584   T*       p     = start + (count);         \
  1585   while (start < p) {                       \
  1586     --p;                                    \
  1587     (assert_fn)(p);                         \
  1588     do_oop;                                 \
  1589   }                                         \
  1590 }
  1592 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  1593   T, start_p, count, low, high,             \
  1594   do_oop, assert_fn)                        \
  1595 {                                           \
  1596   T* const l = (T*)(low);                   \
  1597   T* const h = (T*)(high);                  \
  1598   assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
  1599          mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
  1600          "bounded region must be properly aligned"); \
  1601   T* p       = (T*)(start_p);               \
  1602   T* end     = p + (count);                 \
  1603   if (p < l) p = l;                         \
  1604   if (end > h) end = h;                     \
  1605   while (p < end) {                         \
  1606     (assert_fn)(p);                         \
  1607     do_oop;                                 \
  1608     ++p;                                    \
  1609   }                                         \
  1610 }
  1613 // The following macros call specialized macros, passing either oop or
  1614 // narrowOop as the specialization type.  These test the UseCompressedOops
  1615 // flag.
  1616 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
  1617 {                                                                        \
  1618   /* Compute oopmap block range. The common case                         \
  1619      is nonstatic_oop_map_size == 1. */                                  \
  1620   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  1621   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
  1622   if (UseCompressedOops) {                                               \
  1623     while (map < end_map) {                                              \
  1624       InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
  1625         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
  1626         do_oop, assert_fn)                                               \
  1627       ++map;                                                             \
  1628     }                                                                    \
  1629   } else {                                                               \
  1630     while (map < end_map) {                                              \
  1631       InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
  1632         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
  1633         do_oop, assert_fn)                                               \
  1634       ++map;                                                             \
  1635     }                                                                    \
  1636   }                                                                      \
  1637 }
  1639 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
  1640 {                                                                        \
  1641   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
  1642   OopMapBlock* map             = start_map + nonstatic_oop_map_count();  \
  1643   if (UseCompressedOops) {                                               \
  1644     while (start_map < map) {                                            \
  1645       --map;                                                             \
  1646       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
  1647         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
  1648         do_oop, assert_fn)                                               \
  1649     }                                                                    \
  1650   } else {                                                               \
  1651     while (start_map < map) {                                            \
  1652       --map;                                                             \
  1653       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
  1654         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
  1655         do_oop, assert_fn)                                               \
  1656     }                                                                    \
  1657   }                                                                      \
  1658 }
  1660 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
  1661                                               assert_fn)                 \
  1662 {                                                                        \
  1663   /* Compute oopmap block range. The common case is                      \
  1664      nonstatic_oop_map_size == 1, so we accept the                       \
  1665      usually non-existent extra overhead of examining                    \
  1666      all the maps. */                                                    \
  1667   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  1668   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
  1669   if (UseCompressedOops) {                                               \
  1670     while (map < end_map) {                                              \
  1671       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
  1672         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
  1673         low, high,                                                       \
  1674         do_oop, assert_fn)                                               \
  1675       ++map;                                                             \
  1676     }                                                                    \
  1677   } else {                                                               \
  1678     while (map < end_map) {                                              \
  1679       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
  1680         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
  1681         low, high,                                                       \
  1682         do_oop, assert_fn)                                               \
  1683       ++map;                                                             \
  1684     }                                                                    \
  1685   }                                                                      \
  1686 }
  1688 void instanceKlass::oop_follow_contents(oop obj) {
  1689   assert(obj != NULL, "can't follow the content of NULL object");
  1690   obj->follow_header();
  1691   InstanceKlass_OOP_MAP_ITERATE( \
  1692     obj, \
  1693     MarkSweep::mark_and_push(p), \
  1694     assert_is_in_closed_subset)
  1695 }
  1697 #ifndef SERIALGC
  1698 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
  1699                                         oop obj) {
  1700   assert(obj != NULL, "can't follow the content of NULL object");
  1701   obj->follow_header(cm);
  1702   InstanceKlass_OOP_MAP_ITERATE( \
  1703     obj, \
  1704     PSParallelCompact::mark_and_push(cm, p), \
  1705     assert_is_in)
  1706 }
  1707 #endif // SERIALGC
  1709 // closure's do_header() method dictates whether the given closure should be
  1710 // applied to the klass ptr in the object header.
  1712 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
  1714 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
  1715   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
  1716   /* header */                                                          \
  1717   if (closure->do_header()) {                                           \
  1718     obj->oop_iterate_header(closure);                                   \
  1719   }                                                                     \
  1720   InstanceKlass_OOP_MAP_ITERATE(                                        \
  1721     obj,                                                                \
  1722     SpecializationStats::                                               \
  1723       record_do_oop_call##nv_suffix(SpecializationStats::ik);           \
  1724     (closure)->do_oop##nv_suffix(p),                                    \
  1725     assert_is_in_closed_subset)                                         \
  1726   return size_helper();                                                 \
  1727 }
  1729 #ifndef SERIALGC
  1730 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
  1732 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
  1733                                               OopClosureType* closure) {        \
  1734   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  1735   /* header */                                                                  \
  1736   if (closure->do_header()) {                                                   \
  1737     obj->oop_iterate_header(closure);                                           \
  1738   }                                                                             \
  1739   /* instance variables */                                                      \
  1740   InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                        \
  1741     obj,                                                                        \
  1742     SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
  1743     (closure)->do_oop##nv_suffix(p),                                            \
  1744     assert_is_in_closed_subset)                                                 \
  1745   return size_helper();                                                        \
  1746 }
  1747 #endif // !SERIALGC
  1749 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
  1751 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,              \
  1752                                                   OopClosureType* closure, \
  1753                                                   MemRegion mr) {          \
  1754   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
  1755   if (closure->do_header()) {                                            \
  1756     obj->oop_iterate_header(closure, mr);                                \
  1757   }                                                                      \
  1758   InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                 \
  1759     obj, mr.start(), mr.end(),                                           \
  1760     (closure)->do_oop##nv_suffix(p),                                     \
  1761     assert_is_in_closed_subset)                                          \
  1762   return size_helper();                                                  \
  1763 }
  1765 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
  1766 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
  1767 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
  1768 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
  1769 #ifndef SERIALGC
  1770 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
  1771 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
  1772 #endif // !SERIALGC
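       // (Editorial note, not part of the upstream source.) The
       // ALL_OOP_OOP_ITERATE_CLOSURES_1/2 macros stamp out one concrete
       // oop_oop_iterate##nv_suffix method per known closure type, so each
       // closure can be invoked through its statically bound do_oop##nv_suffix
       // variant instead of a virtual do_oop call per slot.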
  1774 int instanceKlass::oop_adjust_pointers(oop obj) {
  1775   int size = size_helper();
  1776   InstanceKlass_OOP_MAP_ITERATE( \
  1777     obj, \
  1778     MarkSweep::adjust_pointer(p), \
  1779     assert_is_in)
  1780   obj->adjust_header();
  1781   return size;
  1782 }
  1784 #ifndef SERIALGC
  1785 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  1786   InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
  1787     obj, \
  1788     if (PSScavenge::should_scavenge(p)) { \
  1789       pm->claim_or_forward_depth(p); \
  1790     }, \
  1791     assert_nothing )
  1792 }
  1794 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
  1795   InstanceKlass_OOP_MAP_ITERATE( \
  1796     obj, \
  1797     PSParallelCompact::adjust_pointer(p), \
  1798     assert_nothing)
  1799   return size_helper();
  1800 }
  1802 #endif // SERIALGC
  1804 // This klass is alive but the implementor link is not followed/updated.
  1805 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
  1807 void instanceKlass::follow_weak_klass_links(
  1808   BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  1809   assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
  1810   if (ClassUnloading) {
  1811     for (int i = 0; i < implementors_limit; i++) {
  1812       klassOop impl = _implementors[i];
  1813       if (impl == NULL)  break;  // no more in the list
  1814       if (!is_alive->do_object_b(impl)) {
  1815         // remove this guy from the list by overwriting him with the tail
  1816         int lasti = --_nof_implementors;
  1817         assert(lasti >= i && lasti < implementors_limit, "just checking");
  1818         _implementors[i] = _implementors[lasti];
  1819         _implementors[lasti] = NULL;
  1820         --i; // rerun the loop at this index
  1821       }
  1822     }
  1823   } else {
  1824     for (int i = 0; i < implementors_limit; i++) {
  1825       keep_alive->do_oop(&adr_implementors()[i]);
  1826     }
  1827   }
  1828   Klass::follow_weak_klass_links(is_alive, keep_alive);
  1829 }
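       // (Editorial note, not part of the upstream source.) The removal above
       // uses the swap-with-last trick: the dead implementor is overwritten by
       // the tail entry and the loop index is rerun, which keeps _implementors
       // dense without shifting the remaining elements.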
  1831 void instanceKlass::remove_unshareable_info() {
  1832   Klass::remove_unshareable_info();
  1833   init_implementor();
  1834 }
  1836 static void clear_all_breakpoints(methodOop m) {
  1837   m->clear_all_breakpoints();
  1838 }
  1840 void instanceKlass::release_C_heap_structures() {
  1841   // Deallocate oop map cache
  1842   if (_oop_map_cache != NULL) {
  1843     delete _oop_map_cache;
  1844     _oop_map_cache = NULL;
  1845   }
  1847   // Deallocate JNI identifiers for jfieldIDs
  1848   JNIid::deallocate(jni_ids());
  1849   set_jni_ids(NULL);
  1851   jmethodID* jmeths = methods_jmethod_ids_acquire();
  1852   if (jmeths != (jmethodID*)NULL) {
  1853     release_set_methods_jmethod_ids(NULL);
  1854     FreeHeap(jmeths);
  1855   }
  1857   int* indices = methods_cached_itable_indices_acquire();
  1858   if (indices != (int*)NULL) {
  1859     release_set_methods_cached_itable_indices(NULL);
  1860     FreeHeap(indices);
  1861   }
  1863   // release dependencies
  1864   nmethodBucket* b = _dependencies;
  1865   _dependencies = NULL;
  1866   while (b != NULL) {
  1867     nmethodBucket* next = b->next();
  1868     delete b;
  1869     b = next;
  1870   }
  1872   // Deallocate breakpoint records
  1873   if (breakpoints() != 0x0) {
  1874     methods_do(clear_all_breakpoints);
  1875     assert(breakpoints() == 0x0, "should have cleared breakpoints");
  1876   }
  1878   // deallocate information about previous versions
  1879   if (_previous_versions != NULL) {
  1880     for (int i = _previous_versions->length() - 1; i >= 0; i--) {
  1881       PreviousVersionNode * pv_node = _previous_versions->at(i);
  1882       delete pv_node;
  1883     }
  1884     delete _previous_versions;
  1885     _previous_versions = NULL;
  1886   }
  1888   // deallocate the cached class file
  1889   if (_cached_class_file_bytes != NULL) {
  1890     os::free(_cached_class_file_bytes);
  1891     _cached_class_file_bytes = NULL;
  1892     _cached_class_file_len = 0;
  1893   }
  1895   // Decrement symbol reference counts associated with the unloaded class.
  1896   if (_name != NULL) _name->decrement_refcount();
  1897   // unreference array name derived from this class name (arrays of an unloaded
  1898   // class can't be referenced anymore).
  1899   if (_array_name != NULL)  _array_name->decrement_refcount();
  1900   if (_source_file_name != NULL) _source_file_name->decrement_refcount();
  1901   if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount();
  1902   // walk constant pool and decrement symbol reference counts
  1903   _constants->unreference_symbols();
  1904 }
  1906 void instanceKlass::set_source_file_name(Symbol* n) {
  1907   _source_file_name = n;
  1908   if (_source_file_name != NULL) _source_file_name->increment_refcount();
  1909 }
  1911 void instanceKlass::set_source_debug_extension(Symbol* n) {
  1912   _source_debug_extension = n;
  1913   if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount();
  1914 }
  1916 address instanceKlass::static_field_addr(int offset) {
  1917   return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
  1918 }
  1921 const char* instanceKlass::signature_name() const {
  1922   const char* src = (const char*) (name()->as_C_string());
  1923   const int src_length = (int)strlen(src);
  1924   char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
  1925   int src_index = 0;
  1926   int dest_index = 0;
  1927   dest[dest_index++] = 'L';
  1928   while (src_index < src_length) {
  1929     dest[dest_index++] = src[src_index++];
  1930   }
  1931   dest[dest_index++] = ';';
  1932   dest[dest_index] = '\0';
  1933   return dest;
  1934 }
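       // (Editorial example, not part of the upstream source.) For the class
       // name java/lang/String this returns "Ljava/lang/String;".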
  1936 // different versions of is_same_class_package
  1937 bool instanceKlass::is_same_class_package(klassOop class2) {
  1938   klassOop class1 = as_klassOop();
  1939   oop classloader1 = instanceKlass::cast(class1)->class_loader();
  1940   Symbol* classname1 = Klass::cast(class1)->name();
  1942   if (Klass::cast(class2)->oop_is_objArray()) {
  1943     class2 = objArrayKlass::cast(class2)->bottom_klass();
  1944   }
  1945   oop classloader2;
  1946   if (Klass::cast(class2)->oop_is_instance()) {
  1947     classloader2 = instanceKlass::cast(class2)->class_loader();
  1948   } else {
  1949     assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
  1950     classloader2 = NULL;
  1951   }
  1952   Symbol* classname2 = Klass::cast(class2)->name();
  1954   return instanceKlass::is_same_class_package(classloader1, classname1,
  1955                                               classloader2, classname2);
  1956 }
  1958 bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
  1959   klassOop class1 = as_klassOop();
  1960   oop classloader1 = instanceKlass::cast(class1)->class_loader();
  1961   Symbol* classname1 = Klass::cast(class1)->name();
  1963   return instanceKlass::is_same_class_package(classloader1, classname1,
  1964                                               classloader2, classname2);
  1965 }
  1967 // Return true if two classes are in the same package; classloader and
  1968 // classname information is enough to determine a class's package.
  1969 bool instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
  1970                                           oop class_loader2, Symbol* class_name2) {
  1971   if (class_loader1 != class_loader2) {
  1972     return false;
  1973   } else if (class_name1 == class_name2) {
  1974     return true;                // skip painful bytewise comparison
  1975   } else {
  1976     ResourceMark rm;
  1978     // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
  1979     // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
  1980     // Otherwise, we just compare jbyte values between the strings.
  1981     const jbyte *name1 = class_name1->base();
  1982     const jbyte *name2 = class_name2->base();
  1984     const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
  1985     const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
  1987     if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
  1988       // One of the two doesn't have a package.  Only return true
  1989       // if the other one also doesn't have a package.
  1990       return last_slash1 == last_slash2;
  1991     } else {
  1992       // Skip over '['s
  1993       if (*name1 == '[') {
  1994         do {
  1995           name1++;
  1996         } while (*name1 == '[');
  1997         if (*name1 != 'L') {
  1998           // Something is terribly wrong.  Shouldn't be here.
  1999           return false;
  2000         }
  2001       }
  2002       if (*name2 == '[') {
  2003         do {
  2004           name2++;
  2005         } while (*name2 == '[');
  2006         if (*name2 != 'L') {
  2007           // Something is terribly wrong.  Shouldn't be here.
  2008           return false;
  2009         }
  2010       }
  2012       // Check that package part is identical
  2013       int length1 = last_slash1 - name1;
  2014       int length2 = last_slash2 - name2;
  2016       return UTF8::equal(name1, length1, name2, length2);
  2017     }
  2018   }
  2019 }
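       // (Editorial example, not part of the upstream source.) With the same
       // class loader, "java/lang/String" and "java/lang/Integer" compare equal
       // on the package part "java/lang"; a name with no '/' has no package and
       // only matches another packageless name.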
  2021 // Returns true iff super_method can be overridden by a method in targetclassname
  2022 // See JLS 3rd edition 8.4.6.1
  2023 // Assumes name-signature match
  2024 // "this" is instanceKlass of super_method which must exist
  2025 // note that the instanceKlass of the method in the targetclassname has not always been created yet
  2026 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
  2027    // Private methods cannot be overridden
  2028    if (super_method->is_private()) {
  2029      return false;
  2030    }
  2031    // If super method is accessible, then override
  2032    if ((super_method->is_protected()) ||
  2033        (super_method->is_public())) {
  2034      return true;
  2035    }
  2036    // Package-private methods are not inherited outside of package
  2037    assert(super_method->is_package_private(), "must be package private");
  2038    return(is_same_class_package(targetclassloader(), targetclassname));
  2039 }
  2041 /* defined for now in jvm.cpp, for historical reasons *--
  2042 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
  2043                                                      Symbol*& simple_name_result, TRAPS) {
  2044   ...
  2045 }
  2046 */
  2048 // tell if two classes have the same enclosing class (at package level)
  2049 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
  2050                                                 klassOop class2_oop, TRAPS) {
  2051   if (class2_oop == class1->as_klassOop())          return true;
  2052   if (!Klass::cast(class2_oop)->oop_is_instance())  return false;
  2053   instanceKlassHandle class2(THREAD, class2_oop);
  2055   // must be in same package before we try anything else
  2056   if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
  2057     return false;
  2059   // As long as there is an outer1.getEnclosingClass,
  2060   // shift the search outward.
  2061   instanceKlassHandle outer1 = class1;
  2062   for (;;) {
  2063     // As we walk along, look for equalities between outer1 and class2.
  2064     // Eventually, the walks will terminate as outer1 stops
  2065     // at the top-level class around the original class.
  2066     bool ignore_inner_is_member;
  2067     klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
  2068                                                     CHECK_false);
  2069     if (next == NULL)  break;
  2070     if (next == class2())  return true;
  2071     outer1 = instanceKlassHandle(THREAD, next);
  2072   }
  2074   // Now do the same for class2.
  2075   instanceKlassHandle outer2 = class2;
  2076   for (;;) {
  2077     bool ignore_inner_is_member;
  2078     klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
  2079                                                     CHECK_false);
  2080     if (next == NULL)  break;
  2081     // Might as well check the new outer against all available values.
  2082     if (next == class1())  return true;
  2083     if (next == outer1())  return true;
  2084     outer2 = instanceKlassHandle(THREAD, next);
  2085   }
  2087   // If by this point we have not found an equality between the
  2088   // two classes, we know they are in separate package members.
  2089   return false;
  2090 }
  2093 jint instanceKlass::compute_modifier_flags(TRAPS) const {
  2094   klassOop k = as_klassOop();
  2095   jint access = access_flags().as_int();
  2097   // But check if it happens to be member class.
  2098   typeArrayOop inner_class_list = inner_classes();
  2099   int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
  2100   assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
  2101   if (length > 0) {
  2102     typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
  2103     instanceKlassHandle ik(THREAD, k);
  2104     for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
  2105       int ioff = inner_class_list_h->ushort_at(
  2106                       i + instanceKlass::inner_class_inner_class_info_offset);
  2108       // Inner class attribute can be zero, skip it.
  2109       // Strange but true:  JVM spec. allows null inner class refs.
  2110       if (ioff == 0) continue;
  2112       // only look at classes that are already loaded
  2113       // since we are looking for the flags for ourselves.
  2114       Symbol* inner_name = ik->constants()->klass_name_at(ioff);
  2115       if ((ik->name() == inner_name)) {
  2116         // This is really a member class.
  2117         access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
  2118         break;
  2119       }
  2120     }
  2121   }
  2122   // Remember to strip ACC_SUPER bit
  2123   return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
  2124 }
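       // (Editorial note, not part of the upstream source.) If this klass names
       // itself in its own InnerClasses attribute, the member-class access flags
       // recorded there replace the class-file flags; either way ACC_SUPER is
       // stripped and only JVM_ACC_WRITTEN_FLAGS survive.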
  2126 jint instanceKlass::jvmti_class_status() const {
  2127   jint result = 0;
  2129   if (is_linked()) {
  2130     result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
  2131   }
  2133   if (is_initialized()) {
  2134     assert(is_linked(), "Class status is not consistent");
  2135     result |= JVMTI_CLASS_STATUS_INITIALIZED;
  2136   }
  2137   if (is_in_error_state()) {
  2138     result |= JVMTI_CLASS_STATUS_ERROR;
  2139   }
  2140   return result;
  2141 }
  2143 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
  2144   itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
  2145   int method_table_offset_in_words = ioe->offset()/wordSize;
  2146   int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
  2147                        / itableOffsetEntry::size();
  2149   for (int cnt = 0 ; ; cnt ++, ioe ++) {
  2150     // If the interface isn't implemented by the receiver class,
  2151     // the VM should throw IncompatibleClassChangeError.
  2152     if (cnt >= nof_interfaces) {
  2153       THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError());
  2154     }
  2156     klassOop ik = ioe->interface_klass();
  2157     if (ik == holder) break;
  2158   }
  2160   itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
  2161   methodOop m = ime[index].method();
  2162   if (m == NULL) {
  2163     THROW_0(vmSymbols::java_lang_AbstractMethodError());
  2164   }
  2165   return m;
  2166 }
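       // (Editorial note, not part of the upstream source.) Layout assumed by
       // the lookup above: the itable begins with one itableOffsetEntry per
       // implemented interface, each locating that interface's block of
       // itableMethodEntry slots; 'index' then selects the method within the
       // block of the matching interface klass.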
  2168 // On-stack replacement stuff
  2169 void instanceKlass::add_osr_nmethod(nmethod* n) {
  2170   // only one compilation can be active
  2171   NEEDS_CLEANUP
  2172   // This is a short non-blocking critical region, so the no safepoint check is ok.
  2173   OsrList_lock->lock_without_safepoint_check();
  2174   assert(n->is_osr_method(), "wrong kind of nmethod");
  2175   n->set_osr_link(osr_nmethods_head());
  2176   set_osr_nmethods_head(n);
  2177   // Raise the highest osr level if necessary
  2178   if (TieredCompilation) {
  2179     methodOop m = n->method();
  2180     m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
  2181   }
  2182   // Remember to unlock again
  2183   OsrList_lock->unlock();
  2185   // Get rid of the osr methods for the same bci that have lower levels.
  2186   if (TieredCompilation) {
  2187     for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
  2188       nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
  2189       if (inv != NULL && inv->is_in_use()) {
  2190         inv->make_not_entrant();
  2191       }
  2192     }
  2193   }
  2194 }
  2197 void instanceKlass::remove_osr_nmethod(nmethod* n) {
  2198   // This is a short non-blocking critical region, so the no safepoint check is ok.
  2199   OsrList_lock->lock_without_safepoint_check();
  2200   assert(n->is_osr_method(), "wrong kind of nmethod");
  2201   nmethod* last = NULL;
  2202   nmethod* cur  = osr_nmethods_head();
  2203   int max_level = CompLevel_none;  // Find the max comp level excluding n
  2204   methodOop m = n->method();
  2205   // Search for match
  2206   while(cur != NULL && cur != n) {
  2207     if (TieredCompilation) {
  2208       // Find max level before n
  2209       max_level = MAX2(max_level, cur->comp_level());
  2210     }
  2211     last = cur;
  2212     cur = cur->osr_link();
  2213   }
  2214   nmethod* next = NULL;
  2215   if (cur == n) {
  2216     next = cur->osr_link();
  2217     if (last == NULL) {
  2218       // Remove first element
  2219       set_osr_nmethods_head(next);
  2220     } else {
  2221       last->set_osr_link(next);
  2222     }
  2223   }
  2224   n->set_osr_link(NULL);
  2225   if (TieredCompilation) {
  2226     cur = next;
  2227     while (cur != NULL) {
  2228       // Find max level after n
  2229       max_level = MAX2(max_level, cur->comp_level());
  2230       cur = cur->osr_link();
  2231     }
  2232     m->set_highest_osr_comp_level(max_level);
  2233   }
  2234   // Remember to unlock again
  2235   OsrList_lock->unlock();
  2236 }
  2238 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
  2239   // This is a short non-blocking critical region, so the no safepoint check is ok.
  2240   OsrList_lock->lock_without_safepoint_check();
  2241   nmethod* osr = osr_nmethods_head();
  2242   nmethod* best = NULL;
  2243   while (osr != NULL) {
  2244     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
  2245     // There can be a time when a c1 osr method exists but we are waiting
  2246     // for a c2 version. When c2 completes its osr nmethod we will trash
  2247     // the c1 version and only be able to find the c2 version. However,
  2248     // while we overflow in the c1 code at back branches, we don't want to
  2249     // try to switch to the same code that we are already running.
  2251     if (osr->method() == m &&
  2252         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
  2253       if (match_level) {
  2254         if (osr->comp_level() == comp_level) {
  2255           // Found a match - return it.
  2256           OsrList_lock->unlock();
  2257           return osr;
  2258         }
  2259       } else {
  2260         if (best == NULL || (osr->comp_level() > best->comp_level())) {
  2261           if (osr->comp_level() == CompLevel_highest_tier) {
  2262             // Found the best possible - return it.
  2263             OsrList_lock->unlock();
  2264             return osr;
  2265           }
  2266           best = osr;
  2267         }
  2268       }
  2269     }
  2270     osr = osr->osr_link();
  2271   }
  2272   OsrList_lock->unlock();
  2273   if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
  2274     return best;
  2275   }
  2276   return NULL;
  2277 }
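       // (Editorial note, not part of the upstream source.) Matching rules,
       // roughly: with match_level the OSR nmethod's level must equal comp_level
       // exactly; otherwise the highest-level match is remembered and returned
       // only if its level is at least comp_level.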
  2279 // -----------------------------------------------------------------------------------------------------
  2280 #ifndef PRODUCT
  2282 // Printing
  2284 #define BULLET  " - "
  2286 void FieldPrinter::do_field(fieldDescriptor* fd) {
  2287   _st->print(BULLET);
  2288   if (_obj == NULL) {
  2289     fd->print_on(_st);
  2290     _st->cr();
  2291   } else {
  2292     fd->print_on_for(_st, _obj);
  2293     _st->cr();
  2294   }
  2295 }
  2298 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
  2299   Klass::oop_print_on(obj, st);
  2301   if (as_klassOop() == SystemDictionary::String_klass()) {
  2302     typeArrayOop value  = java_lang_String::value(obj);
  2303     juint        offset = java_lang_String::offset(obj);
  2304     juint        length = java_lang_String::length(obj);
  2305     if (value != NULL &&
  2306         value->is_typeArray() &&
  2307         offset          <= (juint) value->length() &&
  2308         offset + length <= (juint) value->length()) {
  2309       st->print(BULLET"string: ");
  2310       Handle h_obj(obj);
  2311       java_lang_String::print(h_obj, st);
  2312       st->cr();
  2313       if (!WizardMode)  return;  // that is enough
  2314     }
  2315   }
  2317   st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
  2318   FieldPrinter print_field(st, obj);
  2319   do_nonstatic_fields(&print_field);
  2321   if (as_klassOop() == SystemDictionary::Class_klass()) {
  2322     st->print(BULLET"signature: ");
  2323     java_lang_Class::print_signature(obj, st);
  2324     st->cr();
  2325     klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
  2326     st->print(BULLET"fake entry for mirror: ");
  2327     mirrored_klass->print_value_on(st);
  2328     st->cr();
  2329     st->print(BULLET"fake entry resolved_constructor: ");
  2330     methodOop ctor = java_lang_Class::resolved_constructor(obj);
  2331     ctor->print_value_on(st);
  2332     klassOop array_klass = java_lang_Class::array_klass(obj);
  2333     st->cr();
  2334     st->print(BULLET"fake entry for array: ");
  2335     array_klass->print_value_on(st);
  2336     st->cr();
  2337     st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj));
  2338     st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj));
  2339     klassOop real_klass = java_lang_Class::as_klassOop(obj);
  2340     if (real_klass && real_klass->klass_part()->oop_is_instance()) {
  2341       instanceKlass::cast(real_klass)->do_local_static_fields(&print_field);
  2342     }
  2343   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
  2344     st->print(BULLET"signature: ");
  2345     java_lang_invoke_MethodType::print_signature(obj, st);
  2346     st->cr();
  2347   }
  2348 }
  2350 #endif //PRODUCT
  2352 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
  2353   st->print("a ");
  2354   name()->print_value_on(st);
  2355   obj->print_address_on(st);
  2356   if (as_klassOop() == SystemDictionary::String_klass()
  2357       && java_lang_String::value(obj) != NULL) {
  2358     ResourceMark rm;
  2359     int len = java_lang_String::length(obj);
  2360     int plen = (len < 24 ? len : 12);
  2361     char* str = java_lang_String::as_utf8_string(obj, 0, plen);
  2362     st->print(" = \"%s\"", str);
  2363     if (len > plen)
  2364       st->print("...[%d]", len);
  2365   } else if (as_klassOop() == SystemDictionary::Class_klass()) {
  2366     klassOop k = java_lang_Class::as_klassOop(obj);
  2367     st->print(" = ");
  2368     if (k != NULL) {
  2369       k->print_value_on(st);
  2370     } else {
  2371       const char* tname = type2name(java_lang_Class::primitive_type(obj));
  2372       st->print("%s", tname ? tname : "type?");
  2373     }
  2374   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
  2375     st->print(" = ");
  2376     java_lang_invoke_MethodType::print_signature(obj, st);
  2377   } else if (java_lang_boxing_object::is_instance(obj)) {
  2378     st->print(" = ");
  2379     java_lang_boxing_object::print(obj, st);
  2380   }
  2381 }
  2383 const char* instanceKlass::internal_name() const {
  2384   return external_name();
  2385 }
  2387 // Verification
  2389 class VerifyFieldClosure: public OopClosure {
  2390  protected:
  2391   template <class T> void do_oop_work(T* p) {
  2392     guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
  2393     oop obj = oopDesc::load_decode_heap_oop(p);
  2394     if (!obj->is_oop_or_null()) {
  2395       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
  2396       Universe::print();
  2397       guarantee(false, "boom");
  2398     }
  2399   }
  2400  public:
  2401   virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
  2402   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
  2403 };
  2405 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
  2406   Klass::oop_verify_on(obj, st);
  2407   VerifyFieldClosure blk;
  2408   oop_oop_iterate(obj, &blk);
  2409 }
  2411 #ifndef PRODUCT
  2413 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
  2414   // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
  2415   // cannot be called since this function is called before the VM is
  2416   // able to determine which JDK version it is running with.
  2417   // The check below is always false since 1.4.
  2418   return;
  2420   // This verification code temporarily disabled for the 1.4
  2421   // reflection implementation since java.lang.Class now has
  2422   // Java-level instance fields. Should rewrite this to handle this
  2423   // case.
  2424   if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
  2425     // Verify that java.lang.Class instances have a fake oop field added.
  2426     instanceKlass* ik = instanceKlass::cast(k);
  2428     // Check that we have the right class
  2429     static bool first_time = true;
  2430     guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
  2431     first_time = false;
  2432     const int extra = java_lang_Class::number_of_fake_oop_fields;
  2433     guarantee(ik->nonstatic_field_size() == extra, "just checking");
  2434     guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
  2435     guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
  2437     // Check that the map is (2,extra)
  2438     int offset = java_lang_Class::klass_offset;
  2440     OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
  2441     guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
  2442               "sanity");
  2443   }
  2444 }
  2446 #endif // ndef PRODUCT
  2448 // JNIid class for jfieldIDs only
  2449 // Note to reviewers:
  2450 // These JNI functions are just moved over to column 1 and not changed
  2451 // in the compressed oops workspace.
  2452 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
  2453   _holder = holder;
  2454   _offset = offset;
  2455   _next = next;
  2456   debug_only(_is_static_field_id = false;)
  2457 }
  2460 JNIid* JNIid::find(int offset) {
  2461   JNIid* current = this;
  2462   while (current != NULL) {
  2463     if (current->offset() == offset) return current;
  2464     current = current->next();
  2465   }
  2466   return NULL;
  2467 }
  2469 void JNIid::oops_do(OopClosure* f) {
  2470   for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
  2471     f->do_oop(cur->holder_addr());
  2472   }
  2473 }
  2475 void JNIid::deallocate(JNIid* current) {
  2476   while (current != NULL) {
  2477     JNIid* next = current->next();
  2478     delete current;
  2479     current = next;
  2480   }
  2481 }
  2484 void JNIid::verify(klassOop holder) {
  2485   int first_field_offset  = instanceMirrorKlass::offset_of_static_fields();
  2486   int end_field_offset;
  2487   end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
  2489   JNIid* current = this;
  2490   while (current != NULL) {
  2491     guarantee(current->holder() == holder, "Invalid klass in JNIid");
  2492 #ifdef ASSERT
  2493     int o = current->offset();
  2494     if (current->is_static_field_id()) {
  2495       guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
  2496     }
  2497 #endif
  2498     current = current->next();
  2499   }
  2500 }
  2503 #ifdef ASSERT
  2504 void instanceKlass::set_init_state(ClassState state) {
  2505   bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
  2506                                                : (_init_state < state);
  2507   assert(good_state || state == allocated, "illegal state transition");
  2508   _init_state = state;
  2509 }
  2510 #endif
  2513 // RedefineClasses() support for previous versions:
  2515 // Add an information node that contains weak references to the
  2516 // interesting parts of the previous version of the_class.
  2517 // This is also where we clean out any unused weak references.
  2518 // Note that while we delete nodes from the _previous_versions
  2519 // array, we never delete the array itself until the klass is
  2520 // unloaded. The has_been_redefined() query depends on that fact.
  2521 //
  2522 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
  2523        BitMap* emcp_methods, int emcp_method_count) {
  2524   assert(Thread::current()->is_VM_thread(),
  2525          "only VMThread can add previous versions");
  2527   if (_previous_versions == NULL) {
  2528     // This is the first previous version so make some space.
  2529     // Start with 2 elements under the assumption that the class
  2530     // won't be redefined much.
  2531     _previous_versions =  new (ResourceObj::C_HEAP)
  2532                             GrowableArray<PreviousVersionNode *>(2, true);
  2533   }
  2535   // RC_TRACE macro has an embedded ResourceMark
  2536   RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
  2537     ikh->external_name(), _previous_versions->length(), emcp_method_count));
  2538   constantPoolHandle cp_h(ikh->constants());
  2539   jobject cp_ref;
  2540   if (cp_h->is_shared()) {
  2541     // a shared ConstantPool requires a regular reference; a weak
  2542     // reference would be collectible
  2543     cp_ref = JNIHandles::make_global(cp_h);
  2544   } else {
  2545     cp_ref = JNIHandles::make_weak_global(cp_h);
  2546   }
  2547   PreviousVersionNode * pv_node = NULL;
  2548   objArrayOop old_methods = ikh->methods();
  2550   if (emcp_method_count == 0) {
  2551     // non-shared ConstantPool gets a weak reference
  2552     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
  2553     RC_TRACE(0x00000400,
  2554       ("add: all methods are obsolete; flushing any EMCP weak refs"));
  2555   } else {
  2556     int local_count = 0;
  2557     GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
  2558       GrowableArray<jweak>(emcp_method_count, true);
  2559     for (int i = 0; i < old_methods->length(); i++) {
  2560       if (emcp_methods->at(i)) {
  2561         // this old method is EMCP so save a weak ref
  2562         methodOop old_method = (methodOop) old_methods->obj_at(i);
  2563         methodHandle old_method_h(old_method);
  2564         jweak method_ref = JNIHandles::make_weak_global(old_method_h);
  2565         method_refs->append(method_ref);
  2566         if (++local_count >= emcp_method_count) {
  2567           // no more EMCP methods so bail out now
  2568           break;
  2569         }
  2570       }
  2571     }
  2572     // non-shared ConstantPool gets a weak reference
  2573     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
  2574   }
  2576   _previous_versions->append(pv_node);
  2578   // Using weak references allows the interesting parts of previous
  2579   // classes to be GC'ed when they are no longer needed. Since the
  2580   // caller is the VMThread and we are at a safepoint, this is a good
  2581   // time to clear out unused weak references.
  2583   RC_TRACE(0x00000400, ("add: previous version length=%d",
  2584     _previous_versions->length()));
  2586   // skip the last entry since we just added it
  2587   for (int i = _previous_versions->length() - 2; i >= 0; i--) {
  2588     // check the previous versions array for GC'ed weak refs
  2589     pv_node = _previous_versions->at(i);
  2590     cp_ref = pv_node->prev_constant_pool();
  2591     assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
  2592     if (cp_ref == NULL) {
  2593       delete pv_node;
  2594       _previous_versions->remove_at(i);
  2595       // Since we are traversing the array backwards, we don't have to
  2596       // do anything special with the index.
  2597       continue;  // robustness
  2598     }
  2600     constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  2601     if (cp == NULL) {
  2602       // this entry has been GC'ed so remove it
  2603       delete pv_node;
  2604       _previous_versions->remove_at(i);
  2605       // Since we are traversing the array backwards, we don't have to
  2606       // do anything special with the index.
  2607       continue;
  2608     } else {
  2609       RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
  2610     }
  2612     GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
  2613     if (method_refs != NULL) {
  2614       RC_TRACE(0x00000400, ("add: previous methods length=%d",
  2615         method_refs->length()));
  2616       for (int j = method_refs->length() - 1; j >= 0; j--) {
  2617         jweak method_ref = method_refs->at(j);
  2618         assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
  2619         if (method_ref == NULL) {
  2620           method_refs->remove_at(j);
  2621           // Since we are traversing the array backwards, we don't have to
  2622           // do anything special with the index.
  2623           continue;  // robustness
  2624         }
  2626         methodOop method = (methodOop)JNIHandles::resolve(method_ref);
  2627         if (method == NULL || emcp_method_count == 0) {
  2628           // This method entry has been GC'ed or the current
  2629           // RedefineClasses() call has made all methods obsolete
  2630           // so remove it.
  2631           JNIHandles::destroy_weak_global(method_ref);
  2632           method_refs->remove_at(j);
  2633         } else {
  2634           // RC_TRACE macro has an embedded ResourceMark
  2635           RC_TRACE(0x00000400,
  2636             ("add: %s(%s): previous method @%d in version @%d is alive",
  2637             method->name()->as_C_string(), method->signature()->as_C_string(),
  2638             j, i));

  int obsolete_method_count = old_methods->length() - emcp_method_count;

  if (emcp_method_count != 0 && obsolete_method_count != 0 &&
      _previous_versions->length() > 1) {
    // We have a mix of obsolete and EMCP methods. If there is more
    // than one previous version (i.e., more than the one we just
    // added), then we have to clear out any matching EMCP method
    // entries the hard way.
    int local_count = 0;
    for (int i = 0; i < old_methods->length(); i++) {
      if (!emcp_methods->at(i)) {
        // only obsolete methods are interesting
        methodOop old_method = (methodOop) old_methods->obj_at(i);
        Symbol* m_name = old_method->name();
        Symbol* m_signature = old_method->signature();

        // skip the last entry since we just added it
        for (int j = _previous_versions->length() - 2; j >= 0; j--) {
          // check the previous versions array for GC'ed weak refs
          pv_node = _previous_versions->at(j);
          cp_ref = pv_node->prev_constant_pool();
          assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
          if (cp_ref == NULL) {
            delete pv_node;
            _previous_versions->remove_at(j);
            // Since we are traversing the array backwards, we don't have to
            // do anything special with the index.
            continue;  // robustness
          }

          constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
          if (cp == NULL) {
            // this entry has been GC'ed so remove it
            delete pv_node;
            _previous_versions->remove_at(j);
            // Since we are traversing the array backwards, we don't have to
            // do anything special with the index.
            continue;
          }

          GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
          if (method_refs == NULL) {
            // We have run into a PreviousVersion generation where
            // all methods were made obsolete during that generation's
            // RedefineClasses() operation. At the time of that
            // operation, all EMCP methods were flushed so we don't
            // have to go back any further.
            //
            // A NULL method_refs is different from an empty method_refs.
            // We cannot infer any optimizations about older generations
            // from an empty method_refs for the current generation.
            break;
          }

          for (int k = method_refs->length() - 1; k >= 0; k--) {
            jweak method_ref = method_refs->at(k);
            assert(method_ref != NULL,
              "weak method ref was unexpectedly cleared");
            if (method_ref == NULL) {
              method_refs->remove_at(k);
              // Since we are traversing the array backwards, we don't
              // have to do anything special with the index.
              continue;  // robustness
            }

            methodOop method = (methodOop)JNIHandles::resolve(method_ref);
            if (method == NULL) {
              // this method entry has been GC'ed so skip it
              JNIHandles::destroy_weak_global(method_ref);
              method_refs->remove_at(k);
              continue;
            }

            if (method->name() == m_name &&
                method->signature() == m_signature) {
              // The current RedefineClasses() call has made all EMCP
              // versions of this method obsolete so mark it as obsolete
              // and remove the weak ref.
              RC_TRACE(0x00000400,
                ("add: %s(%s): flush obsolete method @%d in version @%d",
                m_name->as_C_string(), m_signature->as_C_string(), k, j));

              method->set_is_obsolete();
              JNIHandles::destroy_weak_global(method_ref);
              method_refs->remove_at(k);
              break;
            }
          }

          // The previous loop may not find a matching EMCP method, but
          // that doesn't mean that we can optimize and not go any
          // further back in the PreviousVersion generations. The EMCP
          // method for this generation could have already been GC'ed,
          // but there still may be an older EMCP method that has not
          // been GC'ed.
        }

        if (++local_count >= obsolete_method_count) {
          // no more obsolete methods so bail out now
          break;
        }
      }
    }
  }
} // end add_previous_version()
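
// Illustrative sketch (an assumed layout, inferred from the code above,
// not an authoritative diagram): after two RedefineClasses() operations
// on the same class, the bookkeeping looks roughly like this:
//
//   _previous_versions
//     [0] -> PreviousVersionNode
//              _prev_constant_pool  (jobject; weak unless the cp is shared)
//              _prev_EMCP_methods   (GrowableArray<jweak>, or NULL when all
//                                    methods were made obsolete)
//     [1] -> PreviousVersionNode    (the entry just appended)
//
// Any of the weak refs may resolve to NULL once the referent is
// collected, which is why every pass above re-resolves each handle
// before using it.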


// Determine if instanceKlass has a previous version.
bool instanceKlass::has_previous_version() const {
  if (_previous_versions == NULL) {
    // no previous versions array so answer is easy
    return false;
  }

  for (int i = _previous_versions->length() - 1; i >= 0; i--) {
    // Check the previous versions array for an info node that hasn't
    // been GC'ed
    PreviousVersionNode * pv_node = _previous_versions->at(i);

    jobject cp_ref = pv_node->prev_constant_pool();
    assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
    if (cp_ref == NULL) {
      continue;  // robustness
    }

    constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
    if (cp != NULL) {
      // we have at least one previous version
      return true;
    }

    // We don't have to check the method refs. If the constant pool has
    // been GC'ed then so have the methods.
  }

  // all of the underlying nodes' info has been GC'ed
  return false;
} // end has_previous_version()
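
// Note (an observation, not stated elsewhere in this file): a true
// answer can decay to false as soon as a later GC clears the remaining
// weak refs. The expected callers run inside a safepoint operation,
// like the RedefineClasses() path above, so the answer is presumed
// stable for the duration of that operation.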

methodOop instanceKlass::method_with_idnum(int idnum) {
  methodOop m = NULL;
  if (idnum < methods()->length()) {
    m = (methodOop) methods()->obj_at(idnum);
  }
  if (m == NULL || m->method_idnum() != idnum) {
    for (int index = 0; index < methods()->length(); ++index) {
      m = (methodOop) methods()->obj_at(index);
      if (m->method_idnum() == idnum) {
        return m;
      }
    }
  }
  return m;
}
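
// Caveat for callers (follows directly from the code above): when no
// method has a matching idnum, the fallback loop leaves 'm' pointing at
// the last method scanned rather than NULL, so a caller that cannot
// rule out a stale idnum should verify the result, e.g. (hypothetical
// caller):
//
//   methodOop m = ik->method_with_idnum(idnum);
//   if (m != NULL && m->method_idnum() == idnum) {
//     // safe to use m
//   }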


// Set the annotation at 'idnum' to 'anno'.
// We don't want to create or extend the array if 'anno' is NULL, since that is the
// default value.  However, if the array exists and is long enough, we must set NULL values.
void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
  objArrayOop md = *md_p;
  if (md != NULL && md->length() > idnum) {
    md->obj_at_put(idnum, anno);
  } else if (anno != NULL) {
    // create the array
    int length = MAX2(idnum+1, (int)_idnum_allocated_count);
    md = oopFactory::new_system_objArray(length, Thread::current());
    if (*md_p != NULL) {
      // copy the existing entries
      for (int index = 0; index < (*md_p)->length(); index++) {
        md->obj_at_put(index, (*md_p)->obj_at(index));
      }
    }
    set_annotations(md, md_p);
    md->obj_at_put(idnum, anno);
  } // if no array and idnum isn't included there is nothing to do
}
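
// Illustrative call (the caller and the field name are assumptions for
// the sketch, not taken from this section): a wrapper that stores
// method annotations would pass the matching side array by address so
// this routine can grow it on demand:
//
//   ik->set_methods_annotations_of(idnum, anno_bytes, &_methods_annotations);
//
// Passing anno == NULL stores NULL only when the array already covers
// idnum; otherwise nothing is allocated, since NULL is the default.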

// Construct a PreviousVersionNode entry for the array hung off
// the instanceKlass.
PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
  bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {

  _prev_constant_pool = prev_constant_pool;
  _prev_cp_is_weak = prev_cp_is_weak;
  _prev_EMCP_methods = prev_EMCP_methods;
}
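
// Note: prev_cp_is_weak records which JNIHandles flavor was used to
// create prev_constant_pool, so the destructor below can release it
// with the matching destroy call. A shared constant pool is held by a
// regular (strong) global ref; only a non-shared constant pool gets a
// weak ref, which is allowed to be collected.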


// Destroy a PreviousVersionNode
PreviousVersionNode::~PreviousVersionNode() {
  if (_prev_constant_pool != NULL) {
    if (_prev_cp_is_weak) {
      JNIHandles::destroy_weak_global(_prev_constant_pool);
    } else {
      JNIHandles::destroy_global(_prev_constant_pool);
    }
  }

  if (_prev_EMCP_methods != NULL) {
    for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
      jweak method_ref = _prev_EMCP_methods->at(i);
      if (method_ref != NULL) {
        JNIHandles::destroy_weak_global(method_ref);
      }
    }
    delete _prev_EMCP_methods;
  }
}
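
// Ownership summary: the node owns its JNI handles and the C-heap
// allocated GrowableArray of jweaks, but not the underlying oops. Those
// remain collectible, which is the point of using weak refs here.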


// Construct a PreviousVersionInfo entry
PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
  _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
  _prev_EMCP_method_handles = NULL;

  jobject cp_ref = pv_node->prev_constant_pool();
  assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
  if (cp_ref == NULL) {
    return;  // robustness
  }

  constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  if (cp == NULL) {
    // Weak reference has been GC'ed. Since the constant pool has been
    // GC'ed, the methods have also been GC'ed.
    return;
  }

  // make the constantPoolOop safe to return
  _prev_constant_pool_handle = constantPoolHandle(cp);

  GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
  if (method_refs == NULL) {
    // the instanceKlass did not have any EMCP methods
    return;
  }

  _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);

  int n_methods = method_refs->length();
  for (int i = 0; i < n_methods; i++) {
    jweak method_ref = method_refs->at(i);
    assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
    if (method_ref == NULL) {
      continue;  // robustness
    }

    methodOop method = (methodOop)JNIHandles::resolve(method_ref);
    if (method == NULL) {
      // this entry has been GC'ed so skip it
      continue;
    }

    // make the methodOop safe to return
    _prev_EMCP_method_handles->append(methodHandle(method));
  }
}
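
// Design note: the constructor eagerly converts the surviving jweaks
// into Handles. From this point the resolved oops are kept alive by the
// Handles (scoped to the walker's HandleMark, see below) rather than by
// the weak refs, so the caller can use them without re-checking for a
// concurrent clear.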


// Destroy a PreviousVersionInfo
PreviousVersionInfo::~PreviousVersionInfo() {
  // Since _prev_EMCP_method_handles is not C-heap allocated, we
  // don't have to delete it.
}


// Construct a helper for walking the previous versions array
PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
  _previous_versions = ik->previous_versions();
  _current_index = 0;
  // _hm needs no initialization
  _current_p = NULL;
}


// Destroy a PreviousVersionWalker
PreviousVersionWalker::~PreviousVersionWalker() {
  // Delete the current info just in case the caller didn't walk to
  // the end of the previous versions list. No harm if _current_p is
  // already NULL.
  delete _current_p;

  // When _hm is destroyed, all the Handles returned in
  // PreviousVersionInfo objects will be destroyed.
  // Also, after this destructor is finished it will be
  // safe to delete the GrowableArray allocated in the
  // PreviousVersionInfo objects.
}


// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
  if (_previous_versions == NULL) {
    // no previous versions so nothing to return
    return NULL;
  }

  delete _current_p;  // cleanup the previous info for the caller
  _current_p = NULL;  // reset to NULL so we don't delete same object twice

  int length = _previous_versions->length();

  while (_current_index < length) {
    PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
                                          PreviousVersionInfo(pv_node);

    constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
    if (cp_h.is_null()) {
      delete pv_info;

      // The underlying node's info has been GC'ed so try the next one.
      // We don't have to check the methods. If the constant pool has
      // been GC'ed then so have the methods.
      continue;
    }

    // Found a node with non GC'ed info so return it. The caller will
    // need to delete pv_info when they are done with it.
    _current_p = pv_info;
    return pv_info;
  }

  // all of the underlying nodes' info has been GC'ed
  return NULL;
} // end next_previous_version()
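
// Typical traversal (a sketch of the assumed caller pattern; JVMTI code
// walks previous versions in this style, e.g. when re-applying
// breakpoints after a class redefinition):
//
//   PreviousVersionWalker pvw(ik);
//   for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
//        pv_info != NULL; pv_info = pvw.next_previous_version()) {
//     GrowableArray<methodHandle>* methods =
//       pv_info->prev_EMCP_method_handles();
//     // use the handles while the walker (and its HandleMark) is live
//   }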
