src/share/vm/oops/instanceKlass.cpp

changeset:   487:75b0f3cb1943 (Merge)
author:      dcubed
date:        Thu, 13 Mar 2008 14:17:48 -0700
parents:     479:52fed2ec0afb, 484:31000d79ec71
child:       548:ba764ed4b6f2
permissions: -rw-r--r--
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_instanceKlass.cpp.incl"

bool instanceKlass::should_be_initialized() const {
  return !is_initialized();
}
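// Note: the accessors below build fresh klassVtable/klassItable wrapper
// objects over the tables embedded in the klassOop on each call; they are
// resource-area objects, so callers are expected to be under a ResourceMark.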
klassVtable* instanceKlass::vtable() const {
  return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
}

klassItable* instanceKlass::itable() const {
  return new klassItable(as_klassOop());
}

void instanceKlass::eager_initialize(Thread *thread) {
  if (!EagerInitialization) return;

  if (this->is_not_initialized()) {
    // abort if the class has a class initializer
    if (this->class_initializer() != NULL) return;

    // abort if it is java.lang.Object (initialization is handled in genesis)
    klassOop super = this->super();
    if (super == NULL) return;

    // abort if the super class should be initialized
    if (!instanceKlass::cast(super)->is_initialized()) return;

    // call body to expose the this pointer
    instanceKlassHandle this_oop(thread, this->as_klassOop());
    eager_initialize_impl(this_oop);
  }
}
void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  ObjectLocker ol(this_oop, THREAD);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()

  ClassState old_state = this_oop->_init_state;
  link_class_impl(this_oop, true, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // Abort if linking the class throws an exception.

    // Use a test to avoid redundantly resetting the state if there's
    // no change.  Set_init_state() asserts that state changes make
    // progress, whereas here we might just be spinning in place.
    if (old_state != this_oop->_init_state)
      this_oop->set_init_state(old_state);
  } else {
    // linking successful, mark class as initialized
    this_oop->set_init_state(fully_initialized);
    // trace
    if (TraceClassInitialization) {
      ResourceMark rm(THREAD);
      tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
    }
  }
}
// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refer to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::initialize(TRAPS) {
  if (this->should_be_initialized()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    initialize_impl(this_oop, CHECK);
    // Note: at this point the class may be initialized
    //       OR it may be in the state of being initialized
    //       in case of recursive initialization!
  } else {
    assert(is_initialized(), "sanity check");
  }
}
bool instanceKlass::verify_code(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // 1) Verify the bytecodes
  Verifier::Mode mode =
    throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
  return Verifier::verify(this_oop, mode, CHECK_false);
}

// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.
void instanceKlass::unlink_class() {
  assert(is_linked(), "must be linked");
  _init_state = loaded;
}

void instanceKlass::link_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, true, CHECK);
  }
}

// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
bool instanceKlass::link_class_or_fail(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, false, CHECK_false);
  }
  return is_linked();
}

bool instanceKlass::link_class_impl(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // check for error state
  if (this_oop->is_in_error_state()) {
    ResourceMark rm(THREAD);
    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
               this_oop->external_name(), false);
  }
  // return if already verified
  if (this_oop->is_linked()) {
    return true;
  }

  // Timing
  // timer handles recursion
  assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
  JavaThread* jt = (JavaThread*)THREAD;
  PerfTraceTimedEvent vmtimer(ClassLoader::perf_class_link_time(),
                        ClassLoader::perf_classes_linked(),
                        jt->get_thread_stat()->class_link_recursion_count_addr());

  // link super class before linking this class
  instanceKlassHandle super(THREAD, this_oop->super());
  if (super.not_null()) {
    if (super->is_interface()) {  // check if super class is an interface
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
        "class %s has interface %s as super class",
        this_oop->external_name(),
        super->external_name()
      );
      return false;
    }

    link_class_impl(super, throw_verifyerror, CHECK_false);
  }

  // link all interfaces implemented by this class before linking this class
  objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
  int num_interfaces = interfaces->length();
  for (int index = 0; index < num_interfaces; index++) {
    HandleMark hm(THREAD);
    instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
    link_class_impl(ih, throw_verifyerror, CHECK_false);
  }

  // in case the class is linked in the process of linking its superclasses
  if (this_oop->is_linked()) {
    return true;
  }

  // verification & rewriting
  {
    ObjectLocker ol(this_oop, THREAD);
    // rewritten will have been set if loader constraint error found
    // on an earlier link attempt
    // don't verify or rewrite if already rewritten
    if (!this_oop->is_linked()) {
      if (!this_oop->is_rewritten()) {
        {
          assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
          JavaThread* jt = (JavaThread*)THREAD;
          // Timer includes any side effects of class verification (resolution,
          // etc), but not recursive entry into verify_code().
          PerfTraceTime timer(ClassLoader::perf_class_verify_time(),
                            jt->get_thread_stat()->class_verify_recursion_count_addr());
          bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
          if (!verify_ok) {
            return false;
          }
        }

        // Just in case a side-effect of verify linked this class already
        // (which can sometimes happen since the verifier loads classes
        // using custom class loaders, which are free to initialize things)
        if (this_oop->is_linked()) {
          return true;
        }

        // also sets rewritten
        this_oop->rewrite_class(CHECK_false);
      }

      // Initialize the vtable and interface table after
      // methods have been rewritten since rewrite may
      // fabricate new methodOops.
      // also does loader constraint checking
      if (!this_oop()->is_shared()) {
        ResourceMark rm(THREAD);
        this_oop->vtable()->initialize_vtable(true, CHECK_false);
        this_oop->itable()->initialize_itable(true, CHECK_false);
      }
#ifdef ASSERT
      else {
        ResourceMark rm(THREAD);
        this_oop->vtable()->verify(tty, true);
        // In case itable verification is ever added.
        // this_oop->itable()->verify(tty, true);
      }
#endif
      this_oop->set_init_state(linked);
      if (JvmtiExport::should_post_class_prepare()) {
        Thread *thread = THREAD;
        assert(thread->is_Java_thread(), "thread->is_Java_thread()");
        JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
      }
    }
  }
  return true;
}
// Rewrite the byte codes of all of the methods of a class.
// Three cases:
//    During the link of a newly loaded class.
//    During the preloading of classes to be written to the shared spaces.
//      - Rewrite the methods and update the method entry points.
//
//    During the link of a class in the shared spaces.
//      - The methods were already rewritten, update the method entry points.
//
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.

void instanceKlass::rewrite_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
  if (this_oop->is_rewritten()) {
    assert(this_oop()->is_shared(), "rewriting an unshared class?");
    return;
  }
  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
  this_oop->set_rewritten();
}
void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Make sure klass is linked (verified) before initialization
  // A class could already be verified, since it has been reflected upon.
  this_oop->link_class(CHECK);

  // refer to the JVM book page 47 for description of steps
  // Step 1
  { ObjectLocker ol(this_oop, THREAD);

    Thread *self = THREAD; // it's passed the current thread

    // Step 2
    // If we were to use wait() instead of waitInterruptibly() then
    // we might end up throwing IE from link/symbol resolution sites
    // that aren't expected to throw.  This would wreak havoc.  See 6320309.
    while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
      ol.waitUninterruptibly(CHECK);
    }

    // Step 3
    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self))
      return;

    // Step 4
    if (this_oop->is_initialized())
      return;

    // Step 5
    if (this_oop->is_in_error_state()) {
      ResourceMark rm(THREAD);
      const char* desc = "Could not initialize class ";
      const char* className = this_oop->external_name();
      size_t msglen = strlen(desc) + strlen(className) + 1;
      char* message = NEW_C_HEAP_ARRAY(char, msglen);
      if (NULL == message) {
        // Out of memory: can't create detailed error message
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
      } else {
        jio_snprintf(message, msglen, "%s%s", desc, className);
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
      }
    }

    // Step 6
    this_oop->set_init_state(being_initialized);
    this_oop->set_init_thread(self);
  }
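  // Note: per the JVMS initialization procedure, this class's monitor is
  // released at the end of Step 6 (the ObjectLocker scope above), so the
  // remaining steps run without holding this class's init lock.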
  // Step 7
  klassOop super_klass = this_oop->super();
  if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
    Klass::cast(super_klass)->initialize(THREAD);

    if (HAS_PENDING_EXCEPTION) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      {
        EXCEPTION_MARK;
        this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, sets state, and notifies all waiting threads
        CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
      }
      THROW_OOP(e());
    }
  }

  // Step 8
  {
    assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
    JavaThread* jt = (JavaThread*)THREAD;
    // Timer includes any side effects of class initialization (resolution,
    // etc), but not recursive entry into call_class_initializer().
    PerfTraceTimedEvent timer(ClassLoader::perf_class_init_time(),
                              ClassLoader::perf_classes_inited(),
                              jt->get_thread_stat()->class_init_recursion_count_addr());
    this_oop->call_class_initializer(THREAD);
  }

  // Step 9
  if (!HAS_PENDING_EXCEPTION) {
    this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
    { ResourceMark rm(THREAD);
      debug_only(this_oop->vtable()->verify(tty, true);)
    }
  }
  else {
    // Step 10 and 11
    Handle e(THREAD, PENDING_EXCEPTION);
    CLEAR_PENDING_EXCEPTION;
    {
      EXCEPTION_MARK;
      this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
      CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
    }
    if (e->is_a(SystemDictionary::error_klass())) {
      THROW_OOP(e());
    } else {
      JavaCallArguments args(e);
      THROW_ARG(vmSymbolHandles::java_lang_ExceptionInInitializerError(),
                vmSymbolHandles::throwable_void_signature(),
                &args);
    }
  }
}
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
  instanceKlassHandle kh(THREAD, this->as_klassOop());
  set_initialization_state_and_notify_impl(kh, state, CHECK);
}

void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  ObjectLocker ol(this_oop, THREAD);
  this_oop->set_init_state(state);
  ol.notify_all(CHECK);
}

void instanceKlass::add_implementor(klassOop k) {
  assert(Compile_lock->owned_by_self(), "");
  // Filter out my subinterfaces.
  // (Note: Interfaces are never on the subklass list.)
  if (instanceKlass::cast(k)->is_interface()) return;

  // Filter out subclasses whose supers already implement me.
  // (Note: CHA must walk subclasses of direct implementors
  // in order to locate indirect implementors.)
  klassOop sk = instanceKlass::cast(k)->super();
  if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
    // We only need to check one immediate superclass, since the
    // implements_interface query looks at transitive_interfaces.
    // Any supers of the super have the same (or fewer) transitive_interfaces.
    return;

  // Update number of implementors
  int i = _nof_implementors++;

  // Record this implementor, if there are not too many already
  if (i < implementors_limit) {
    assert(_implementors[i] == NULL, "should be exactly one implementor");
    oop_store_without_check((oop*)&_implementors[i], k);
  } else if (i == implementors_limit) {
    // clear out the list on first overflow
    for (int i2 = 0; i2 < implementors_limit; i2++)
      oop_store_without_check((oop*)&_implementors[i2], NULL);
  }
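  // Note: _nof_implementors keeps counting past implementors_limit; once the
  // cached list overflows it is cleared, and callers (e.g. CHA) must treat
  // the implementor set as unknown rather than relying on cached entries.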
  // The implementor also implements the transitive_interfaces
  for (int index = 0; index < local_interfaces()->length(); index++) {
    instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
  }
}
void instanceKlass::init_implementor() {
  for (int i = 0; i < implementors_limit; i++)
    oop_store_without_check((oop*)&_implementors[i], NULL);
  _nof_implementors = 0;
}

void instanceKlass::process_interfaces(Thread *thread) {
  // link this class into the implementors list of every interface it implements
  KlassHandle this_as_oop (thread, this->as_klassOop());
  for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
    assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
    instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
    assert(interf->is_interface(), "expected interface");
    interf->add_implementor(this_as_oop());
  }
}

bool instanceKlass::can_be_primary_super_slow() const {
  if (is_interface())
    return false;
  else
    return Klass::can_be_primary_super_slow();
}

objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
  // The secondaries are the implemented interfaces.
  instanceKlass* ik = instanceKlass::cast(as_klassOop());
  objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
  int num_secondaries = num_extra_slots + interfaces->length();
  if (num_secondaries == 0) {
    return Universe::the_empty_system_obj_array();
  } else if (num_extra_slots == 0) {
    return interfaces();
  } else {
    // a mix of both
    objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
    for (int i = 0; i < interfaces->length(); i++) {
      secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
    }
    return secondaries;
  }
}

bool instanceKlass::compute_is_subtype_of(klassOop k) {
  if (Klass::cast(k)->is_interface()) {
    return implements_interface(k);
  } else {
    return Klass::compute_is_subtype_of(k);
  }
}

bool instanceKlass::implements_interface(klassOop k) const {
  if (as_klassOop() == k) return true;
  assert(Klass::cast(k)->is_interface(), "should be an interface class");
  for (int i = 0; i < transitive_interfaces()->length(); i++) {
    if (transitive_interfaces()->obj_at(i) == k) {
      return true;
    }
  }
  return false;
}

objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
  if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  int size = objArrayOopDesc::object_size(length);
  klassOop ak = array_klass(n, CHECK_NULL);
  KlassHandle h_ak (THREAD, ak);
  objArrayOop o =
    (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
  return o;
}

instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
  if (TraceFinalizerRegistration) {
    tty->print("Registered ");
    i->print_value_on(tty);
    tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
  }
  instanceHandle h_i(THREAD, i);
  // Pass the handle as argument, JavaCalls::call expects oops as jobjects
  JavaValue result(T_VOID);
  JavaCallArguments args(h_i);
  methodHandle mh (THREAD, Universe::finalizer_register_method());
  JavaCalls::call(&result, mh, &args, CHECK_NULL);
  return h_i();
}
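// When RegisterFinalizersAtInit is true (the default), finalizer
// registration happens from the Object.<init> constructor instead; the
// explicit register_finalizer() call below covers the
// -XX:-RegisterFinalizersAtInit case.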
instanceOop instanceKlass::allocate_instance(TRAPS) {
  bool has_finalizer_flag = has_finalizer(); // Query before possible GC
  int size = size_helper();  // Query before forming handle.

  KlassHandle h_k(THREAD, as_klassOop());

  instanceOop i;

  i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  if (has_finalizer_flag && !RegisterFinalizersAtInit) {
    i = register_finalizer(i, CHECK_NULL);
  }
  return i;
}

instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
  // Finalizer registration occurs in the Object.<init> constructor
  // and constructors normally aren't run when allocating perm
  // instances so simply disallow finalizable perm objects.  This can
  // be relaxed if a need for it is found.
  assert(!has_finalizer(), "perm objects not allowed to have finalizers");
  int size = size_helper();  // Query before forming handle.
  KlassHandle h_k(THREAD, as_klassOop());
  instanceOop i = (instanceOop)
    CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
  return i;
}

void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
  if (is_interface() || is_abstract()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
              : vmSymbols::java_lang_InstantiationException(), external_name());
  }
  if (as_klassOop() == SystemDictionary::class_klass()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
              : vmSymbols::java_lang_IllegalAccessException(), external_name());
  }
}

klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_oop(THREAD, as_klassOop());
  return array_klass_impl(this_oop, or_null, n, THREAD);
}

klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
  if (this_oop->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_oop->array_klasses() == NULL) {
        objArrayKlassKlass* oakk =
          (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();

        klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
        this_oop->set_array_klasses(k);
      }
    }
  }
  // _this will always be set at this point
  objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
  if (or_null) {
    return oak->array_klass_or_null(n);
  }
  return oak->array_klass(n, CHECK_NULL);
}

klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void instanceKlass::call_class_initializer(TRAPS) {
  instanceKlassHandle ik (THREAD, as_klassOop());
  call_class_initializer_impl(ik, THREAD);
}

static int call_class_initializer_impl_counter = 0;   // for debugging

methodOop instanceKlass::class_initializer() {
  return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
}

void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
  methodHandle h_method(THREAD, this_oop->class_initializer());
  assert(!this_oop->is_initialized(), "we cannot initialize twice");
  if (TraceClassInitialization) {
    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
    this_oop->name()->print_value();
    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
  }
  if (h_method() != NULL) {
    JavaCallArguments args; // No arguments
    JavaValue result(T_VOID);
    JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
  }
}

void instanceKlass::mask_for(methodHandle method, int bci,
  InterpreterOopMap* entry_for) {
  // Dirty read, then double-check under a lock.
  if (_oop_map_cache == NULL) {
    // Otherwise, allocate a new one.
    MutexLocker x(OopMapCacheAlloc_lock);
    // First time use. Allocate a cache in C heap
    if (_oop_map_cache == NULL) {
      _oop_map_cache = new OopMapCache();
    }
  }
  // _oop_map_cache is constant after init; lookup below does its own locking.
  _oop_map_cache->lookup(method, bci, entry_for);
}
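// The field lookups below walk the raw fields() array directly: each field
// description occupies next_offset consecutive entries (access flags, name
// index, signature index, etc.), with names and signatures stored as
// constant-pool indices rather than symbols.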
bool instanceKlass::find_local_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    symbolOop f_name = constants()->symbol_at(name_index);
    symbolOop f_sig  = constants()->symbol_at(sig_index);
    if (f_name == name && f_sig == sig) {
      fd->initialize(as_klassOop(), i);
      return true;
    }
  }
  return false;
}

void instanceKlass::field_names_and_sigs_iterate(OopClosure* closure) {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    symbolOop name = constants()->symbol_at(name_index);
    closure->do_oop((oop*)&name);

    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    symbolOop sig = constants()->symbol_at(sig_index);
    closure->do_oop((oop*)&sig);
  }
}

klassOop instanceKlass::find_interface_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  const int n = local_interfaces()->length();
  for (int i = 0; i < n; i++) {
    klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
    assert(Klass::cast(intf1)->is_interface(), "just checking type");
    // search for field in current interface
    if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
      assert(fd->is_static(), "interface field must be static");
      return intf1;
    }
    // search for field in direct superinterfaces
    klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
    if (intf2 != NULL) return intf2;
  }
  // otherwise field lookup fails
  return NULL;
}

klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  { klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}

klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, bool is_static, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    if (fd->is_static() == is_static) return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  if (is_static) {
    klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}

bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    if (offset_from_fields( i ) == offset) {
      fd->initialize(as_klassOop(), i);
      if (fd->is_static() == is_static) return true;
    }
  }
  return false;
}

bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
      return true;
    }
    klass = Klass::cast(klass)->super();
  }
  return false;
}

void instanceKlass::methods_do(void f(methodOop method)) {
  int len = methods()->length();
  for (int index = 0; index < len; index++) {
    methodOop m = methodOop(methods()->obj_at(index));
    assert(m->is_method(), "must be method");
    f(m);
  }
}

void instanceKlass::do_local_static_fields(FieldClosure* cl) {
  fieldDescriptor fd;
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (fd.is_static()) cl->do_field(&fd);
  }
}

void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}

void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}

static int compare_fields_by_offset(int* a, int* b) {
  return a[0] - b[0];
}
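// do_nonstatic_fields() below builds an array of (offset, fields()-index)
// int pairs and sorts it with the comparator above, so the closure sees the
// fields in object-layout order rather than declaration order.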
void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  instanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = fields()->length();
  // In DebugInfo nonstatic fields are sorted by offset.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
  int j = 0;
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(as_klassOop(), fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}

void instanceKlass::array_klasses_do(void f(klassOop k)) {
  if (array_klasses() != NULL)
    arrayKlass::cast(array_klasses())->array_klasses_do(f);
}

void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
  f(as_klassOop());
  array_klasses_do(f);
}

#ifdef ASSERT
static int linear_search(objArrayOop methods, symbolOop name, symbolOop signature) {
  int len = methods->length();
  for (int index = 0; index < len; index++) {
    methodOop m = (methodOop)(methods->obj_at(index));
    assert(m->is_method(), "must be method");
    if (m->signature() == signature && m->name() == name) {
       return index;
    }
  }
  return -1;
}
#endif

methodOop instanceKlass::find_method(symbolOop name, symbolOop signature) const {
  return instanceKlass::find_method(methods(), name, signature);
}

methodOop instanceKlass::find_method(objArrayOop methods, symbolOop name, symbolOop signature) {
  int len = methods->length();
  // methods are sorted, so do binary search
  int l = 0;
  int h = len - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    methodOop m = (methodOop)methods->obj_at(mid);
    assert(m->is_method(), "must be method");
    int res = m->name()->fast_compare(name);
    if (res == 0) {
      // found matching name; do linear search to find matching signature
      // first, quick check for common case
      if (m->signature() == signature) return m;
      // search downwards through overloaded methods
      int i;
      for (i = mid - 1; i >= l; i--) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // search upwards
      for (i = mid + 1; i <= h; i++) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // not found
#ifdef ASSERT
      int index = linear_search(methods, name, signature);
      if (index != -1) fatal1("binary search bug: should have found entry %d", index);
#endif
      return NULL;
    } else if (res < 0) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
#ifdef ASSERT
  int index = linear_search(methods, name, signature);
  if (index != -1) fatal1("binary search bug: should have found entry %d", index);
#endif
  return NULL;
}
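// Note: fast_compare() orders symbols by address, which is a stable total
// order because symbolOops are canonicalized; the methods array is kept
// sorted in that same order, which is what makes the binary search above
// valid. In debug builds, linear_search() cross-checks the result.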
methodOop instanceKlass::uncached_lookup_method(symbolOop name, symbolOop signature) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
    if (method != NULL) return method;
    klass = instanceKlass::cast(klass)->super();
  }
  return NULL;
}

// lookup a method in all the interfaces that this class implements
methodOop instanceKlass::lookup_method_in_all_interfaces(symbolOop name,
                                                         symbolOop signature) const {
  objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
  int num_ifs = all_ifs->length();
  instanceKlass *ik = NULL;
  for (int i = 0; i < num_ifs; i++) {
    ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
    methodOop m = ik->lookup_method(name, signature);
    if (m != NULL) {
      return m;
    }
  }
  return NULL;
}

/* jni_id_for_impl for jfieldIds only */
JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
  MutexLocker ml(JfieldIdCreation_lock);
  // Retry lookup after we got the lock
  JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
  if (probe == NULL) {
    // Slow case, allocate new static field identifier
    probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
    this_oop->set_jni_ids(probe);
  }
  return probe;
}

/* jni_id_for for jfieldIds only */
JNIid* instanceKlass::jni_id_for(int offset) {
  JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
  if (probe == NULL) {
    probe = jni_id_for_impl(this->as_klassOop(), offset);
  }
  return probe;
}
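// JNIids form a singly linked list hanging off the klass (new entries are
// pushed at the head); lookup is a lock-free linear scan, and only creation
// is serialized by JfieldIdCreation_lock.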
// Lookup or create a jmethodID.
// This code can be called by the VM thread.  For this reason it is critical that
// there are no blocking operations (safepoints) while the lock is held -- or a
// deadlock can occur.
jmethodID instanceKlass::jmethod_id_for_impl(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;
  // array length stored in first element, other elements offset by one
  if (jmeths == NULL ||                         // If there is no jmethodID array,
      (length = (size_t)jmeths[0]) <= idnum ||  // or if it is too short,
      (id = jmeths[idnum+1]) == NULL) {         // or if this jmethodID isn't allocated

    // Do all the safepointing things (allocations) before grabbing the lock.
    // These allocations will have to be freed if they are unused.

    // Allocate a new array of methods.
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // A new array will be needed (unless some other thread beats us to it)
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      new_jmeths[0] = (jmethodID)size;  // array size held in the first element
    }

    // Allocate a new method ID.
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old but not obsolete, so should exist");
      methodHandle current_method_h(current_method == NULL? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    if (Threads::number_of_threads() == 0 || SafepointSynchronize::is_at_safepoint()) {
      // No need and unsafe to lock the JmethodIdCreation_lock at safepoint.
      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
    } else {
      MutexLocker ml(JmethodIdCreation_lock);
      id = get_jmethod_id(ik_h, idnum, new_id, new_jmeths);
    }
  }
  return id;
}
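// jmethodID cache layout: jmeths[0] holds the cache length N, and
// jmeths[1..N] hold the IDs for method idnums 0..N-1; a NULL slot means the
// ID has not been created yet.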
jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, size_t idnum,
                                        jmethodID new_id, jmethodID* new_jmeths) {
  // Retry lookup after we got the lock or ensured we are at safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID  id                = NULL;
  jmethodID  to_dealloc_id     = NULL;
  jmethodID* to_dealloc_jmeths = NULL;
  size_t     length;

  if (jmeths == NULL || (length = (size_t)jmeths[0]) <= idnum) {
    if (jmeths != NULL) {
      // We have grown the array: copy the existing entries, and delete the old array
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      to_dealloc_jmeths = jmeths; // using the new jmeths, deallocate the old one
    }
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    id = jmeths[idnum+1];
    to_dealloc_jmeths = new_jmeths; // using the old jmeths, deallocate the new one
  }
  if (id == NULL) {
    id = new_id;
    jmeths[idnum+1] = id;  // install the new method ID
  } else {
    to_dealloc_id = new_id; // the new id wasn't used, mark it for deallocation
  }

  // Free up unneeded or no longer needed resources
  FreeHeap(to_dealloc_jmeths);
  if (to_dealloc_id != NULL) {
    JNIHandles::destroy_jmethod_id(to_dealloc_id);
  }
  return id;
}
// Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
  size_t idnum = (size_t)method->method_idnum();
  jmethodID* jmeths = methods_jmethod_ids_acquire();
  size_t length;                                // length assigned as debugging crumb
  jmethodID id = NULL;
  if (jmeths != NULL &&                         // If there is a jmethodID array,
      (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
    id = jmeths[idnum+1];                       // Look up the id (may be NULL)
  }
  return id;
}
// Cache an itable index
void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
  int* indices = methods_cached_itable_indices_acquire();
  if (indices == NULL ||                         // If there is no index array,
      ((size_t)indices[0]) <= idnum) {           // or if it is too short
    // Lock before we allocate the array so we don't leak
    MutexLocker ml(JNICachedItableIndex_lock);
    // Retry lookup after we got the lock
    indices = methods_cached_itable_indices_acquire();
    size_t length = 0;
    // array length stored in first element, other elements offset by one
    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
      // Copy the existing entries, if any
      size_t i;
      for (i = 0; i < length; i++) {
        new_indices[i+1] = indices[i+1];
      }
      // Set all the rest to -1
      for (i = length; i < size; i++) {
        new_indices[i+1] = -1;
      }
      if (indices != NULL) {
        FreeHeap(indices);  // delete any old indices
      }
      release_set_methods_cached_itable_indices(indices = new_indices);
    }
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }
  // This is a cache; if there is a race to set it, it doesn't matter
  indices[idnum+1] = index;
}

// Retrieve a cached itable index
int instanceKlass::cached_itable_index(size_t idnum) {
  int* indices = methods_cached_itable_indices_acquire();
  if (indices != NULL && ((size_t)indices[0]) > idnum) {
    // indices exist and are long enough, retrieve possible cached
    return indices[idnum+1];
  }
  return -1;
}
//
// nmethodBucket is used to record dependent nmethods for
// deoptimization.  nmethod dependencies are actually <klass, method>
// pairs but we really only care about the klass part for purposes of
// finding nmethods which might need to be deoptimized.  Instead of
// recording the method, a count of how many times a particular nmethod
// was recorded is kept.  This ensures that any recording errors are
// noticed since an nmethod should be removed as many times as it's
// added.
//
class nmethodBucket {
 private:
  nmethod*       _nmethod;
  int            _count;
  nmethodBucket* _next;

 public:
  nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
    _nmethod = nmethod;
    _next = next;
    _count = 1;
  }
  int count()                             { return _count; }
  int increment()                         { _count += 1; return _count; }
  int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
  nmethodBucket* next()                   { return _next; }
  void set_next(nmethodBucket* b)         { _next = b; }
  nmethod* get_nmethod()                  { return _nmethod; }
};
//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the klassOop that was passed in and mark them for
// deoptimization.  Returns the number of nmethods found.
//
int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
  assert_locked_or_safepoint(CodeCache_lock);
  int found = 0;
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        tty->print_cr("  context = %s", this->external_name());
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      nm->mark_for_deoptimization();
      found++;
    }
    b = b->next();
  }
  return found;
}
//
// Add an nmethodBucket to the list of dependencies for this nmethod.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void instanceKlass::add_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
    b = b->next();
  }
  _dependencies = new nmethodBucket(nm, _dependencies);
}
//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0.  This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies.
//
void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      if (b->decrement() == 0) {
        if (last == NULL) {
          _dependencies = b->next();
        } else {
          last->set_next(b->next());
        }
        delete b;
      }
      return;
    }
    last = b;
    b = b->next();
  }
#ifdef ASSERT
  tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
  nm->print();
#endif // ASSERT
  ShouldNotReachHere();
}
#ifndef PRODUCT
void instanceKlass::print_dependent_nmethods(bool verbose) {
  nmethodBucket* b = _dependencies;
  int idx = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
    b = b->next();
  }
}

bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      return true;
    }
    b = b->next();
  }
  return false;
}
#endif //PRODUCT
void instanceKlass::follow_static_fields() {
  oop* start = start_of_static_fields();
  oop* end   = start + static_oop_field_size();
  while (start < end) {
    if (*start != NULL) {
      assert(Universe::heap()->is_in_closed_subset(*start),
             "should be in heap");
      MarkSweep::mark_and_push(start);
    }
    start++;
  }
}

#ifndef SERIALGC
void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
  oop* start = start_of_static_fields();
  oop* end   = start + static_oop_field_size();
  while (start < end) {
    if (*start != NULL) {
      assert(Universe::heap()->is_in(*start), "should be in heap");
      PSParallelCompact::mark_and_push(cm, start);
    }
    start++;
  }
}
#endif // SERIALGC

void instanceKlass::adjust_static_fields() {
  oop* start = start_of_static_fields();
  oop* end   = start + static_oop_field_size();
  while (start < end) {
    MarkSweep::adjust_pointer(start);
    start++;
  }
}

#ifndef SERIALGC
void instanceKlass::update_static_fields() {
  oop* const start = start_of_static_fields();
  oop* const beg_oop = start;
  oop* const end_oop = start + static_oop_field_size();
  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
    PSParallelCompact::adjust_pointer(cur_oop);
  }
}

void
instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
  oop* const start = start_of_static_fields();
  oop* const beg_oop = MAX2((oop*)beg_addr, start);
  oop* const end_oop = MIN2((oop*)end_addr, start + static_oop_field_size());
  for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
    PSParallelCompact::adjust_pointer(cur_oop);
  }
}
#endif // SERIALGC
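// Instance traversal below is driven by the nonstatic oop-map blocks: each
// OopMapBlock records the field offset and length of one contiguous run of
// oop fields within an instance, so the GC only visits reference fields.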
void instanceKlass::oop_follow_contents(oop obj) {
  assert (obj!=NULL, "can't follow the content of NULL object");
  obj->follow_header();
  OopMapBlock* map     = start_of_nonstatic_oop_maps();
  OopMapBlock* end_map = map + nonstatic_oop_map_size();
  while (map < end_map) {
    oop* start = obj->obj_field_addr(map->offset());
    oop* end   = start + map->length();
    while (start < end) {
      if (*start != NULL) {
        assert(Universe::heap()->is_in_closed_subset(*start),
               "should be in heap");
        MarkSweep::mark_and_push(start);
      }
      start++;
    }
    map++;
  }
}

#ifndef SERIALGC
void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                        oop obj) {
  assert (obj!=NULL, "can't follow the content of NULL object");
  obj->follow_header(cm);
  OopMapBlock* map     = start_of_nonstatic_oop_maps();
  OopMapBlock* end_map = map + nonstatic_oop_map_size();
  while (map < end_map) {
    oop* start = obj->obj_field_addr(map->offset());
    oop* end   = start + map->length();
    while (start < end) {
      if (*start != NULL) {
        assert(Universe::heap()->is_in(*start), "should be in heap");
        PSParallelCompact::mark_and_push(cm, start);
      }
      start++;
    }
    map++;
  }
}
#endif // SERIALGC
#define invoke_closure_on(start, closure, nv_suffix) {                          \
  oop obj = *(start);                                                           \
  if (obj != NULL) {                                                            \
    assert(Universe::heap()->is_in_closed_subset(obj), "should be in heap");    \
    (closure)->do_oop##nv_suffix(start);                                        \
  }                                                                             \
}

// closure's do_header() method dictates whether the given closure should be
// applied to the klass ptr in the object header.
  1369 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)           \
  1371 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj,                          \
  1372                                               OopClosureType* closure) {        \
  1373   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  1374   /* header */                                                                  \
  1375   if (closure->do_header()) {                                                   \
  1376     obj->oop_iterate_header(closure);                                           \
  1377   }                                                                             \
  1378   /* instance variables */                                                      \
  1379   OopMapBlock* map     = start_of_nonstatic_oop_maps();                         \
  1380   OopMapBlock* const end_map = map + nonstatic_oop_map_size();                  \
  1381   const intx field_offset    = PrefetchFieldsAhead;                             \
  1382   if (field_offset > 0) {                                                       \
  1383     while (map < end_map) {                                                     \
  1384       oop* start = obj->obj_field_addr(map->offset());                          \
  1385       oop* const end   = start + map->length();                                 \
  1386       while (start < end) {                                                     \
  1387         prefetch_beyond(start, (oop*)end, field_offset,                         \
  1388                         closure->prefetch_style());                             \
  1389         SpecializationStats::                                                   \
  1390           record_do_oop_call##nv_suffix(SpecializationStats::ik);               \
  1391         invoke_closure_on(start, closure, nv_suffix);                           \
  1392         start++;                                                                \
  1393       }                                                                         \
  1394       map++;                                                                    \
  1395     }                                                                           \
  1396   } else {                                                                      \
  1397     while (map < end_map) {                                                     \
  1398       oop* start = obj->obj_field_addr(map->offset());                          \
  1399       oop* const end   = start + map->length();                                 \
  1400       while (start < end) {                                                     \
  1401         SpecializationStats::                                                   \
  1402           record_do_oop_call##nv_suffix(SpecializationStats::ik);               \
  1403         invoke_closure_on(start, closure, nv_suffix);                           \
  1404         start++;                                                                \
  1405       }                                                                         \
  1406       map++;                                                                    \
  1407     }                                                                           \
  1408   }                                                                             \
  1409   return size_helper();                                                         \
  1410 }
  1412 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)         \
  1414 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,                      \
  1415                                                   OopClosureType* closure,      \
  1416                                                   MemRegion mr) {               \
  1417   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  1418   /* header */                                                                  \
  1419   if (closure->do_header()) {                                                   \
  1420     obj->oop_iterate_header(closure, mr);                                       \
  1421   }                                                                             \
  1422   /* instance variables */                                                      \
  1423   OopMapBlock* map     = start_of_nonstatic_oop_maps();                         \
  1424   OopMapBlock* const end_map = map + nonstatic_oop_map_size();                  \
  1425   HeapWord* bot = mr.start();                                                   \
  1426   HeapWord* top = mr.end();                                                     \
  1427   oop* start = obj->obj_field_addr(map->offset());                              \
  1428   HeapWord* end = MIN2((HeapWord*)(start + map->length()), top);                \
  1429   /* Find the first map entry that extends onto mr. */                          \
  1430   while (map < end_map && end <= bot) {                                         \
  1431     map++;                                                                      \
  1432     start = obj->obj_field_addr(map->offset());                                 \
  1433     end = MIN2((HeapWord*)(start + map->length()), top);                        \
  1434   }                                                                             \
  1435   if (map != end_map) {                                                         \
  1436     /* The current map's end is past the start of "mr".  Skip up to the first   \
  1437        entry on "mr". */                                                        \
  1438     while ((HeapWord*)start < bot) {                                            \
  1439       start++;                                                                  \
  1440     }                                                                           \
  1441     const intx field_offset = PrefetchFieldsAhead;                              \
  1442     for (;;) {                                                                  \
  1443       if (field_offset > 0) {                                                   \
  1444         while ((HeapWord*)start < end) {                                        \
  1445           prefetch_beyond(start, (oop*)end, field_offset,                       \
  1446                           closure->prefetch_style());                           \
  1447           invoke_closure_on(start, closure, nv_suffix);                         \
  1448           start++;                                                              \
  1449         }                                                                       \
  1450       } else {                                                                  \
  1451         while ((HeapWord*)start < end) {                                        \
  1452           invoke_closure_on(start, closure, nv_suffix);                         \
  1453           start++;                                                              \
  1454         }                                                                       \
  1455       }                                                                         \
  1456       /* Go to the next map. */                                                 \
  1457       map++;                                                                    \
  1458       if (map == end_map) {                                                     \
  1459         break;                                                                  \
  1460       }                                                                         \
  1461       /* Otherwise, set up to scan the next map entry. */               \
  1462       start = obj->obj_field_addr(map->offset());                               \
  1463       if ((HeapWord*)start >= top) {                                            \
  1464         break;                                                                  \
  1465       }                                                                         \
  1466       end = MIN2((HeapWord*)(start + map->length()), top);                      \
  1467     }                                                                           \
  1468   }                                                                             \
  1469   return size_helper();                                                         \
  1470 }
  1472 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
  1473 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN)
  1474 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
  1475 ALL_OOP_OOP_ITERATE_CLOSURES_3(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
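       // The four lines above instantiate the two macros once per
       // (closure type, nv_suffix) pair in the ALL_OOP_OOP_ITERATE_CLOSURES_1
       // and _3 lists, stamping out the oop_oop_iterate family.  An expansion
       // has roughly this shape (illustrative sketch only; SomeClosure and
       // the _nv suffix stand in for an actual list entry):
       //
       //   int instanceKlass::oop_oop_iterate_nv(oop obj, SomeClosure* cl) {
       //     // walk each OopMapBlock of obj, applying cl->do_oop_nv(p)
       //     // to every non-NULL oop field, then
       //     return size_helper();  // object size in words
       //   }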
  1478 void instanceKlass::iterate_static_fields(OopClosure* closure) {
  1479   oop* start = start_of_static_fields();
  1480   oop* end   = start + static_oop_field_size();
  1481   while (start < end) {
  1482     assert(Universe::heap()->is_in_reserved_or_null(*start), "should be in heap");
  1483     closure->do_oop(start);
  1484     start++;
  1485   }
  1486 }
  1488 void instanceKlass::iterate_static_fields(OopClosure* closure,
  1489                                           MemRegion mr) {
  1490   oop* start = start_of_static_fields();
  1491   oop* end   = start + static_oop_field_size();
  1492   // I gather that the static fields of reference types come first,
  1493   // hence the name "static_oop_field_size", and that is what makes this safe.
  1494   assert((intptr_t)mr.start() ==
  1495          align_size_up((intptr_t)mr.start(), sizeof(oop)) &&
  1496          (intptr_t)mr.end() == align_size_up((intptr_t)mr.end(), sizeof(oop)),
  1497          "Memregion must be oop-aligned.");
  1498   if ((HeapWord*)start < mr.start()) start = (oop*)mr.start();
  1499   if ((HeapWord*)end   > mr.end())   end   = (oop*)mr.end();
  1500   while (start < end) {
  1501     invoke_closure_on(start, closure,_v);
  1502     start++;
  1503   }
  1504 }
  1507 int instanceKlass::oop_adjust_pointers(oop obj) {
  1508   int size = size_helper();
  1510   // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
  1511   OopMapBlock* map     = start_of_nonstatic_oop_maps();
  1512   OopMapBlock* const end_map = map + nonstatic_oop_map_size();
  1513   // Iterate over oopmap blocks
  1514   while (map < end_map) {
  1515     // Compute oop range for this block
  1516     oop* start = obj->obj_field_addr(map->offset());
  1517     oop* end   = start + map->length();
  1518     // Iterate over oops
  1519     while (start < end) {
  1520       assert(Universe::heap()->is_in_or_null(*start), "should be in heap");
  1521       MarkSweep::adjust_pointer(start);
  1522       start++;
  1523     }
  1524     map++;
  1525   }
  1527   obj->adjust_header();
  1528   return size;
  1529 }
  1531 #ifndef SERIALGC
  1532 void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
  1533   assert(!pm->depth_first(), "invariant");
  1534   // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
  1535   OopMapBlock* start_map = start_of_nonstatic_oop_maps();
  1536   OopMapBlock* map       = start_map + nonstatic_oop_map_size();
  1538   // Iterate over oopmap blocks
  1539   while (start_map < map) {
  1540     --map;
  1541     // Compute oop range for this block
  1542     oop* start = obj->obj_field_addr(map->offset());
  1543     oop* curr  = start + map->length();
  1544     // Iterate over oops
  1545     while (start < curr) {
  1546       --curr;
  1547       if (PSScavenge::should_scavenge(*curr)) {
  1548         assert(Universe::heap()->is_in(*curr), "should be in heap");
  1549         pm->claim_or_forward_breadth(curr);
  1550       }
  1551     }
  1552   }
  1553 }
  1555 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  1556   assert(pm->depth_first(), "invariant");
  1557   // Compute oopmap block range. The common case is nonstatic_oop_map_size == 1.
  1558   OopMapBlock* start_map = start_of_nonstatic_oop_maps();
  1559   OopMapBlock* map       = start_map + nonstatic_oop_map_size();
  1561   // Iterate over oopmap blocks
  1562   while (start_map < map) {
  1563     --map;
  1564     // Compute oop range for this block
  1565     oop* start = obj->obj_field_addr(map->offset());
  1566     oop* curr  = start + map->length();
  1567     // Iterate over oops
  1568     while (start < curr) {
  1569       --curr;
  1570       if (PSScavenge::should_scavenge(*curr)) {
  1571         assert(Universe::heap()->is_in(*curr), "should be in heap");
  1572         pm->claim_or_forward_depth(curr);
  1573       }
  1574     }
  1575   }
  1576 }
  1578 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
  1579   // Compute oopmap block range.  The common case is nonstatic_oop_map_size==1.
  1580   OopMapBlock* map           = start_of_nonstatic_oop_maps();
  1581   OopMapBlock* const end_map = map + nonstatic_oop_map_size();
  1582   // Iterate over oopmap blocks
  1583   while (map < end_map) {
  1584     // Compute oop range for this oopmap block.
  1585     oop* const map_start = obj->obj_field_addr(map->offset());
  1586     oop* const beg_oop = map_start;
  1587     oop* const end_oop = map_start + map->length();
  1588     for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
  1589       PSParallelCompact::adjust_pointer(cur_oop);
  1590     }
  1591     ++map;
  1592   }
  1594   return size_helper();
  1595 }
  1597 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
  1598                                        HeapWord* beg_addr, HeapWord* end_addr) {
  1599   // Compute oopmap block range.  The common case is nonstatic_oop_map_size==1.
  1600   OopMapBlock* map           = start_of_nonstatic_oop_maps();
  1601   OopMapBlock* const end_map = map + nonstatic_oop_map_size();
  1602   // Iterate over oopmap blocks
  1603   while (map < end_map) {
  1604     // Compute oop range for this oopmap block.
  1605     oop* const map_start = obj->obj_field_addr(map->offset());
  1606     oop* const beg_oop = MAX2((oop*)beg_addr, map_start);
  1607     oop* const end_oop = MIN2((oop*)end_addr, map_start + map->length());
  1608     for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
  1609       PSParallelCompact::adjust_pointer(cur_oop);
  1610     }
  1611     ++map;
  1612   }
  1614   return size_helper();
  1615 }
  1617 void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
  1618   assert(!pm->depth_first(), "invariant");
  1619   // Compute oop range
  1620   oop* start = start_of_static_fields();
  1621   oop* end   = start + static_oop_field_size();
  1622   // Iterate over oops
  1623   while (start < end) {
  1624     if (PSScavenge::should_scavenge(*start)) {
  1625       assert(Universe::heap()->is_in(*start), "should be in heap");
  1626       pm->claim_or_forward_breadth(start);
  1627     }
  1628     start++;
  1629   }
  1630 }
  1632 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
  1633   assert(pm->depth_first(), "invariant");
  1634   // Compute oop range
  1635   oop* start = start_of_static_fields();
  1636   oop* end   = start + static_oop_field_size();
  1637   // Iterate over oops
  1638   while (start < end) {
  1639     if (PSScavenge::should_scavenge(*start)) {
  1640       assert(Universe::heap()->is_in(*start), "should be in heap");
  1641       pm->claim_or_forward_depth(start);
  1642     }
  1643     start++;
  1644   }
  1645 }
  1647 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
  1648   // Compute oop range
  1649   oop* start = start_of_static_fields();
  1650   oop* end   = start + static_oop_field_size();
  1651   // Iterate over oops
  1652   while (start < end) {
  1653     if (*start != NULL) {
  1654       assert(Universe::heap()->is_in(*start), "should be in heap");
  1655       // *start = (oop) cm->summary_data()->calc_new_pointer(*start);
  1656       PSParallelCompact::adjust_pointer(start);
  1657     }
  1658     start++;
  1659   }
  1660 }
  1661 #endif // SERIALGC
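       // The copy_/push_ pairs above are two flavors of the same scavenge
       // step: the "copy" forms feed PSPromotionManager's breadth-first queue
       // (claim_or_forward_breadth) while the "push" forms feed its
       // depth-first stack (claim_or_forward_depth); the depth_first()
       // asserts keep each object walked with the matching flavor.  A
       // caller-side sketch (hypothetical dispatch, not code from this file):
       //
       //   if (pm->depth_first()) ik->oop_push_contents(pm, obj);
       //   else                   ik->oop_copy_contents(pm, obj);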
  1663 // This klass is alive but the implementor link is not followed/updated.
  1664 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
  1666 void instanceKlass::follow_weak_klass_links(
  1667   BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  1668   assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
  1669   if (ClassUnloading) {
  1670     for (int i = 0; i < implementors_limit; i++) {
  1671       klassOop impl = _implementors[i];
  1672       if (impl == NULL)  break;  // no more in the list
  1673       if (!is_alive->do_object_b(impl)) {
  1674         // remove this entry from the list by overwriting it with the tail
  1675         int lasti = --_nof_implementors;
  1676         assert(lasti >= i && lasti < implementors_limit, "just checking");
  1677         _implementors[i] = _implementors[lasti];
  1678         _implementors[lasti] = NULL;
  1679         --i; // rerun the loop at this index
  1680       }
  1681     }
  1682   } else {
  1683     for (int i = 0; i < implementors_limit; i++) {
  1684       keep_alive->do_oop(&adr_implementors()[i]);
  1685     }
  1686   }
  1687   Klass::follow_weak_klass_links(is_alive, keep_alive);
  1688 }
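       // A worked example of the unloading branch above, assuming
       // implementors_limit leaves room and implementor B has died:
       //
       //   before: _implementors = [ A, B, C, NULL ]    _nof_implementors = 3
       //   after : _implementors = [ A, C, NULL, NULL ] _nof_implementors = 2
       //
       // Overwriting the dead slot with the tail keeps the live entries in a
       // dense prefix, so the NULL test can still terminate the scan early.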
  1691 void instanceKlass::remove_unshareable_info() {
  1692   Klass::remove_unshareable_info();
  1693   init_implementor();
  1694 }
  1697 static void clear_all_breakpoints(methodOop m) {
  1698   m->clear_all_breakpoints();
  1699 }
  1702 void instanceKlass::release_C_heap_structures() {
  1703   // Deallocate oop map cache
  1704   if (_oop_map_cache != NULL) {
  1705     delete _oop_map_cache;
  1706     _oop_map_cache = NULL;
  1707   }
  1709   // Deallocate JNI identifiers for jfieldIDs
  1710   JNIid::deallocate(jni_ids());
  1711   set_jni_ids(NULL);
  1713   jmethodID* jmeths = methods_jmethod_ids_acquire();
  1714   if (jmeths != (jmethodID*)NULL) {
  1715     release_set_methods_jmethod_ids(NULL);
  1716     FreeHeap(jmeths);
  1717   }
  1719   int* indices = methods_cached_itable_indices_acquire();
  1720   if (indices != (int*)NULL) {
  1721     release_set_methods_cached_itable_indices(NULL);
  1722     FreeHeap(indices);
  1723   }
  1725   // release dependencies
  1726   nmethodBucket* b = _dependencies;
  1727   _dependencies = NULL;
  1728   while (b != NULL) {
  1729     nmethodBucket* next = b->next();
  1730     delete b;
  1731     b = next;
  1732   }
  1734   // Deallocate breakpoint records
  1735   if (breakpoints() != 0x0) {
  1736     methods_do(clear_all_breakpoints);
  1737     assert(breakpoints() == 0x0, "should have cleared breakpoints");
  1738   }
  1740   // deallocate information about previous versions
  1741   if (_previous_versions != NULL) {
  1742     for (int i = _previous_versions->length() - 1; i >= 0; i--) {
  1743       PreviousVersionNode * pv_node = _previous_versions->at(i);
  1744       delete pv_node;
  1745     }
  1746     delete _previous_versions;
  1747     _previous_versions = NULL;
  1748   }
  1750   // deallocate the cached class file
  1751   if (_cached_class_file_bytes != NULL) {
  1752     os::free(_cached_class_file_bytes);
  1753     _cached_class_file_bytes = NULL;
  1754     _cached_class_file_len = 0;
  1755   }
  1756 }
  1758 char* instanceKlass::signature_name() const {
  1759   const char* src = (const char*) (name()->as_C_string());
  1760   const int src_length = (int)strlen(src);
  1761   char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
  1762   int src_index = 0;
  1763   int dest_index = 0;
  1764   dest[dest_index++] = 'L';
  1765   while (src_index < src_length) {
  1766     dest[dest_index++] = src[src_index++];
  1767   }
  1768   dest[dest_index++] = ';';
  1769   dest[dest_index] = '\0';
  1770   return dest;
  1771 }
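       // Example: for java/lang/String this returns "Ljava/lang/String;",
       // i.e. the runtime class name wrapped in JVM field-descriptor form
       // 'L<internal name>;'.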
  1773 // different versions of is_same_class_package
  1774 bool instanceKlass::is_same_class_package(klassOop class2) {
  1775   klassOop class1 = as_klassOop();
  1776   oop classloader1 = instanceKlass::cast(class1)->class_loader();
  1777   symbolOop classname1 = Klass::cast(class1)->name();
  1779   if (Klass::cast(class2)->oop_is_objArray()) {
  1780     class2 = objArrayKlass::cast(class2)->bottom_klass();
  1781   }
  1782   oop classloader2;
  1783   if (Klass::cast(class2)->oop_is_instance()) {
  1784     classloader2 = instanceKlass::cast(class2)->class_loader();
  1785   } else {
  1786     assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
  1787     classloader2 = NULL;
  1788   }
  1789   symbolOop classname2 = Klass::cast(class2)->name();
  1791   return instanceKlass::is_same_class_package(classloader1, classname1,
  1792                                               classloader2, classname2);
  1793 }
  1795 bool instanceKlass::is_same_class_package(oop classloader2, symbolOop classname2) {
  1796   klassOop class1 = as_klassOop();
  1797   oop classloader1 = instanceKlass::cast(class1)->class_loader();
  1798   symbolOop classname1 = Klass::cast(class1)->name();
  1800   return instanceKlass::is_same_class_package(classloader1, classname1,
  1801                                               classloader2, classname2);
  1802 }
  1804 // return true if two classes are in the same package, classloader
  1805 // and classname information is enough to determine a class's package
  1806 bool instanceKlass::is_same_class_package(oop class_loader1, symbolOop class_name1,
  1807                                           oop class_loader2, symbolOop class_name2) {
  1808   if (class_loader1 != class_loader2) {
  1809     return false;
  1810   } else {
  1811     ResourceMark rm;
  1813     // The symbolOop's are in UTF8 encoding. Since we only need to check explicitly
  1814     // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
  1815     // Otherwise, we just compare jbyte values between the strings.
  1816     jbyte *name1 = class_name1->base();
  1817     jbyte *name2 = class_name2->base();
  1819     jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
  1820     jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
  1822     if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
  1823       // One of the two doesn't have a package.  Only return true
  1824       // if the other one also doesn't have a package.
  1825       return last_slash1 == last_slash2;
  1826     } else {
  1827       // Skip over '['s
  1828       if (*name1 == '[') {
  1829         do {
  1830           name1++;
  1831         } while (*name1 == '[');
  1832         if (*name1 != 'L') {
  1833           // Something is terribly wrong.  Shouldn't be here.
  1834           return false;
  1835         }
  1836       }
  1837       if (*name2 == '[') {
  1838         do {
  1839           name2++;
  1840         } while (*name2 == '[');
  1841         if (*name2 != 'L') {
  1842           // Something is terribly wrong.  Shouldn't be here.
  1843           return false;
  1844         }
  1845       }
  1847       // Check that package part is identical
  1848       int length1 = last_slash1 - name1;
  1849       int length2 = last_slash2 - name2;
  1851       return UTF8::equal(name1, length1, name2, length2);
  1852     }
  1853   }
  1854 }
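       // Worked examples for the comparison above (same class loader assumed):
       //   "java/util/Map" vs "java/util/HashMap" -> package "java/util" on
       //       both sides, so true;
       //   "java/util/Map" vs "java/io/File"      -> "java/util" vs "java/io",
       //       so false;
       //   "Foo" vs "Bar" (no '/' in either)      -> neither has a package,
       //       so true.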
  1857 jint instanceKlass::compute_modifier_flags(TRAPS) const {
  1858   klassOop k = as_klassOop();
  1859   jint access = access_flags().as_int();
  1861   // But check if it happens to be a member class.
  1862   typeArrayOop inner_class_list = inner_classes();
  1863   int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
  1864   assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
  1865   if (length > 0) {
  1866     typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
  1867     instanceKlassHandle ik(THREAD, k);
  1868     for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
  1869       int ioff = inner_class_list_h->ushort_at(
  1870                       i + instanceKlass::inner_class_inner_class_info_offset);
  1872       // Inner class attribute can be zero, skip it.
  1873       // Strange but true:  JVM spec. allows null inner class refs.
  1874       if (ioff == 0) continue;
  1876       // Only look at classes that are already loaded,
  1877       // since we are looking for the flags of this class itself.
  1878       symbolOop inner_name = ik->constants()->klass_name_at(ioff);
  1879       if ((ik->name() == inner_name)) {
  1880         // This is really a member class.
  1881         access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
  1882         break;
  1883       }
  1884     }
  1885   }
  1886   // Remember to strip ACC_SUPER bit
  1887   return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
  1888 }
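       // Example of the effect above: a class file's own access_flags cannot
       // express ACC_PRIVATE or ACC_PROTECTED, but the InnerClasses entry for
       // a member class can; when an entry's name matches this class, those
       // recorded flags (minus ACC_SUPER, masked to JVM_ACC_WRITTEN_FLAGS)
       // are returned instead.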
  1890 jint instanceKlass::jvmti_class_status() const {
  1891   jint result = 0;
  1893   if (is_linked()) {
  1894     result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
  1895   }
  1897   if (is_initialized()) {
  1898     assert(is_linked(), "Class status is not consistent");
  1899     result |= JVMTI_CLASS_STATUS_INITIALIZED;
  1900   }
  1901   if (is_in_error_state()) {
  1902     result |= JVMTI_CLASS_STATUS_ERROR;
  1903   }
  1904   return result;
  1905 }
  1907 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
  1908   itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
  1909   int method_table_offset_in_words = ioe->offset()/wordSize;
  1910   int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
  1911                        / itableOffsetEntry::size();
  1913   for (int cnt = 0 ; ; cnt ++, ioe ++) {
  1914     // If the interface isn't implemented by the receiver class,
  1915     // the VM should throw IncompatibleClassChangeError.
  1916     if (cnt >= nof_interfaces) {
  1917       THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError());
  1918     }
  1920     klassOop ik = ioe->interface_klass();
  1921     if (ik == holder) break;
  1922   }
  1924   itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
  1925   methodOop m = ime[index].method();
  1926   if (m == NULL) {
  1927     THROW_OOP_0(vmSymbols::java_lang_AbstractMethodError());
  1928   }
  1929   return m;
  1930 }
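       // Usage sketch for the itable lookup above (hypothetical call site;
       // resolution code normally supplies the holder and index from the
       // resolved interface method):
       //
       //   methodOop target =
       //     recv_klass->method_at_itable(iface_klass, itable_index, CHECK_NULL);
       //
       // It throws IncompatibleClassChangeError when this klass does not
       // implement iface_klass, and AbstractMethodError when the selected
       // slot is empty.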
  1932 // On-stack replacement stuff
  1933 void instanceKlass::add_osr_nmethod(nmethod* n) {
  1934   // only one compilation can be active
  1935   NEEDS_CLEANUP
  1936   // This is a short non-blocking critical region, so the no safepoint check is ok.
  1937   OsrList_lock->lock_without_safepoint_check();
  1938   assert(n->is_osr_method(), "wrong kind of nmethod");
  1939   n->set_link(osr_nmethods_head());
  1940   set_osr_nmethods_head(n);
  1941   // Remember to unlock again
  1942   OsrList_lock->unlock();
  1943 }
  1946 void instanceKlass::remove_osr_nmethod(nmethod* n) {
  1947   // This is a short non-blocking critical region, so the no safepoint check is ok.
  1948   OsrList_lock->lock_without_safepoint_check();
  1949   assert(n->is_osr_method(), "wrong kind of nmethod");
  1950   nmethod* last = NULL;
  1951   nmethod* cur  = osr_nmethods_head();
  1952   // Search for match
  1953   while(cur != NULL && cur != n) {
  1954     last = cur;
  1955     cur = cur->link();
  1956   }
  1957   if (cur == n) {
  1958     if (last == NULL) {
  1959       // Remove first element
  1960       set_osr_nmethods_head(osr_nmethods_head()->link());
  1961     } else {
  1962       last->set_link(cur->link());
  1963     }
  1964   }
  1965   n->set_link(NULL);
  1966   // Remember to unlock again
  1967   OsrList_lock->unlock();
  1968 }
  1970 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
  1971   // This is a short non-blocking critical region, so the no safepoint check is ok.
  1972   OsrList_lock->lock_without_safepoint_check();
  1973   nmethod* osr = osr_nmethods_head();
  1974   while (osr != NULL) {
  1975     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
  1976     if (osr->method() == m &&
  1977         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
  1978       // Found a match - return it.
  1979       OsrList_lock->unlock();
  1980       return osr;
  1981     }
  1982     osr = osr->link();
  1983   }
  1984   OsrList_lock->unlock();
  1985   return NULL;
  1986 }
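       // The three routines above maintain a per-class singly linked list of
       // OSR nmethods guarded by OsrList_lock.  Lookup sketch (hypothetical
       // values):
       //
       //   nmethod* exact = ik->lookup_osr_nmethod(m, bci);
       //   nmethod* any   = ik->lookup_osr_nmethod(m, InvocationEntryBci);
       //
       // Per the test above, InvocationEntryBci acts as a wildcard that
       // matches an OSR nmethod for m at any entry bci.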
  1988 // -----------------------------------------------------------------------------------------------------
  1989 #ifndef PRODUCT
  1991 // Printing
  1993 void FieldPrinter::do_field(fieldDescriptor* fd) {
  1994    if (fd->is_static() == (_obj == NULL)) {
  1995      _st->print("   - ");
  1996      fd->print_on(_st);
  1997      _st->cr();
  1998    } else {
  1999      fd->print_on_for(_st, _obj);
  2000      _st->cr();
  2001    }
  2002 }
  2005 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
  2006   Klass::oop_print_on(obj, st);
  2008   if (as_klassOop() == SystemDictionary::string_klass()) {
  2009     typeArrayOop value  = java_lang_String::value(obj);
  2010     juint        offset = java_lang_String::offset(obj);
  2011     juint        length = java_lang_String::length(obj);
  2012     if (value != NULL &&
  2013         value->is_typeArray() &&
  2014         offset          <= (juint) value->length() &&
  2015         offset + length <= (juint) value->length()) {
  2016       st->print("string: ");
  2017       Handle h_obj(obj);
  2018       java_lang_String::print(h_obj, st);
  2019       st->cr();
  2020       if (!WizardMode)  return;  // that is enough
  2021     }
  2022   }
  2024   st->print_cr("fields:");
  2025   FieldPrinter print_nonstatic_field(st, obj);
  2026   do_nonstatic_fields(&print_nonstatic_field);
  2028   if (as_klassOop() == SystemDictionary::class_klass()) {
  2029     klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
  2030     st->print("   - fake entry for mirror: ");
  2031     mirrored_klass->print_value_on(st);
  2032     st->cr();
  2033     st->print("   - fake entry resolved_constructor: ");
  2034     methodOop ctor = java_lang_Class::resolved_constructor(obj);
  2035     ctor->print_value_on(st);
  2036     klassOop array_klass = java_lang_Class::array_klass(obj);
  2037     st->print("   - fake entry for array: ");
  2038     array_klass->print_value_on(st);
  2039     st->cr();
  2040     st->cr();
  2041   }
  2042 }
  2044 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
  2045   st->print("a ");
  2046   name()->print_value_on(st);
  2047   obj->print_address_on(st);
  2048 }
  2050 #endif
  2052 const char* instanceKlass::internal_name() const {
  2053   return external_name();
  2054 }
  2058 // Verification
  2060 class VerifyFieldClosure: public OopClosure {
  2061  public:
  2062   void do_oop(oop* p) {
  2063     guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
  2064     if (!(*p)->is_oop_or_null()) {
  2065       tty->print_cr("Failed: %p -> %p",p,(address)*p);
  2066       Universe::print();
  2067       guarantee(false, "boom");
  2068     }
  2069   }
  2070 };
  2073 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
  2074   Klass::oop_verify_on(obj, st);
  2075   VerifyFieldClosure blk;
  2076   oop_oop_iterate(obj, &blk);
  2077 }
  2079 #ifndef PRODUCT
  2081 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
  2082   // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
  2083   // cannot be called since this function is called before the VM is
  2084   // able to determine what JDK version it is running with.
  2085   // The check below is always false since 1.4.
  2086   return;
  2088   // This verification code temporarily disabled for the 1.4
  2089   // reflection implementation since java.lang.Class now has
  2090   // Java-level instance fields. Should rewrite this to handle this
  2091   // case.
  2092   if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
  2093     // Verify that java.lang.Class instances have a fake oop field added.
  2094     instanceKlass* ik = instanceKlass::cast(k);
  2096     // Check that we have the right class
  2097     static bool first_time = true;
  2098     guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps");
  2099     first_time = false;
  2100     const int extra = java_lang_Class::number_of_fake_oop_fields;
  2101     guarantee(ik->nonstatic_field_size() == extra, "just checking");
  2102     guarantee(ik->nonstatic_oop_map_size() == 1, "just checking");
  2103     guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
  2105     // Check that the map is (2,extra)
  2106     int offset = java_lang_Class::klass_offset;
  2108     OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
  2109     guarantee(map->offset() == offset && map->length() == extra, "just checking");
  2110   }
  2111 }
  2113 #endif
  2116 /* JNIid class for jfieldIDs only */
  2117  JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
  2118    _holder = holder;
  2119    _offset = offset;
  2120    _next = next;
  2121    debug_only(_is_static_field_id = false;)
  2122  }
  2125  JNIid* JNIid::find(int offset) {
  2126    JNIid* current = this;
  2127    while (current != NULL) {
  2128      if (current->offset() == offset) return current;
  2129      current = current->next();
  2130    }
  2131    return NULL;
  2132  }
  2134 void JNIid::oops_do(OopClosure* f) {
  2135   for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
  2136     f->do_oop(cur->holder_addr());
  2137   }
  2138 }
  2140 void JNIid::deallocate(JNIid* current) {
  2141    while (current != NULL) {
  2142      JNIid* next = current->next();
  2143      delete current;
  2144      current = next;
  2145    }
  2146 }
  2149  void JNIid::verify(klassOop holder) {
  2150    int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
  2151    int end_field_offset;
  2152    end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
  2154    JNIid* current = this;
  2155    while (current != NULL) {
  2156      guarantee(current->holder() == holder, "Invalid klass in JNIid");
  2157  #ifdef ASSERT
  2158      int o = current->offset();
  2159      if (current->is_static_field_id()) {
  2160        guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
  2161      }
  2162  #endif
  2163      current = current->next();
  2164    }
  2165  }
  2168 #ifdef ASSERT
  2169   void instanceKlass::set_init_state(ClassState state) {
  2170     bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
  2171                                                  : (_init_state < state);
  2172     assert(good_state || state == allocated, "illegal state transition");
  2173     _init_state = state;
  2174   }
  2175 #endif
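       // The assert above enforces forward-only init-state transitions, per
       // the ClassState order declared in instanceKlass.hpp:
       //
       //   allocated -> loaded -> linked -> being_initialized
       //             -> fully_initialized (or initialization_error)
       //
       // Shared (CDS) classes may re-assert their current state, hence the
       // relaxed '<=' in that case.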
  2178 // RedefineClasses() support for previous versions:
  2180 // Add an information node that contains weak references to the
  2181 // interesting parts of the previous version of the_class.
  2182 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
  2183        BitMap * emcp_methods, int emcp_method_count) {
  2184   assert(Thread::current()->is_VM_thread(),
  2185     "only VMThread can add previous versions");
  2187   if (_previous_versions == NULL) {
  2188     // This is the first previous version so make some space.
  2189     // Start with 2 elements under the assumption that the class
  2190     // won't be redefined much.
  2191     _previous_versions =  new (ResourceObj::C_HEAP)
  2192                             GrowableArray<PreviousVersionNode *>(2, true);
  2193   }
  2195   // RC_TRACE macro has an embedded ResourceMark
  2196   RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
  2197     ikh->external_name(), _previous_versions->length(), emcp_method_count));
  2198   constantPoolHandle cp_h(ikh->constants());
  2199   jobject cp_ref;
  2200   if (cp_h->is_shared()) {
  2201     // a shared ConstantPool requires a regular reference; a weak
  2202     // reference would be collectible
  2203     cp_ref = JNIHandles::make_global(cp_h);
  2204   } else {
  2205     cp_ref = JNIHandles::make_weak_global(cp_h);
  2206   }
  2207   PreviousVersionNode * pv_node = NULL;
  2208   objArrayOop old_methods = ikh->methods();
  2210   if (emcp_method_count == 0) {
  2211     // non-shared ConstantPool gets a weak reference
  2212     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
  2213     RC_TRACE(0x00000400,
  2214       ("add: all methods are obsolete; flushing any EMCP weak refs"));
  2215   } else {
  2216     int local_count = 0;
  2217     GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
  2218       GrowableArray<jweak>(emcp_method_count, true);
  2219     for (int i = 0; i < old_methods->length(); i++) {
  2220       if (emcp_methods->at(i)) {
  2221         // this old method is EMCP so save a weak ref
  2222         methodOop old_method = (methodOop) old_methods->obj_at(i);
  2223         methodHandle old_method_h(old_method);
  2224         jweak method_ref = JNIHandles::make_weak_global(old_method_h);
  2225         method_refs->append(method_ref);
  2226         if (++local_count >= emcp_method_count) {
  2227           // no more EMCP methods so bail out now
  2228           break;
  2229         }
  2230       }
  2231     }
  2232     // non-shared ConstantPool gets a weak reference
  2233     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
  2234   }
  2236   _previous_versions->append(pv_node);
  2238   // Using weak references allows the interesting parts of previous
  2239   // classes to be GC'ed when they are no longer needed. Since the
  2240   // caller is the VMThread and we are at a safepoint, this is a good
  2241   // time to clear out unused weak references.
  2243   RC_TRACE(0x00000400, ("add: previous version length=%d",
  2244     _previous_versions->length()));
  2246   // skip the last entry since we just added it
  2247   for (int i = _previous_versions->length() - 2; i >= 0; i--) {
  2248     // check the previous versions array for GC'ed weak refs
  2249     pv_node = _previous_versions->at(i);
  2250     cp_ref = pv_node->prev_constant_pool();
  2251     assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
  2252     if (cp_ref == NULL) {
  2253       delete pv_node;
  2254       _previous_versions->remove_at(i);
  2255       // Since we are traversing the array backwards, we don't have to
  2256       // do anything special with the index.
  2257       continue;  // robustness
  2258     }
  2260     constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  2261     if (cp == NULL) {
  2262       // this entry has been GC'ed so remove it
  2263       delete pv_node;
  2264       _previous_versions->remove_at(i);
  2265       // Since we are traversing the array backwards, we don't have to
  2266       // do anything special with the index.
  2267       continue;
  2268     } else {
  2269       RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
  2270     }
  2272     GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
  2273     if (method_refs != NULL) {
  2274       RC_TRACE(0x00000400, ("add: previous methods length=%d",
  2275         method_refs->length()));
  2276       for (int j = method_refs->length() - 1; j >= 0; j--) {
  2277         jweak method_ref = method_refs->at(j);
  2278         assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
  2279         if (method_ref == NULL) {
  2280           method_refs->remove_at(j);
  2281           // Since we are traversing the array backwards, we don't have to
  2282           // do anything special with the index.
  2283           continue;  // robustness
  2284         }
  2286         methodOop method = (methodOop)JNIHandles::resolve(method_ref);
  2287         if (method == NULL || emcp_method_count == 0) {
  2288           // This method entry has been GC'ed or the current
  2289           // RedefineClasses() call has made all methods obsolete
  2290           // so remove it.
  2291           JNIHandles::destroy_weak_global(method_ref);
  2292           method_refs->remove_at(j);
  2293         } else {
  2294           // RC_TRACE macro has an embedded ResourceMark
  2295           RC_TRACE(0x00000400,
  2296             ("add: %s(%s): previous method @%d in version @%d is alive",
  2297             method->name()->as_C_string(), method->signature()->as_C_string(),
  2298             j, i));
  2299         }
  2300       }
  2301     }
  2302   }
  2304   int obsolete_method_count = old_methods->length() - emcp_method_count;
  2306   if (emcp_method_count != 0 && obsolete_method_count != 0 &&
  2307       _previous_versions->length() > 1) {
  2308     // We have a mix of obsolete and EMCP methods. If there is more
  2309     // than the previous version that we just added, then we have to
  2310     // clear out any matching EMCP method entries the hard way.
  2311     int local_count = 0;
  2312     for (int i = 0; i < old_methods->length(); i++) {
  2313       if (!emcp_methods->at(i)) {
  2314         // only obsolete methods are interesting
  2315         methodOop old_method = (methodOop) old_methods->obj_at(i);
  2316         symbolOop m_name = old_method->name();
  2317         symbolOop m_signature = old_method->signature();
  2319         // skip the last entry since we just added it
  2320         for (int j = _previous_versions->length() - 2; j >= 0; j--) {
  2321           // check the previous versions array for GC'ed weak refs
  2322           pv_node = _previous_versions->at(j);
  2323           cp_ref = pv_node->prev_constant_pool();
  2324           assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
  2325           if (cp_ref == NULL) {
  2326             delete pv_node;
  2327             _previous_versions->remove_at(j);
  2328             // Since we are traversing the array backwards, we don't have to
  2329             // do anything special with the index.
  2330             continue;  // robustness
  2331           }
  2333           constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  2334           if (cp == NULL) {
  2335             // this entry has been GC'ed so remove it
  2336             delete pv_node;
  2337             _previous_versions->remove_at(j);
  2338             // Since we are traversing the array backwards, we don't have to
  2339             // do anything special with the index.
  2340             continue;
  2341           }
  2343           GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
  2344           if (method_refs == NULL) {
  2345             // We have run into a PreviousVersion generation where
  2346             // all methods were made obsolete during that generation's
  2347             // RedefineClasses() operation. At the time of that
  2348             // operation, all EMCP methods were flushed so we don't
  2349             // have to go back any further.
  2350             //
  2351             // A NULL method_refs is different than an empty method_refs.
  2352             // We cannot infer any optimizations about older generations
  2353             // from an empty method_refs for the current generation.
  2354             break;
  2355           }
  2357           for (int k = method_refs->length() - 1; k >= 0; k--) {
  2358             jweak method_ref = method_refs->at(k);
  2359             assert(method_ref != NULL,
  2360               "weak method ref was unexpectedly cleared");
  2361             if (method_ref == NULL) {
  2362               method_refs->remove_at(k);
  2363               // Since we are traversing the array backwards, we don't
  2364               // have to do anything special with the index.
  2365               continue;  // robustness
  2366             }
  2368             methodOop method = (methodOop)JNIHandles::resolve(method_ref);
  2369             if (method == NULL) {
  2370               // this method entry has been GC'ed so skip it
  2371               JNIHandles::destroy_weak_global(method_ref);
  2372               method_refs->remove_at(k);
  2373               continue;
  2374             }
  2376             if (method->name() == m_name &&
  2377                 method->signature() == m_signature) {
  2378               // The current RedefineClasses() call has made all EMCP
  2379               // versions of this method obsolete so mark it as obsolete
  2380               // and remove the weak ref.
  2381               RC_TRACE(0x00000400,
  2382                 ("add: %s(%s): flush obsolete method @%d in version @%d",
  2383                 m_name->as_C_string(), m_signature->as_C_string(), k, j));
  2385               method->set_is_obsolete();
  2386               JNIHandles::destroy_weak_global(method_ref);
  2387               method_refs->remove_at(k);
  2388               break;
  2389             }
  2390           }
  2392           // The previous loop may not find a matching EMCP method, but
  2393           // that doesn't mean that we can optimize and not go any
  2394           // further back in the PreviousVersion generations. The EMCP
  2395           // method for this generation could have already been GC'ed,
  2396           // but there still may be an older EMCP method that has not
  2397           // been GC'ed.
  2398         }
  2400         if (++local_count >= obsolete_method_count) {
  2401           // no more obsolete methods so bail out now
  2402           break;
  2403         }
  2404       }
  2405     }
  2406   }
  2407 } // end add_previous_version()
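       // Terminology for the function above: an EMCP method is "Equivalent
       // Modulo Constant Pool" to its redefined replacement, so only a weak
       // ref to it is retained; obsolete methods get no ref at all.  The
       // node recorded per redefinition is roughly (sketch):
       //
       //   PreviousVersionNode { cp_ref      // weak unless the cp is shared
       //                         method_refs // jweak array, or NULL if all
       //                                     // old methods were obsolete
       //                       }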
  2410 // Determine if instanceKlass has a previous version.
  2411 bool instanceKlass::has_previous_version() const {
  2412   if (_previous_versions == NULL) {
  2413     // no previous versions array so answer is easy
  2414     return false;
  2415   }
  2417   for (int i = _previous_versions->length() - 1; i >= 0; i--) {
  2418     // Check the previous versions array for an info node that hasn't
  2419     // been GC'ed
  2420     PreviousVersionNode * pv_node = _previous_versions->at(i);
  2422     jobject cp_ref = pv_node->prev_constant_pool();
  2423     assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
  2424     if (cp_ref == NULL) {
  2425       continue;  // robustness
  2426     }
  2428     constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  2429     if (cp != NULL) {
  2430       // we have at least one previous version
  2431       return true;
  2432     }
  2434     // We don't have to check the method refs. If the constant pool has
  2435     // been GC'ed then so have the methods.
  2436   }
  2438   // all of the underlying nodes' info has been GC'ed
  2439   return false;
  2440 } // end has_previous_version()
  2442 methodOop instanceKlass::method_with_idnum(int idnum) {
  2443   methodOop m = NULL;
  2444   if (idnum < methods()->length()) {
  2445     m = (methodOop) methods()->obj_at(idnum);
  2446   }
  2447   if (m == NULL || m->method_idnum() != idnum) {
  2448     for (int index = 0; index < methods()->length(); ++index) {
  2449       m = (methodOop) methods()->obj_at(index);
  2450       if (m->method_idnum() == idnum) {
  2451         return m;
  2452       }
  2453     }
  2454   }
  2455   return m;
  2456 }
  2459 // Set the annotation at 'idnum' to 'anno'.
  2460 // We don't want to create or extend the array if 'anno' is NULL, since that is the
  2461 // default value.  However, if the array exists and is long enough, we must set NULL values.
  2462 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
  2463   objArrayOop md = *md_p;
  2464   if (md != NULL && md->length() > idnum) {
  2465     md->obj_at_put(idnum, anno);
  2466   } else if (anno != NULL) {
  2467     // create the array
  2468     int length = MAX2(idnum+1, (int)_idnum_allocated_count);
  2469     md = oopFactory::new_system_objArray(length, Thread::current());
  2470     if (*md_p != NULL) {
  2471       // copy the existing entries
  2472       for (int index = 0; index < (*md_p)->length(); index++) {
  2473         md->obj_at_put(index, (*md_p)->obj_at(index));
  2474       }
  2475     }
  2476     set_annotations(md, md_p);
  2477     md->obj_at_put(idnum, anno);
  2478   } // if no array and idnum isn't included there is nothing to do
  2479 }
  2481 // Construct a PreviousVersionNode entry for the array hung off
  2482 // the instanceKlass.
  2483 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
  2484   bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {
  2486   _prev_constant_pool = prev_constant_pool;
  2487   _prev_cp_is_weak = prev_cp_is_weak;
  2488   _prev_EMCP_methods = prev_EMCP_methods;
  2489 }
  2492 // Destroy a PreviousVersionNode
  2493 PreviousVersionNode::~PreviousVersionNode() {
  2494   if (_prev_constant_pool != NULL) {
  2495     if (_prev_cp_is_weak) {
  2496       JNIHandles::destroy_weak_global(_prev_constant_pool);
  2497     } else {
  2498       JNIHandles::destroy_global(_prev_constant_pool);
  2499     }
  2500   }
  2502   if (_prev_EMCP_methods != NULL) {
  2503     for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
  2504       jweak method_ref = _prev_EMCP_methods->at(i);
  2505       if (method_ref != NULL) {
  2506         JNIHandles::destroy_weak_global(method_ref);
  2507       }
  2508     }
  2509     delete _prev_EMCP_methods;
  2510   }
  2511 }
  2514 // Construct a PreviousVersionInfo entry
  2515 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
  2516   _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
  2517   _prev_EMCP_method_handles = NULL;
  2519   jobject cp_ref = pv_node->prev_constant_pool();
  2520   assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
  2521   if (cp_ref == NULL) {
  2522     return;  // robustness
  2523   }
  2525   constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  2526   if (cp == NULL) {
  2527     // Weak reference has been GC'ed. Since the constant pool has been
  2528     // GC'ed, the methods have also been GC'ed.
  2529     return;
  2530   }
  2532   // make the constantPoolOop safe to return
  2533   _prev_constant_pool_handle = constantPoolHandle(cp);
  2535   GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
  2536   if (method_refs == NULL) {
  2537     // the instanceKlass did not have any EMCP methods
  2538     return;
  2539   }
  2541   _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
  2543   int n_methods = method_refs->length();
  2544   for (int i = 0; i < n_methods; i++) {
  2545     jweak method_ref = method_refs->at(i);
  2546     assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
  2547     if (method_ref == NULL) {
  2548       continue;  // robustness
  2549     }
  2551     methodOop method = (methodOop)JNIHandles::resolve(method_ref);
  2552     if (method == NULL) {
  2553       // this entry has been GC'ed so skip it
  2554       continue;
  2555     }
  2557     // make the methodOop safe to return
  2558     _prev_EMCP_method_handles->append(methodHandle(method));
  2559   }
  2560 }
  2563 // Destroy a PreviousVersionInfo
  2564 PreviousVersionInfo::~PreviousVersionInfo() {
  2565   // Since _prev_EMCP_method_handles is not C-heap allocated, we
  2566   // don't have to delete it.
  2567 }
  2570 // Construct a helper for walking the previous versions array
  2571 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
  2572   _previous_versions = ik->previous_versions();
  2573   _current_index = 0;
  2574   // _hm needs no initialization
  2575   _current_p = NULL;
  2576 }
  2579 // Destroy a PreviousVersionWalker
  2580 PreviousVersionWalker::~PreviousVersionWalker() {
  2581   // Delete the current info just in case the caller didn't walk to
  2582   // the end of the previous versions list. No harm if _current_p is
  2583   // already NULL.
  2584   delete _current_p;
  2586   // When _hm is destroyed, all the Handles returned in
  2587   // PreviousVersionInfo objects will be destroyed.
  2588   // Also, after this destructor is finished it will be
  2589   // safe to delete the GrowableArray allocated in the
  2590   // PreviousVersionInfo objects.
  2591 }
  2594 // Return the interesting information for the next previous version
  2595 // of the klass. Returns NULL if there are no more previous versions.
  2596 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
  2597   if (_previous_versions == NULL) {
  2598     // no previous versions so nothing to return
  2599     return NULL;
  2600   }
  2602   delete _current_p;  // cleanup the previous info for the caller
  2603   _current_p = NULL;  // reset to NULL so we don't delete same object twice
  2605   int length = _previous_versions->length();
  2607   while (_current_index < length) {
  2608     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
  2609     PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
  2610                                           PreviousVersionInfo(pv_node);
  2612     constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
  2613     if (cp_h.is_null()) {
  2614       delete pv_info;
  2616       // The underlying node's info has been GC'ed so try the next one.
  2617       // We don't have to check the methods. If the constant pool has
  2618       // GC'ed then so have the methods.
  2619       continue;
  2620     }
  2622     // Found a node with non GC'ed info so return it. The caller will
  2623     // need to delete pv_info when they are done with it.
  2624     _current_p = pv_info;
  2625     return pv_info;
  2626   }
  2628   // all of the underlying nodes' info has been GC'ed
  2629   return NULL;
  2630 } // end next_previous_version()
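       // Typical traversal using the walker above (a sketch; RedefineClasses
       // support code elsewhere in the VM follows this shape):
       //
       //   PreviousVersionWalker pvw(ik);
       //   for (PreviousVersionInfo* pv = pvw.next_previous_version();
       //        pv != NULL; pv = pvw.next_previous_version()) {
       //     // use pv->prev_constant_pool_handle() and
       //     // pv->prev_EMCP_method_handles() before the next call
       //   }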
