Wed, 06 Jan 2010 22:21:39 -0800
Merge
1 /*
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_instanceKlass.cpp.incl"
28 bool instanceKlass::should_be_initialized() const {
29 return !is_initialized();
30 }
32 klassVtable* instanceKlass::vtable() const {
33 return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
34 }
36 klassItable* instanceKlass::itable() const {
37 return new klassItable(as_klassOop());
38 }
40 void instanceKlass::eager_initialize(Thread *thread) {
41 if (!EagerInitialization) return;
43 if (this->is_not_initialized()) {
44 // abort if the class has a class initializer
45 if (this->class_initializer() != NULL) return;
47 // abort if it is java.lang.Object (initialization is handled in genesis)
48 klassOop super = this->super();
49 if (super == NULL) return;
51 // abort if the super class should be initialized
52 if (!instanceKlass::cast(super)->is_initialized()) return;
54 // call body to expose the this pointer
55 instanceKlassHandle this_oop(thread, this->as_klassOop());
56 eager_initialize_impl(this_oop);
57 }
58 }
61 void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
62 EXCEPTION_MARK;
63 ObjectLocker ol(this_oop, THREAD);
65 // abort if someone beat us to the initialization
66 if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized()
68 ClassState old_state = this_oop->_init_state;
69 link_class_impl(this_oop, true, THREAD);
70 if (HAS_PENDING_EXCEPTION) {
71 CLEAR_PENDING_EXCEPTION;
72 // Abort if linking the class throws an exception.
74 // Use a test to avoid redundantly resetting the state if there's
75 // no change. set_init_state() asserts that state changes make
76 // progress, whereas here we might just be spinning in place.
77 if( old_state != this_oop->_init_state )
78 this_oop->set_init_state (old_state);
79 } else {
80 // linking successful, mark class as initialized
81 this_oop->set_init_state (fully_initialized);
82 // trace
83 if (TraceClassInitialization) {
84 ResourceMark rm(THREAD);
85 tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
86 }
87 }
88 }
91 // See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
92 // process. The step comments refer to the procedure described in that section.
93 // Note: implementation moved to static method to expose the this pointer.
94 void instanceKlass::initialize(TRAPS) {
95 if (this->should_be_initialized()) {
96 HandleMark hm(THREAD);
97 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
98 initialize_impl(this_oop, CHECK);
99 // Note: at this point the class may be initialized
100 // OR it may be in the state of being initialized
101 // in case of recursive initialization!
102 } else {
103 assert(is_initialized(), "sanity check");
104 }
105 }
108 bool instanceKlass::verify_code(
109 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
110 // 1) Verify the bytecodes
111 Verifier::Mode mode =
112 throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
113 return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
114 }
117 // Used exclusively by the shared spaces dump mechanism to prevent
118 // classes mapped into the shared regions in new VMs from appearing linked.
120 void instanceKlass::unlink_class() {
121 assert(is_linked(), "must be linked");
122 _init_state = loaded;
123 }
125 void instanceKlass::link_class(TRAPS) {
126 assert(is_loaded(), "must be loaded");
127 if (!is_linked()) {
128 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
129 link_class_impl(this_oop, true, CHECK);
130 }
131 }
133 // Called to verify that a class can link during initialization, without
134 // throwing a VerifyError.
135 bool instanceKlass::link_class_or_fail(TRAPS) {
136 assert(is_loaded(), "must be loaded");
137 if (!is_linked()) {
138 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
139 link_class_impl(this_oop, false, CHECK_false);
140 }
141 return is_linked();
142 }
144 bool instanceKlass::link_class_impl(
145 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
146 // check for error state
147 if (this_oop->is_in_error_state()) {
148 ResourceMark rm(THREAD);
149 THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
150 this_oop->external_name(), false);
151 }
152 // return if already verified
153 if (this_oop->is_linked()) {
154 return true;
155 }
157 // Timing
158 // timer handles recursion
159 assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
160 JavaThread* jt = (JavaThread*)THREAD;
162 // link super class before linking this class
163 instanceKlassHandle super(THREAD, this_oop->super());
164 if (super.not_null()) {
165 if (super->is_interface()) { // check if super class is an interface
166 ResourceMark rm(THREAD);
167 Exceptions::fthrow(
168 THREAD_AND_LOCATION,
169 vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
170 "class %s has interface %s as super class",
171 this_oop->external_name(),
172 super->external_name()
173 );
174 return false;
175 }
177 link_class_impl(super, throw_verifyerror, CHECK_false);
178 }
180 // link all interfaces implemented by this class before linking this class
181 objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
182 int num_interfaces = interfaces->length();
183 for (int index = 0; index < num_interfaces; index++) {
184 HandleMark hm(THREAD);
185 instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
186 link_class_impl(ih, throw_verifyerror, CHECK_false);
187 }
189 // in case the class is linked in the process of linking its superclasses
190 if (this_oop->is_linked()) {
191 return true;
192 }
194 // trace only the link time for this klass; it includes
195 // the verification time
196 PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
197 ClassLoader::perf_class_link_selftime(),
198 ClassLoader::perf_classes_linked(),
199 jt->get_thread_stat()->perf_recursion_counts_addr(),
200 jt->get_thread_stat()->perf_timers_addr(),
201 PerfClassTraceTime::CLASS_LINK);
203 // verification & rewriting
204 {
205 ObjectLocker ol(this_oop, THREAD);
206 // rewritten will have been set if a loader constraint error was found
207 // on an earlier link attempt;
208 // don't verify or rewrite if already rewritten
209 if (!this_oop->is_linked()) {
210 if (!this_oop->is_rewritten()) {
211 {
212 // Timer includes any side effects of class verification (resolution,
213 // etc), but not recursive entry into verify_code().
214 PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
215 ClassLoader::perf_class_verify_selftime(),
216 ClassLoader::perf_classes_verified(),
217 jt->get_thread_stat()->perf_recursion_counts_addr(),
218 jt->get_thread_stat()->perf_timers_addr(),
219 PerfClassTraceTime::CLASS_VERIFY);
220 bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
221 if (!verify_ok) {
222 return false;
223 }
224 }
226 // Just in case a side-effect of verify linked this class already
227 // (which can sometimes happen since the verifier loads classes
228 // using custom class loaders, which are free to initialize things)
229 if (this_oop->is_linked()) {
230 return true;
231 }
233 // also sets rewritten
234 this_oop->rewrite_class(CHECK_false);
235 }
237 // Initialize the vtable and interface table after
238 // methods have been rewritten since rewrite may
239 // fabricate new methodOops.
240 // also does loader constraint checking
241 if (!this_oop()->is_shared()) {
242 ResourceMark rm(THREAD);
243 this_oop->vtable()->initialize_vtable(true, CHECK_false);
244 this_oop->itable()->initialize_itable(true, CHECK_false);
245 }
246 #ifdef ASSERT
247 else {
248 ResourceMark rm(THREAD);
249 this_oop->vtable()->verify(tty, true);
250 // In case itable verification is ever added.
251 // this_oop->itable()->verify(tty, true);
252 }
253 #endif
254 this_oop->set_init_state(linked);
255 if (JvmtiExport::should_post_class_prepare()) {
256 Thread *thread = THREAD;
257 assert(thread->is_Java_thread(), "thread->is_Java_thread()");
258 JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
259 }
260 }
261 }
262 return true;
263 }
266 // Rewrite the byte codes of all of the methods of a class.
267 // Three cases:
268 // During the link of a newly loaded class.
269 // During the preloading of classes to be written to the shared spaces.
270 // - Rewrite the methods and update the method entry points.
271 //
272 // During the link of a class in the shared spaces.
273 // - The methods were already rewritten; update the method entry points.
274 //
275 // The rewriter must be called exactly once. Rewriting must happen after
276 // verification but before the first method of the class is executed.
278 void instanceKlass::rewrite_class(TRAPS) {
279 assert(is_loaded(), "must be loaded");
280 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
281 if (this_oop->is_rewritten()) {
282 assert(this_oop()->is_shared(), "rewriting an unshared class?");
283 return;
284 }
285 Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
286 this_oop->set_rewritten();
287 }
290 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
291 // Make sure klass is linked (verified) before initialization
292 // A class could already be verified, since it has been reflected upon.
293 this_oop->link_class(CHECK);
295 // refer to the JVM book page 47 for description of steps
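// Rough roadmap of the steps below (descriptive note, not normative):
//   1     grab the init lock for this klass
//   2     wait while another thread is initializing it
//   3     return if this thread is already initializing it (recursion)
//   4     return if it is already fully initialized
//   5     throw NoClassDefFoundError if a previous attempt failed
//   6     mark it being_initialized and record the initializing thread
//   7     initialize the superclass first
//   8     run the class initializer (<clinit>)
//   9     on success, mark fully_initialized and notify waiters
//   10/11 on failure, mark initialization_error and rethrow appropriately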
296 // Step 1
297 { ObjectLocker ol(this_oop, THREAD);
299 Thread *self = THREAD; // THREAD is the calling (current) thread
301 // Step 2
302 // If we were to use wait() instead of waitUninterruptibly() then
303 // we might end up throwing IE from link/symbol resolution sites
304 // that aren't expected to throw. This would wreak havoc. See 6320309.
305 while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
306 ol.waitUninterruptibly(CHECK);
307 }
309 // Step 3
310 if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self))
311 return;
313 // Step 4
314 if (this_oop->is_initialized())
315 return;
317 // Step 5
318 if (this_oop->is_in_error_state()) {
319 ResourceMark rm(THREAD);
320 const char* desc = "Could not initialize class ";
321 const char* className = this_oop->external_name();
322 size_t msglen = strlen(desc) + strlen(className) + 1;
323 char* message = NEW_C_HEAP_ARRAY(char, msglen);
324 if (NULL == message) {
325 // Out of memory: can't create detailed error message
326 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
327 } else {
328 jio_snprintf(message, msglen, "%s%s", desc, className);
329 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
330 }
331 }
333 // Step 6
334 this_oop->set_init_state(being_initialized);
335 this_oop->set_init_thread(self);
336 }
338 // Step 7
339 klassOop super_klass = this_oop->super();
340 if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
341 Klass::cast(super_klass)->initialize(THREAD);
343 if (HAS_PENDING_EXCEPTION) {
344 Handle e(THREAD, PENDING_EXCEPTION);
345 CLEAR_PENDING_EXCEPTION;
346 {
347 EXCEPTION_MARK;
348 this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, sets state, and notifies all waiting threads
349 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below
350 }
351 THROW_OOP(e());
352 }
353 }
355 // Step 8
356 {
357 assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
358 JavaThread* jt = (JavaThread*)THREAD;
359 // Timer includes any side effects of class initialization (resolution,
360 // etc), but not recursive entry into call_class_initializer().
361 PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
362 ClassLoader::perf_class_init_selftime(),
363 ClassLoader::perf_classes_inited(),
364 jt->get_thread_stat()->perf_recursion_counts_addr(),
365 jt->get_thread_stat()->perf_timers_addr(),
366 PerfClassTraceTime::CLASS_CLINIT);
367 this_oop->call_class_initializer(THREAD);
368 }
370 // Step 9
371 if (!HAS_PENDING_EXCEPTION) {
372 this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
373 { ResourceMark rm(THREAD);
374 debug_only(this_oop->vtable()->verify(tty, true);)
375 }
376 }
377 else {
378 // Step 10 and 11
379 Handle e(THREAD, PENDING_EXCEPTION);
380 CLEAR_PENDING_EXCEPTION;
381 {
382 EXCEPTION_MARK;
383 this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
384 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
385 }
386 if (e->is_a(SystemDictionary::Error_klass())) {
387 THROW_OOP(e());
388 } else {
389 JavaCallArguments args(e);
390 THROW_ARG(vmSymbolHandles::java_lang_ExceptionInInitializerError(),
391 vmSymbolHandles::throwable_void_signature(),
392 &args);
393 }
394 }
395 }
398 // Note: implementation moved to static method to expose the this pointer.
399 void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
400 instanceKlassHandle kh(THREAD, this->as_klassOop());
401 set_initialization_state_and_notify_impl(kh, state, CHECK);
402 }
404 void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
405 ObjectLocker ol(this_oop, THREAD);
406 this_oop->set_init_state(state);
407 ol.notify_all(CHECK);
408 }
410 void instanceKlass::add_implementor(klassOop k) {
411 assert(Compile_lock->owned_by_self(), "");
412 // Filter out my subinterfaces.
413 // (Note: Interfaces are never on the subklass list.)
414 if (instanceKlass::cast(k)->is_interface()) return;
416 // Filter out subclasses whose supers already implement me.
417 // (Note: CHA must walk subclasses of direct implementors
418 // in order to locate indirect implementors.)
419 klassOop sk = instanceKlass::cast(k)->super();
420 if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
421 // We only need to check one immediate superclass, since the
422 // implements_interface query looks at transitive_interfaces.
423 // Any supers of the super have the same (or fewer) transitive_interfaces.
424 return;
426 // Update number of implementors
427 int i = _nof_implementors++;
429 // Record this implementor, if there are not too many already
430 if (i < implementors_limit) {
431 assert(_implementors[i] == NULL, "should be exactly one implementor");
432 oop_store_without_check((oop*)&_implementors[i], k);
433 } else if (i == implementors_limit) {
434 // clear out the list on first overflow
435 for (int i2 = 0; i2 < implementors_limit; i2++)
436 oop_store_without_check((oop*)&_implementors[i2], NULL);
437 }
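// Beyond implementors_limit only _nof_implementors keeps counting; the list
// itself is dropped, so callers (e.g. CHA) have to treat an over-limit count
// as "too many to enumerate". (Descriptive note based on the logic above.)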
439 // The implementor also implements the transitive_interfaces
440 for (int index = 0; index < local_interfaces()->length(); index++) {
441 instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
442 }
443 }
445 void instanceKlass::init_implementor() {
446 for (int i = 0; i < implementors_limit; i++)
447 oop_store_without_check((oop*)&_implementors[i], NULL);
448 _nof_implementors = 0;
449 }
452 void instanceKlass::process_interfaces(Thread *thread) {
453 // link this class into the implementors list of every interface it implements
454 KlassHandle this_as_oop (thread, this->as_klassOop());
455 for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
456 assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
457 instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
458 assert(interf->is_interface(), "expected interface");
459 interf->add_implementor(this_as_oop());
460 }
461 }
463 bool instanceKlass::can_be_primary_super_slow() const {
464 if (is_interface())
465 return false;
466 else
467 return Klass::can_be_primary_super_slow();
468 }
470 objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
471 // The secondaries are the implemented interfaces.
472 instanceKlass* ik = instanceKlass::cast(as_klassOop());
473 objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
474 int num_secondaries = num_extra_slots + interfaces->length();
475 if (num_secondaries == 0) {
476 return Universe::the_empty_system_obj_array();
477 } else if (num_extra_slots == 0) {
478 return interfaces();
479 } else {
480 // a mix of both
481 objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
482 for (int i = 0; i < interfaces->length(); i++) {
483 secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
484 }
485 return secondaries;
486 }
487 }
489 bool instanceKlass::compute_is_subtype_of(klassOop k) {
490 if (Klass::cast(k)->is_interface()) {
491 return implements_interface(k);
492 } else {
493 return Klass::compute_is_subtype_of(k);
494 }
495 }
497 bool instanceKlass::implements_interface(klassOop k) const {
498 if (as_klassOop() == k) return true;
499 assert(Klass::cast(k)->is_interface(), "should be an interface class");
500 for (int i = 0; i < transitive_interfaces()->length(); i++) {
501 if (transitive_interfaces()->obj_at(i) == k) {
502 return true;
503 }
504 }
505 return false;
506 }
508 objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
509 if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
510 if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
511 report_java_out_of_memory("Requested array size exceeds VM limit");
512 THROW_OOP_0(Universe::out_of_memory_error_array_size());
513 }
514 int size = objArrayOopDesc::object_size(length);
515 klassOop ak = array_klass(n, CHECK_NULL);
516 KlassHandle h_ak (THREAD, ak);
517 objArrayOop o =
518 (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
519 return o;
520 }
522 instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
523 if (TraceFinalizerRegistration) {
524 tty->print("Registered ");
525 i->print_value_on(tty);
526 tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
527 }
528 instanceHandle h_i(THREAD, i);
529 // Pass the handle as an argument; JavaCalls::call expects oops to be passed as handles (jobjects)
530 JavaValue result(T_VOID);
531 JavaCallArguments args(h_i);
532 methodHandle mh (THREAD, Universe::finalizer_register_method());
533 JavaCalls::call(&result, mh, &args, CHECK_NULL);
534 return h_i();
535 }
537 instanceOop instanceKlass::allocate_instance(TRAPS) {
538 bool has_finalizer_flag = has_finalizer(); // Query before possible GC
539 int size = size_helper(); // Query before forming handle.
541 KlassHandle h_k(THREAD, as_klassOop());
543 instanceOop i;
545 i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
546 if (has_finalizer_flag && !RegisterFinalizersAtInit) {
547 i = register_finalizer(i, CHECK_NULL);
548 }
549 return i;
550 }
552 instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
553 // Finalizer registration occurs in the Object.<init> constructor
554 // and constructors normally aren't run when allocating perm
555 // instances so simply disallow finalizable perm objects. This can
556 // be relaxed if a need for it is found.
557 assert(!has_finalizer(), "perm objects not allowed to have finalizers");
558 int size = size_helper(); // Query before forming handle.
559 KlassHandle h_k(THREAD, as_klassOop());
560 instanceOop i = (instanceOop)
561 CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
562 return i;
563 }
565 void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
566 if (is_interface() || is_abstract()) {
567 ResourceMark rm(THREAD);
568 THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
569 : vmSymbols::java_lang_InstantiationException(), external_name());
570 }
571 if (as_klassOop() == SystemDictionary::Class_klass()) {
572 ResourceMark rm(THREAD);
573 THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
574 : vmSymbols::java_lang_IllegalAccessException(), external_name());
575 }
576 }
578 klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
579 instanceKlassHandle this_oop(THREAD, as_klassOop());
580 return array_klass_impl(this_oop, or_null, n, THREAD);
581 }
583 klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
584 if (this_oop->array_klasses() == NULL) {
585 if (or_null) return NULL;
587 ResourceMark rm;
588 JavaThread *jt = (JavaThread *)THREAD;
589 {
590 // Atomic creation of array_klasses
591 MutexLocker mc(Compile_lock, THREAD); // for vtables
592 MutexLocker ma(MultiArray_lock, THREAD);
594 // Check if update has already taken place
595 if (this_oop->array_klasses() == NULL) {
596 objArrayKlassKlass* oakk =
597 (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
599 klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
600 this_oop->set_array_klasses(k);
601 }
602 }
603 }
604 // this_oop->array_klasses() will always be set at this point
605 objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
606 if (or_null) {
607 return oak->array_klass_or_null(n);
608 }
609 return oak->array_klass(n, CHECK_NULL);
610 }
612 klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
613 return array_klass_impl(or_null, 1, THREAD);
614 }
616 void instanceKlass::call_class_initializer(TRAPS) {
617 instanceKlassHandle ik (THREAD, as_klassOop());
618 call_class_initializer_impl(ik, THREAD);
619 }
621 static int call_class_initializer_impl_counter = 0; // for debugging
623 methodOop instanceKlass::class_initializer() {
624 return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
625 }
627 void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
628 methodHandle h_method(THREAD, this_oop->class_initializer());
629 assert(!this_oop->is_initialized(), "we cannot initialize twice");
630 if (TraceClassInitialization) {
631 tty->print("%d Initializing ", call_class_initializer_impl_counter++);
632 this_oop->name()->print_value();
633 tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
634 }
635 if (h_method() != NULL) {
636 JavaCallArguments args; // No arguments
637 JavaValue result(T_VOID);
638 JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
639 }
640 }
643 void instanceKlass::mask_for(methodHandle method, int bci,
644 InterpreterOopMap* entry_for) {
645 // Dirty read, then double-check under a lock.
646 if (_oop_map_cache == NULL) {
647 // Otherwise, allocate a new one.
648 MutexLocker x(OopMapCacheAlloc_lock);
649 // First time use. Allocate a cache in C heap
650 if (_oop_map_cache == NULL) {
651 _oop_map_cache = new OopMapCache();
652 }
653 }
654 // _oop_map_cache is constant after init; lookup below does its own locking.
655 _oop_map_cache->lookup(method, bci, entry_for);
656 }
659 bool instanceKlass::find_local_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
660 const int n = fields()->length();
661 for (int i = 0; i < n; i += next_offset ) {
662 int name_index = fields()->ushort_at(i + name_index_offset);
663 int sig_index = fields()->ushort_at(i + signature_index_offset);
664 symbolOop f_name = constants()->symbol_at(name_index);
665 symbolOop f_sig = constants()->symbol_at(sig_index);
666 if (f_name == name && f_sig == sig) {
667 fd->initialize(as_klassOop(), i);
668 return true;
669 }
670 }
671 return false;
672 }
675 void instanceKlass::field_names_and_sigs_iterate(OopClosure* closure) {
676 const int n = fields()->length();
677 for (int i = 0; i < n; i += next_offset ) {
678 int name_index = fields()->ushort_at(i + name_index_offset);
679 symbolOop name = constants()->symbol_at(name_index);
680 closure->do_oop((oop*)&name);
682 int sig_index = fields()->ushort_at(i + signature_index_offset);
683 symbolOop sig = constants()->symbol_at(sig_index);
684 closure->do_oop((oop*)&sig);
685 }
686 }
689 klassOop instanceKlass::find_interface_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
690 const int n = local_interfaces()->length();
691 for (int i = 0; i < n; i++) {
692 klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
693 assert(Klass::cast(intf1)->is_interface(), "just checking type");
694 // search for field in current interface
695 if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
696 assert(fd->is_static(), "interface field must be static");
697 return intf1;
698 }
699 // search for field in direct superinterfaces
700 klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
701 if (intf2 != NULL) return intf2;
702 }
703 // otherwise field lookup fails
704 return NULL;
705 }
708 klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
709 // search order according to newest JVM spec (5.4.3.2, p.167).
710 // 1) search for field in current klass
711 if (find_local_field(name, sig, fd)) {
712 return as_klassOop();
713 }
714 // 2) search for field recursively in direct superinterfaces
715 { klassOop intf = find_interface_field(name, sig, fd);
716 if (intf != NULL) return intf;
717 }
718 // 3) apply field lookup recursively if superclass exists
719 { klassOop supr = super();
720 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
721 }
722 // 4) otherwise field lookup fails
723 return NULL;
724 }
727 klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, bool is_static, fieldDescriptor* fd) const {
728 // search order according to newest JVM spec (5.4.3.2, p.167).
729 // 1) search for field in current klass
730 if (find_local_field(name, sig, fd)) {
731 if (fd->is_static() == is_static) return as_klassOop();
732 }
733 // 2) search for field recursively in direct superinterfaces
734 if (is_static) {
735 klassOop intf = find_interface_field(name, sig, fd);
736 if (intf != NULL) return intf;
737 }
738 // 3) apply field lookup recursively if superclass exists
739 { klassOop supr = super();
740 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
741 }
742 // 4) otherwise field lookup fails
743 return NULL;
744 }
747 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
748 int length = fields()->length();
749 for (int i = 0; i < length; i += next_offset) {
750 if (offset_from_fields( i ) == offset) {
751 fd->initialize(as_klassOop(), i);
752 if (fd->is_static() == is_static) return true;
753 }
754 }
755 return false;
756 }
759 bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
760 klassOop klass = as_klassOop();
761 while (klass != NULL) {
762 if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
763 return true;
764 }
765 klass = Klass::cast(klass)->super();
766 }
767 return false;
768 }
771 void instanceKlass::methods_do(void f(methodOop method)) {
772 int len = methods()->length();
773 for (int index = 0; index < len; index++) {
774 methodOop m = methodOop(methods()->obj_at(index));
775 assert(m->is_method(), "must be method");
776 f(m);
777 }
778 }
780 void instanceKlass::do_local_static_fields(FieldClosure* cl) {
781 fieldDescriptor fd;
782 int length = fields()->length();
783 for (int i = 0; i < length; i += next_offset) {
784 fd.initialize(as_klassOop(), i);
785 if (fd.is_static()) cl->do_field(&fd);
786 }
787 }
790 void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
791 instanceKlassHandle h_this(THREAD, as_klassOop());
792 do_local_static_fields_impl(h_this, f, CHECK);
793 }
796 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
797 fieldDescriptor fd;
798 int length = this_oop->fields()->length();
799 for (int i = 0; i < length; i += next_offset) {
800 fd.initialize(this_oop(), i);
801 if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
802 }
803 }
806 static int compare_fields_by_offset(int* a, int* b) {
807 return a[0] - b[0];
808 }
810 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
811 instanceKlass* super = superklass();
812 if (super != NULL) {
813 super->do_nonstatic_fields(cl);
814 }
815 fieldDescriptor fd;
816 int length = fields()->length();
817 // In DebugInfo nonstatic fields are sorted by offset.
818 int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
819 int j = 0;
820 for (int i = 0; i < length; i += next_offset) {
821 fd.initialize(as_klassOop(), i);
822 if (!fd.is_static()) {
823 fields_sorted[j + 0] = fd.offset();
824 fields_sorted[j + 1] = i;
825 j += 2;
826 }
827 }
828 if (j > 0) {
829 length = j;
830 // _sort_Fn is defined in growableArray.hpp.
831 qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
832 for (int i = 0; i < length; i += 2) {
833 fd.initialize(as_klassOop(), fields_sorted[i + 1]);
834 assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
835 cl->do_field(&fd);
836 }
837 }
838 FREE_C_HEAP_ARRAY(int, fields_sorted);
839 }
842 void instanceKlass::array_klasses_do(void f(klassOop k)) {
843 if (array_klasses() != NULL)
844 arrayKlass::cast(array_klasses())->array_klasses_do(f);
845 }
848 void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
849 f(as_klassOop());
850 array_klasses_do(f);
851 }
853 #ifdef ASSERT
854 static int linear_search(objArrayOop methods, symbolOop name, symbolOop signature) {
855 int len = methods->length();
856 for (int index = 0; index < len; index++) {
857 methodOop m = (methodOop)(methods->obj_at(index));
858 assert(m->is_method(), "must be method");
859 if (m->signature() == signature && m->name() == name) {
860 return index;
861 }
862 }
863 return -1;
864 }
865 #endif
867 methodOop instanceKlass::find_method(symbolOop name, symbolOop signature) const {
868 return instanceKlass::find_method(methods(), name, signature);
869 }
871 methodOop instanceKlass::find_method(objArrayOop methods, symbolOop name, symbolOop signature) {
872 int len = methods->length();
873 // methods are sorted, so do binary search
874 int l = 0;
875 int h = len - 1;
876 while (l <= h) {
877 int mid = (l + h) >> 1;
878 methodOop m = (methodOop)methods->obj_at(mid);
879 assert(m->is_method(), "must be method");
880 int res = m->name()->fast_compare(name);
881 if (res == 0) {
882 // found matching name; do linear search to find matching signature
883 // first, quick check for common case
884 if (m->signature() == signature) return m;
885 // search downwards through overloaded methods
886 int i;
887 for (i = mid - 1; i >= l; i--) {
888 methodOop m = (methodOop)methods->obj_at(i);
889 assert(m->is_method(), "must be method");
890 if (m->name() != name) break;
891 if (m->signature() == signature) return m;
892 }
893 // search upwards
894 for (i = mid + 1; i <= h; i++) {
895 methodOop m = (methodOop)methods->obj_at(i);
896 assert(m->is_method(), "must be method");
897 if (m->name() != name) break;
898 if (m->signature() == signature) return m;
899 }
900 // not found
901 #ifdef ASSERT
902 int index = linear_search(methods, name, signature);
903 if (index != -1) fatal1("binary search bug: should have found entry %d", index);
904 #endif
905 return NULL;
906 } else if (res < 0) {
907 l = mid + 1;
908 } else {
909 h = mid - 1;
910 }
911 }
912 #ifdef ASSERT
913 int index = linear_search(methods, name, signature);
914 if (index != -1) fatal1("binary search bug: should have found entry %d", index);
915 #endif
916 return NULL;
917 }
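// Note on the search above: methods() is sorted by name (fast_compare order),
// so all overloads of a given name are adjacent. Once the binary search hits
// any method with the right name, the short linear scans in both directions
// are sufficient to find the matching signature. (Descriptive note.)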
919 methodOop instanceKlass::uncached_lookup_method(symbolOop name, symbolOop signature) const {
920 klassOop klass = as_klassOop();
921 while (klass != NULL) {
922 methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
923 if (method != NULL) return method;
924 klass = instanceKlass::cast(klass)->super();
925 }
926 return NULL;
927 }
929 // lookup a method in all the interfaces that this class implements
930 methodOop instanceKlass::lookup_method_in_all_interfaces(symbolOop name,
931 symbolOop signature) const {
932 objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
933 int num_ifs = all_ifs->length();
934 instanceKlass *ik = NULL;
935 for (int i = 0; i < num_ifs; i++) {
936 ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
937 methodOop m = ik->lookup_method(name, signature);
938 if (m != NULL) {
939 return m;
940 }
941 }
942 return NULL;
943 }
945 /* jni_id_for_impl for jfieldIds only */
946 JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
947 MutexLocker ml(JfieldIdCreation_lock);
948 // Retry lookup after we got the lock
949 JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
950 if (probe == NULL) {
951 // Slow case, allocate new static field identifier
952 probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
953 this_oop->set_jni_ids(probe);
954 }
955 return probe;
956 }
959 /* jni_id_for for jfieldIds only */
960 JNIid* instanceKlass::jni_id_for(int offset) {
961 JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
962 if (probe == NULL) {
963 probe = jni_id_for_impl(this->as_klassOop(), offset);
964 }
965 return probe;
966 }
969 // Lookup or create a jmethodID.
970 // This code is called by the VMThread and JavaThreads so the
971 // locking has to be done very carefully to avoid deadlocks
972 // and/or other cache consistency problems.
973 //
974 jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
975 size_t idnum = (size_t)method_h->method_idnum();
976 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
977 size_t length = 0;
978 jmethodID id = NULL;
980 // We use a double-check locking idiom here because this cache is
981 // performance sensitive. In the normal system, this cache only
982 // transitions from NULL to non-NULL which is safe because we use
983 // release_set_methods_jmethod_ids() to advertise the new cache.
984 // A partially constructed cache should never be seen by a racing
985 // thread. We also use release_store_ptr() to save a new jmethodID
986 // in the cache so a partially constructed jmethodID should never be
987 // seen either. Cache reads of existing jmethodIDs proceed without a
988 // lock, but cache writes of a new jmethodID require uniqueness and
989 // creation of the cache itself requires no leaks so a lock is
990 // generally acquired in those two cases.
991 //
992 // If the RedefineClasses() API has been used, then this cache can
993 // grow and we'll have transitions from non-NULL to bigger non-NULL.
994 // Cache creation requires no leaks and we require safety between all
995 // cache accesses and freeing of the old cache so a lock is generally
996 // acquired when the RedefineClasses() API has been used.
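// Sketch of the cache layout assumed below (illustrative; see
// get_jmethod_id_length_value() and get_jmethod_id_fetch_or_update()):
//   jmeths[0]       holds the cache capacity, cast to jmethodID
//   jmeths[idnum+1] holds the jmethodID for that method idnum, or NULL
// so an unlocked read is essentially:
//   if (jmeths != NULL && (size_t)jmeths[0] > idnum) id = jmeths[idnum+1];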
998 if (jmeths != NULL) {
999 // the cache already exists
1000 if (!ik_h->idnum_can_increment()) {
1001 // the cache can't grow so we can just get the current values
1002 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1003 } else {
1004 // cache can grow so we have to be more careful
1005 if (Threads::number_of_threads() == 0 ||
1006 SafepointSynchronize::is_at_safepoint()) {
1007 // we're single threaded or at a safepoint - no locking needed
1008 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1009 } else {
1010 MutexLocker ml(JmethodIdCreation_lock);
1011 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1012 }
1013 }
1014 }
1015 // implied else:
1016 // we need to allocate a cache so default length and id values are good
1018 if (jmeths == NULL || // no cache yet
1019 length <= idnum || // cache is too short
1020 id == NULL) { // cache doesn't contain entry
1022 // This function can be called by the VMThread so we have to do all
1023 // things that might block on a safepoint before grabbing the lock.
1024 // Otherwise, we can deadlock with the VMThread or have a cache
1025 // consistency issue. These vars keep track of what we might have
1026 // to free after the lock is dropped.
1027 jmethodID to_dealloc_id = NULL;
1028 jmethodID* to_dealloc_jmeths = NULL;
1030 // may not allocate new_jmeths or use it if we allocate it
1031 jmethodID* new_jmeths = NULL;
1032 if (length <= idnum) {
1033 // allocate a new cache that might be used
1034 size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1035 new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
1036 memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1037 // cache size is stored in element[0], other elements offset by one
1038 new_jmeths[0] = (jmethodID)size;
1039 }
1041 // allocate a new jmethodID that might be used
1042 jmethodID new_id = NULL;
1043 if (method_h->is_old() && !method_h->is_obsolete()) {
1044 // The method passed in is old (but not obsolete), so we need to use the current version
1045 methodOop current_method = ik_h->method_with_idnum((int)idnum);
1046 assert(current_method != NULL, "old but not obsolete, so should exist");
1047 methodHandle current_method_h(current_method == NULL? method_h() : current_method);
1048 new_id = JNIHandles::make_jmethod_id(current_method_h);
1049 } else {
1050 // It is the current version of the method or an obsolete method,
1051 // use the version passed in
1052 new_id = JNIHandles::make_jmethod_id(method_h);
1053 }
1055 if (Threads::number_of_threads() == 0 ||
1056 SafepointSynchronize::is_at_safepoint()) {
1057 // we're single threaded or at a safepoint - no locking needed
1058 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1059 &to_dealloc_id, &to_dealloc_jmeths);
1060 } else {
1061 MutexLocker ml(JmethodIdCreation_lock);
1062 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1063 &to_dealloc_id, &to_dealloc_jmeths);
1064 }
1066 // The lock has been dropped so we can free resources.
1067 // Free up either the old cache or the new cache if we allocated one.
1068 if (to_dealloc_jmeths != NULL) {
1069 FreeHeap(to_dealloc_jmeths);
1070 }
1071 // free up the new ID since it wasn't needed
1072 if (to_dealloc_id != NULL) {
1073 JNIHandles::destroy_jmethod_id(to_dealloc_id);
1074 }
1075 }
1076 return id;
1077 }
1080 // Common code to fetch the jmethodID from the cache or update the
1081 // cache with the new jmethodID. This function should never do anything
1082 // that causes the caller to go to a safepoint or we can deadlock with
1083 // the VMThread or have cache consistency issues.
1084 //
1085 jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
1086 instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
1087 jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
1088 jmethodID** to_dealloc_jmeths_p) {
1089 assert(new_id != NULL, "sanity check");
1090 assert(to_dealloc_id_p != NULL, "sanity check");
1091 assert(to_dealloc_jmeths_p != NULL, "sanity check");
1092 assert(Threads::number_of_threads() == 0 ||
1093 SafepointSynchronize::is_at_safepoint() ||
1094 JmethodIdCreation_lock->owned_by_self(), "sanity check");
1096 // reacquire the cache - we are locked, single threaded or at a safepoint
1097 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1098 jmethodID id = NULL;
1099 size_t length = 0;
1101 if (jmeths == NULL || // no cache yet
1102 (length = (size_t)jmeths[0]) <= idnum) { // cache is too short
1103 if (jmeths != NULL) {
1104 // copy any existing entries from the old cache
1105 for (size_t index = 0; index < length; index++) {
1106 new_jmeths[index+1] = jmeths[index+1];
1107 }
1108 *to_dealloc_jmeths_p = jmeths; // save old cache for later delete
1109 }
1110 ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
1111 } else {
1112 // fetch jmethodID (if any) from the existing cache
1113 id = jmeths[idnum+1];
1114 *to_dealloc_jmeths_p = new_jmeths; // save new cache for later delete
1115 }
1116 if (id == NULL) {
1117 // No matching jmethodID in the existing cache or we have a new
1118 // cache or we just grew the cache. This cache write is done here
1119 // by the first thread to win the foot race because a jmethodID
1120 // needs to be unique once it is generally available.
1121 id = new_id;
1123 // The jmethodID cache can be read while unlocked so we have to
1124 // make sure the new jmethodID is complete before installing it
1125 // in the cache.
1126 OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
1127 } else {
1128 *to_dealloc_id_p = new_id; // save new id for later delete
1129 }
1130 return id;
1131 }
1134 // Common code to get the jmethodID cache length and the jmethodID
1135 // value at index idnum if there is one.
1136 //
1137 void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1138 size_t idnum, size_t *length_p, jmethodID* id_p) {
1139 assert(cache != NULL, "sanity check");
1140 assert(length_p != NULL, "sanity check");
1141 assert(id_p != NULL, "sanity check");
1143 // cache size is stored in element[0], other elements offset by one
1144 *length_p = (size_t)cache[0];
1145 if (*length_p <= idnum) { // cache is too short
1146 *id_p = NULL;
1147 } else {
1148 *id_p = cache[idnum+1]; // fetch jmethodID (if any)
1149 }
1150 }
1153 // Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
1154 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1155 size_t idnum = (size_t)method->method_idnum();
1156 jmethodID* jmeths = methods_jmethod_ids_acquire();
1157 size_t length; // length assigned as debugging crumb
1158 jmethodID id = NULL;
1159 if (jmeths != NULL && // If there is a cache
1160 (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough,
1161 id = jmeths[idnum+1]; // Look up the id (may be NULL)
1162 }
1163 return id;
1164 }
1167 // Cache an itable index
1168 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1169 int* indices = methods_cached_itable_indices_acquire();
1170 int* to_dealloc_indices = NULL;
1172 // We use a double-check locking idiom here because this cache is
1173 // performance sensitive. In the normal system, this cache only
1174 // transitions from NULL to non-NULL which is safe because we use
1175 // release_set_methods_cached_itable_indices() to advertise the
1176 // new cache. A partially constructed cache should never be seen
1177 // by a racing thread. Cache reads and writes proceed without a
1178 // lock, but creation of the cache itself requires no leaks so a
1179 // lock is generally acquired in that case.
1180 //
1181 // If the RedefineClasses() API has been used, then this cache can
1182 // grow and we'll have transitions from non-NULL to bigger non-NULL.
1183 // Cache creation requires no leaks and we require safety between all
1184 // cache accesses and freeing of the old cache so a lock is generally
1185 // acquired when the RedefineClasses() API has been used.
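// The itable-index cache mirrors the jmethodID cache layout: indices[0] holds
// the capacity and indices[idnum+1] holds the cached index, with -1 meaning
// "not cached yet" (see cached_itable_index() below). (Descriptive note.)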
1187 if (indices == NULL || idnum_can_increment()) {
1188 // we need a cache or the cache can grow
1189 MutexLocker ml(JNICachedItableIndex_lock);
1190 // reacquire the cache to see if another thread already did the work
1191 indices = methods_cached_itable_indices_acquire();
1192 size_t length = 0;
1193 // cache size is stored in element[0], other elements offset by one
1194 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
1195 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
1196 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
1197 new_indices[0] = (int)size;
1198 // copy any existing entries
1199 size_t i;
1200 for (i = 0; i < length; i++) {
1201 new_indices[i+1] = indices[i+1];
1202 }
1203 // Set all the rest to -1
1204 for (i = length; i < size; i++) {
1205 new_indices[i+1] = -1;
1206 }
1207 if (indices != NULL) {
1208 // We have an old cache to delete so save it for after we
1209 // drop the lock.
1210 to_dealloc_indices = indices;
1211 }
1212 release_set_methods_cached_itable_indices(indices = new_indices);
1213 }
1215 if (idnum_can_increment()) {
1216 // this cache can grow so we have to write to it safely
1217 indices[idnum+1] = index;
1218 }
1219 } else {
1220 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1221 }
1223 if (!idnum_can_increment()) {
1224 // The cache cannot grow and this JNI itable index value does not
1225 // have to be unique like a jmethodID. If there is a race to set it,
1226 // it doesn't matter.
1227 indices[idnum+1] = index;
1228 }
1230 if (to_dealloc_indices != NULL) {
1231 // we allocated a new cache so free the old one
1232 FreeHeap(to_dealloc_indices);
1233 }
1234 }
1237 // Retrieve a cached itable index
1238 int instanceKlass::cached_itable_index(size_t idnum) {
1239 int* indices = methods_cached_itable_indices_acquire();
1240 if (indices != NULL && ((size_t)indices[0]) > idnum) {
1241 // indices exist and are long enough; retrieve the possibly cached index
1242 return indices[idnum+1];
1243 }
1244 return -1;
1245 }
1248 //
1249 // nmethodBucket is used to record dependent nmethods for
1250 // deoptimization. nmethod dependencies are actually <klass, method>
1251 // pairs but we really only care about the klass part for purposes of
1252 // finding nmethods which might need to be deoptimized. Instead of
1253 // recording the method, a count of how many times a particular nmethod
1254 // was recorded is kept. This ensures that any recording errors are
1255 // noticed since an nmethod should be removed as many times as it is
1256 // added.
1257 //
1258 class nmethodBucket {
1259 private:
1260 nmethod* _nmethod;
1261 int _count;
1262 nmethodBucket* _next;
1264 public:
1265 nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
1266 _nmethod = nmethod;
1267 _next = next;
1268 _count = 1;
1269 }
1270 int count() { return _count; }
1271 int increment() { _count += 1; return _count; }
1272 int decrement() { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
1273 nmethodBucket* next() { return _next; }
1274 void set_next(nmethodBucket* b) { _next = b; }
1275 nmethod* get_nmethod() { return _nmethod; }
1276 };
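// _dependencies is the head of a singly linked list of nmethodBucket nodes;
// registering the same nmethod twice bumps the existing bucket's count rather
// than adding a second bucket (see add_dependent_nmethod() below).
// (Descriptive note.)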
1279 //
1280 // Walk the list of dependent nmethods searching for nmethods which
1281 // are dependent on the klassOop that was passed in and mark them for
1282 // deoptimization. Returns the number of nmethods found.
1283 //
1284 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1285 assert_locked_or_safepoint(CodeCache_lock);
1286 int found = 0;
1287 nmethodBucket* b = _dependencies;
1288 while (b != NULL) {
1289 nmethod* nm = b->get_nmethod();
1290 // since dependencies aren't removed until an nmethod becomes a zombie,
1291 // the dependency list may contain nmethods which aren't alive.
1292 if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1293 if (TraceDependencies) {
1294 ResourceMark rm;
1295 tty->print_cr("Marked for deoptimization");
1296 tty->print_cr(" context = %s", this->external_name());
1297 changes.print();
1298 nm->print();
1299 nm->print_dependencies();
1300 }
1301 nm->mark_for_deoptimization();
1302 found++;
1303 }
1304 b = b->next();
1305 }
1306 return found;
1307 }
1310 //
1311 // Add an nmethodBucket to the list of dependencies for this nmethod.
1312 // It's possible that an nmethod has multiple dependencies on this klass
1313 // so a count is kept for each bucket to guarantee that creation and
1314 // deletion of dependencies is consistent.
1315 //
1316 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1317 assert_locked_or_safepoint(CodeCache_lock);
1318 nmethodBucket* b = _dependencies;
1319 nmethodBucket* last = NULL;
1320 while (b != NULL) {
1321 if (nm == b->get_nmethod()) {
1322 b->increment();
1323 return;
1324 }
1325 b = b->next();
1326 }
1327 _dependencies = new nmethodBucket(nm, _dependencies);
1328 }
1331 //
1332 // Decrement count of the nmethod in the dependency list and remove
1333 // the bucket completely when the count goes to 0. This method must
1334 // find a corresponding bucket; otherwise there's a bug in the
1335 // recording of dependencies.
1336 //
1337 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1338 assert_locked_or_safepoint(CodeCache_lock);
1339 nmethodBucket* b = _dependencies;
1340 nmethodBucket* last = NULL;
1341 while (b != NULL) {
1342 if (nm == b->get_nmethod()) {
1343 if (b->decrement() == 0) {
1344 if (last == NULL) {
1345 _dependencies = b->next();
1346 } else {
1347 last->set_next(b->next());
1348 }
1349 delete b;
1350 }
1351 return;
1352 }
1353 last = b;
1354 b = b->next();
1355 }
1356 #ifdef ASSERT
1357 tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1358 nm->print();
1359 #endif // ASSERT
1360 ShouldNotReachHere();
1361 }
1364 #ifndef PRODUCT
1365 void instanceKlass::print_dependent_nmethods(bool verbose) {
1366 nmethodBucket* b = _dependencies;
1367 int idx = 0;
1368 while (b != NULL) {
1369 nmethod* nm = b->get_nmethod();
1370 tty->print("[%d] count=%d { ", idx++, b->count());
1371 if (!verbose) {
1372 nm->print_on(tty, "nmethod");
1373 tty->print_cr(" } ");
1374 } else {
1375 nm->print();
1376 nm->print_dependencies();
1377 tty->print_cr("--- } ");
1378 }
1379 b = b->next();
1380 }
1381 }
1384 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1385 nmethodBucket* b = _dependencies;
1386 while (b != NULL) {
1387 if (nm == b->get_nmethod()) {
1388 return true;
1389 }
1390 b = b->next();
1391 }
1392 return false;
1393 }
1394 #endif //PRODUCT
1397 #ifdef ASSERT
1398 template <class T> void assert_is_in(T *p) {
1399 T heap_oop = oopDesc::load_heap_oop(p);
1400 if (!oopDesc::is_null(heap_oop)) {
1401 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1402 assert(Universe::heap()->is_in(o), "should be in heap");
1403 }
1404 }
1405 template <class T> void assert_is_in_closed_subset(T *p) {
1406 T heap_oop = oopDesc::load_heap_oop(p);
1407 if (!oopDesc::is_null(heap_oop)) {
1408 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1409 assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1410 }
1411 }
1412 template <class T> void assert_is_in_reserved(T *p) {
1413 T heap_oop = oopDesc::load_heap_oop(p);
1414 if (!oopDesc::is_null(heap_oop)) {
1415 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1416 assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1417 }
1418 }
1419 template <class T> void assert_nothing(T *p) {}
1421 #else
1422 template <class T> void assert_is_in(T *p) {}
1423 template <class T> void assert_is_in_closed_subset(T *p) {}
1424 template <class T> void assert_is_in_reserved(T *p) {}
1425 template <class T> void assert_nothing(T *p) {}
1426 #endif // ASSERT
1428 //
1429 // Macros that iterate over areas of oops which are specialized on type of
1430 // oop pointer either narrow or wide, depending on UseCompressedOops
1431 //
1432 // Parameters are:
1433 // T - type of oop to point to (either oop or narrowOop)
1434 // start_p - starting pointer for region to iterate over
1435 // count - number of oops or narrowOops to iterate over
1436 // do_oop - action to perform on each oop (it's arbitrary C code which
1437 // makes it more efficient to put in a macro rather than making
1438 // it a template function)
1439 // assert_fn - assert function, which is a template function because performance
1440 // doesn't matter when enabled.
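// For example (see follow_static_fields() further below), a typical use is:
//   InstanceKlass_OOP_ITERATE(start_of_static_fields(), static_oop_field_size(),
//                             MarkSweep::mark_and_push(p),
//                             assert_is_in_closed_subset)
// which expands into a loop that applies mark_and_push to each static oop
// field, running the assert function on each pointer first. (Illustrative note.)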
1441 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1442 T, start_p, count, do_oop, \
1443 assert_fn) \
1444 { \
1445 T* p = (T*)(start_p); \
1446 T* const end = p + (count); \
1447 while (p < end) { \
1448 (assert_fn)(p); \
1449 do_oop; \
1450 ++p; \
1451 } \
1452 }
1454 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1455 T, start_p, count, do_oop, \
1456 assert_fn) \
1457 { \
1458 T* const start = (T*)(start_p); \
1459 T* p = start + (count); \
1460 while (start < p) { \
1461 --p; \
1462 (assert_fn)(p); \
1463 do_oop; \
1464 } \
1465 }
1467 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1468 T, start_p, count, low, high, \
1469 do_oop, assert_fn) \
1470 { \
1471 T* const l = (T*)(low); \
1472 T* const h = (T*)(high); \
1473 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1474 mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
1475 "bounded region must be properly aligned"); \
1476 T* p = (T*)(start_p); \
1477 T* end = p + (count); \
1478 if (p < l) p = l; \
1479 if (end > h) end = h; \
1480 while (p < end) { \
1481 (assert_fn)(p); \
1482 do_oop; \
1483 ++p; \
1484 } \
1485 }
1488 // The following macros call specialized macros, passing either oop or
1489 // narrowOop as the specialization type. These test the UseCompressedOops
1490 // flag.
1491 #define InstanceKlass_OOP_ITERATE(start_p, count, \
1492 do_oop, assert_fn) \
1493 { \
1494 if (UseCompressedOops) { \
1495 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1496 start_p, count, \
1497 do_oop, assert_fn) \
1498 } else { \
1499 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1500 start_p, count, \
1501 do_oop, assert_fn) \
1502 } \
1503 }
1505 #define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
1506 do_oop, assert_fn) \
1507 { \
1508 if (UseCompressedOops) { \
1509 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1510 start_p, count, \
1511 low, high, \
1512 do_oop, assert_fn) \
1513 } else { \
1514 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1515 start_p, count, \
1516 low, high, \
1517 do_oop, assert_fn) \
1518 } \
1519 }
1521 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
1522 { \
1523 /* Compute oopmap block range. The common case \
1524 is nonstatic_oop_map_size == 1. */ \
1525 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1526 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1527 if (UseCompressedOops) { \
1528 while (map < end_map) { \
1529 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1530 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1531 do_oop, assert_fn) \
1532 ++map; \
1533 } \
1534 } else { \
1535 while (map < end_map) { \
1536 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1537 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1538 do_oop, assert_fn) \
1539 ++map; \
1540 } \
1541 } \
1542 }
1544 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
1545 { \
1546 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
1547 OopMapBlock* map = start_map + nonstatic_oop_map_count(); \
1548 if (UseCompressedOops) { \
1549 while (start_map < map) { \
1550 --map; \
1551 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
1552 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1553 do_oop, assert_fn) \
1554 } \
1555 } else { \
1556 while (start_map < map) { \
1557 --map; \
1558 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
1559 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1560 do_oop, assert_fn) \
1561 } \
1562 } \
1563 }
1565 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
1566 assert_fn) \
1567 { \
1568 /* Compute oopmap block range. The common case is \
1569 nonstatic_oop_map_size == 1, so we accept the \
1570 usually non-existent extra overhead of examining \
1571 all the maps. */ \
1572 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1573 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1574 if (UseCompressedOops) { \
1575 while (map < end_map) { \
1576 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1577 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1578 low, high, \
1579 do_oop, assert_fn) \
1580 ++map; \
1581 } \
1582 } else { \
1583 while (map < end_map) { \
1584 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1585 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1586 low, high, \
1587 do_oop, assert_fn) \
1588 ++map; \
1589 } \
1590 } \
1591 }
1593 void instanceKlass::follow_static_fields() {
1594 InstanceKlass_OOP_ITERATE( \
1595 start_of_static_fields(), static_oop_field_size(), \
1596 MarkSweep::mark_and_push(p), \
1597 assert_is_in_closed_subset)
1598 }
1600 #ifndef SERIALGC
1601 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
1602 InstanceKlass_OOP_ITERATE( \
1603 start_of_static_fields(), static_oop_field_size(), \
1604 PSParallelCompact::mark_and_push(cm, p), \
1605 assert_is_in)
1606 }
1607 #endif // SERIALGC
1609 void instanceKlass::adjust_static_fields() {
1610 InstanceKlass_OOP_ITERATE( \
1611 start_of_static_fields(), static_oop_field_size(), \
1612 MarkSweep::adjust_pointer(p), \
1613 assert_nothing)
1614 }
1616 #ifndef SERIALGC
1617 void instanceKlass::update_static_fields() {
1618 InstanceKlass_OOP_ITERATE( \
1619 start_of_static_fields(), static_oop_field_size(), \
1620 PSParallelCompact::adjust_pointer(p), \
1621 assert_nothing)
1622 }
1624 void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
1625 InstanceKlass_BOUNDED_OOP_ITERATE( \
1626 start_of_static_fields(), static_oop_field_size(), \
1627 beg_addr, end_addr, \
1628 PSParallelCompact::adjust_pointer(p), \
1629 assert_nothing )
1630 }
1631 #endif // SERIALGC
1633 void instanceKlass::oop_follow_contents(oop obj) {
1634 assert(obj != NULL, "can't follow the content of NULL object");
1635 obj->follow_header();
1636 InstanceKlass_OOP_MAP_ITERATE( \
1637 obj, \
1638 MarkSweep::mark_and_push(p), \
1639 assert_is_in_closed_subset)
1640 }
1642 #ifndef SERIALGC
1643 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1644 oop obj) {
1645 assert(obj != NULL, "can't follow the content of NULL object");
1646 obj->follow_header(cm);
1647 InstanceKlass_OOP_MAP_ITERATE( \
1648 obj, \
1649 PSParallelCompact::mark_and_push(cm, p), \
1650 assert_is_in)
1651 }
1652 #endif // SERIALGC
1654 // The closure's do_header() method dictates whether the given closure should be
1655 // applied to the klass ptr in the object header.
1657 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
1658 \
1659 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
1660 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1661 /* header */ \
1662 if (closure->do_header()) { \
1663 obj->oop_iterate_header(closure); \
1664 } \
1665 InstanceKlass_OOP_MAP_ITERATE( \
1666 obj, \
1667 SpecializationStats:: \
1668 record_do_oop_call##nv_suffix(SpecializationStats::ik); \
1669 (closure)->do_oop##nv_suffix(p), \
1670 assert_is_in_closed_subset) \
1671 return size_helper(); \
1672 }
1674 #ifndef SERIALGC
1675 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
1676 \
1677 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \
1678 OopClosureType* closure) { \
1679 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1680 /* header */ \
1681 if (closure->do_header()) { \
1682 obj->oop_iterate_header(closure); \
1683 } \
1684 /* instance variables */ \
1685 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1686 obj, \
1687 SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
1688 (closure)->do_oop##nv_suffix(p), \
1689 assert_is_in_closed_subset) \
1690 return size_helper(); \
1691 }
1692 #endif // !SERIALGC
1694 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1695 \
1696 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
1697 OopClosureType* closure, \
1698 MemRegion mr) { \
1699 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1700 if (closure->do_header()) { \
1701 obj->oop_iterate_header(closure, mr); \
1702 } \
1703 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1704 obj, mr.start(), mr.end(), \
1705 (closure)->do_oop##nv_suffix(p), \
1706 assert_is_in_closed_subset) \
1707 return size_helper(); \
1708 }
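// Instantiate the oop_oop_iterate definitions above for each specialized
// OopClosure type.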
1710 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1711 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1712 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1713 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1714 #ifndef SERIALGC
1715 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1716 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1717 #endif // !SERIALGC
1719 void instanceKlass::iterate_static_fields(OopClosure* closure) {
1720 InstanceKlass_OOP_ITERATE( \
1721 start_of_static_fields(), static_oop_field_size(), \
1722 closure->do_oop(p), \
1723 assert_is_in_reserved)
1724 }
1726 void instanceKlass::iterate_static_fields(OopClosure* closure,
1727 MemRegion mr) {
1728 InstanceKlass_BOUNDED_OOP_ITERATE( \
1729 start_of_static_fields(), static_oop_field_size(), \
1730 mr.start(), mr.end(), \
1731 (closure)->do_oop_v(p), \
1732 assert_is_in_closed_subset)
1733 }
1735 int instanceKlass::oop_adjust_pointers(oop obj) {
1736 int size = size_helper();
1737 InstanceKlass_OOP_MAP_ITERATE( \
1738 obj, \
1739 MarkSweep::adjust_pointer(p), \
1740 assert_is_in)
1741 obj->adjust_header();
1742 return size;
1743 }
1745 #ifndef SERIALGC
1746 void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
1747 assert(!pm->depth_first(), "invariant");
1748 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1749 obj, \
1750 if (PSScavenge::should_scavenge(p)) { \
1751 pm->claim_or_forward_breadth(p); \
1752 }, \
1753 assert_nothing )
1754 }
1756 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1757 assert(pm->depth_first(), "invariant");
1758 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1759 obj, \
1760 if (PSScavenge::should_scavenge(p)) { \
1761 pm->claim_or_forward_depth(p); \
1762 }, \
1763 assert_nothing )
1764 }
1766 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1767 InstanceKlass_OOP_MAP_ITERATE( \
1768 obj, \
1769 PSParallelCompact::adjust_pointer(p), \
1770 assert_nothing)
1771 return size_helper();
1772 }
1774 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
1775 HeapWord* beg_addr, HeapWord* end_addr) {
1776 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1777 obj, beg_addr, end_addr, \
1778 PSParallelCompact::adjust_pointer(p), \
1779 assert_nothing)
1780 return size_helper();
1781 }
1783 void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
1784 assert(!pm->depth_first(), "invariant");
1785 InstanceKlass_OOP_ITERATE( \
1786 start_of_static_fields(), static_oop_field_size(), \
1787 if (PSScavenge::should_scavenge(p)) { \
1788 pm->claim_or_forward_breadth(p); \
1789 }, \
1790 assert_nothing )
1791 }
1793 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
1794 assert(pm->depth_first(), "invariant");
1795 InstanceKlass_OOP_ITERATE( \
1796 start_of_static_fields(), static_oop_field_size(), \
1797 if (PSScavenge::should_scavenge(p)) { \
1798 pm->claim_or_forward_depth(p); \
1799 }, \
1800 assert_nothing )
1801 }
1803 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
1804 InstanceKlass_OOP_ITERATE( \
1805 start_of_static_fields(), static_oop_field_size(), \
1806 PSParallelCompact::adjust_pointer(p), \
1807 assert_is_in)
1808 }
1809 #endif // SERIALGC
1811 // This klass is alive but the implementor link is not followed/updated.
1812 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1814 void instanceKlass::follow_weak_klass_links(
1815 BoolObjectClosure* is_alive, OopClosure* keep_alive) {
1816 assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
1817 if (ClassUnloading) {
1818 for (int i = 0; i < implementors_limit; i++) {
1819 klassOop impl = _implementors[i];
1820 if (impl == NULL) break; // no more in the list
1821 if (!is_alive->do_object_b(impl)) {
1822 // remove this entry from the list by overwriting it with the tail
1823 int lasti = --_nof_implementors;
1824 assert(lasti >= i && lasti < implementors_limit, "just checking");
1825 _implementors[i] = _implementors[lasti];
1826 _implementors[lasti] = NULL;
1827 --i; // rerun the loop at this index
1828 }
1829 }
1830 } else {
1831 for (int i = 0; i < implementors_limit; i++) {
1832 keep_alive->do_oop(&adr_implementors()[i]);
1833 }
1834 }
1835 Klass::follow_weak_klass_links(is_alive, keep_alive);
1836 }
1838 void instanceKlass::remove_unshareable_info() {
1839 Klass::remove_unshareable_info();
1840 init_implementor();
1841 }
1843 static void clear_all_breakpoints(methodOop m) {
1844 m->clear_all_breakpoints();
1845 }
1847 void instanceKlass::release_C_heap_structures() {
1848 // Deallocate oop map cache
1849 if (_oop_map_cache != NULL) {
1850 delete _oop_map_cache;
1851 _oop_map_cache = NULL;
1852 }
1854 // Deallocate JNI identifiers for jfieldIDs
1855 JNIid::deallocate(jni_ids());
1856 set_jni_ids(NULL);
1858 jmethodID* jmeths = methods_jmethod_ids_acquire();
1859 if (jmeths != (jmethodID*)NULL) {
1860 release_set_methods_jmethod_ids(NULL);
1861 FreeHeap(jmeths);
1862 }
1864 int* indices = methods_cached_itable_indices_acquire();
1865 if (indices != (int*)NULL) {
1866 release_set_methods_cached_itable_indices(NULL);
1867 FreeHeap(indices);
1868 }
1870 // release dependencies
1871 nmethodBucket* b = _dependencies;
1872 _dependencies = NULL;
1873 while (b != NULL) {
1874 nmethodBucket* next = b->next();
1875 delete b;
1876 b = next;
1877 }
1879 // Deallocate breakpoint records
1880 if (breakpoints() != 0x0) {
1881 methods_do(clear_all_breakpoints);
1882 assert(breakpoints() == 0x0, "should have cleared breakpoints");
1883 }
1885 // deallocate information about previous versions
1886 if (_previous_versions != NULL) {
1887 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
1888 PreviousVersionNode * pv_node = _previous_versions->at(i);
1889 delete pv_node;
1890 }
1891 delete _previous_versions;
1892 _previous_versions = NULL;
1893 }
1895 // deallocate the cached class file
1896 if (_cached_class_file_bytes != NULL) {
1897 os::free(_cached_class_file_bytes);
1898 _cached_class_file_bytes = NULL;
1899 _cached_class_file_len = 0;
1900 }
1901 }
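// Returns the JVM type descriptor for this class, e.g. "Ljava/lang/String;".
// The buffer is resource-allocated: src_length + 3 bytes for the leading 'L',
// the trailing ';' and the NUL terminator.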
1903 const char* instanceKlass::signature_name() const {
1904 const char* src = (const char*) (name()->as_C_string());
1905 const int src_length = (int)strlen(src);
1906 char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
1907 int src_index = 0;
1908 int dest_index = 0;
1909 dest[dest_index++] = 'L';
1910 while (src_index < src_length) {
1911 dest[dest_index++] = src[src_index++];
1912 }
1913 dest[dest_index++] = ';';
1914 dest[dest_index] = '\0';
1915 return dest;
1916 }
1918 // Different versions of is_same_class_package.
1919 bool instanceKlass::is_same_class_package(klassOop class2) {
1920 klassOop class1 = as_klassOop();
1921 oop classloader1 = instanceKlass::cast(class1)->class_loader();
1922 symbolOop classname1 = Klass::cast(class1)->name();
1924 if (Klass::cast(class2)->oop_is_objArray()) {
1925 class2 = objArrayKlass::cast(class2)->bottom_klass();
1926 }
1927 oop classloader2;
1928 if (Klass::cast(class2)->oop_is_instance()) {
1929 classloader2 = instanceKlass::cast(class2)->class_loader();
1930 } else {
1931 assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
1932 classloader2 = NULL;
1933 }
1934 symbolOop classname2 = Klass::cast(class2)->name();
1936 return instanceKlass::is_same_class_package(classloader1, classname1,
1937 classloader2, classname2);
1938 }
1940 bool instanceKlass::is_same_class_package(oop classloader2, symbolOop classname2) {
1941 klassOop class1 = as_klassOop();
1942 oop classloader1 = instanceKlass::cast(class1)->class_loader();
1943 symbolOop classname1 = Klass::cast(class1)->name();
1945 return instanceKlass::is_same_class_package(classloader1, classname1,
1946 classloader2, classname2);
1947 }
1949 // Return true if two classes are in the same package; classloader
1950 // and classname information is enough to determine a class's package.
1951 bool instanceKlass::is_same_class_package(oop class_loader1, symbolOop class_name1,
1952 oop class_loader2, symbolOop class_name2) {
1953 if (class_loader1 != class_loader2) {
1954 return false;
1955 } else if (class_name1 == class_name2) {
1956 return true; // skip painful bytewise comparison
1957 } else {
1958 ResourceMark rm;
1960 // The symbolOops are in UTF8 encoding. Since we only need to check explicitly
1961 // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
1962 // Otherwise, we just compare jbyte values between the strings.
1963 jbyte *name1 = class_name1->base();
1964 jbyte *name2 = class_name2->base();
1966 jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
1967 jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
1969 if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
1970 // One of the two doesn't have a package. Only return true
1971 // if the other one also doesn't have a package.
1972 return last_slash1 == last_slash2;
1973 } else {
1974 // Skip over '['s
1975 if (*name1 == '[') {
1976 do {
1977 name1++;
1978 } while (*name1 == '[');
1979 if (*name1 != 'L') {
1980 // Something is terribly wrong. Shouldn't be here.
1981 return false;
1982 }
1983 }
1984 if (*name2 == '[') {
1985 do {
1986 name2++;
1987 } while (*name2 == '[');
1988 if (*name2 != 'L') {
1989 // Something is terribly wrong. Shouldn't be here.
1990 return false;
1991 }
1992 }
1994 // Check that package part is identical
1995 int length1 = last_slash1 - name1;
1996 int length2 = last_slash2 - name2;
1998 return UTF8::equal(name1, length1, name2, length2);
1999 }
2000 }
2001 }
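// Illustrative examples for is_same_class_package (assuming the same class
// loader in each case):
//   "java/lang/String" vs "java/lang/Integer" -> true  (both in "java/lang")
//   "java/lang/String" vs "java/util/List"    -> false (different package prefix)
//   "Foo"              vs "Bar"               -> true  (neither has a package)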
2003 // Returns true iff super_method can be overridden by a method in targetclassname
2004 // See JLS, 3rd edition, 8.4.6.1.
2005 // Assumes a name-signature match.
2006 // "this" is the instanceKlass of super_method, which must exist.
2007 // Note that the instanceKlass of the method in targetclassname may not have been created yet.
2008 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, symbolHandle targetclassname, TRAPS) {
2009 // Private methods cannot be overridden.
2010 if (super_method->is_private()) {
2011 return false;
2012 }
2013 // If super method is accessible, then override
2014 if ((super_method->is_protected()) ||
2015 (super_method->is_public())) {
2016 return true;
2017 }
2018 // Package-private methods are not inherited outside of package
2019 assert(super_method->is_package_private(), "must be package private");
2020 return(is_same_class_package(targetclassloader(), targetclassname()));
2021 }
2023 /* defined for now in jvm.cpp, for historical reasons *--
2024 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2025 symbolOop& simple_name_result, TRAPS) {
2026 ...
2027 }
2028 */
2030 // tell if two classes have the same enclosing class (at package level)
2031 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2032 klassOop class2_oop, TRAPS) {
2033 if (class2_oop == class1->as_klassOop()) return true;
2034 if (!Klass::cast(class2_oop)->oop_is_instance()) return false;
2035 instanceKlassHandle class2(THREAD, class2_oop);
2037 // must be in same package before we try anything else
2038 if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2039 return false;
2041 // As long as there is an outer1.getEnclosingClass,
2042 // shift the search outward.
2043 instanceKlassHandle outer1 = class1;
2044 for (;;) {
2045 // As we walk along, look for equalities between outer1 and class2.
2046 // Eventually, the walks will terminate as outer1 stops
2047 // at the top-level class around the original class.
2048 bool ignore_inner_is_member;
2049 klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2050 CHECK_false);
2051 if (next == NULL) break;
2052 if (next == class2()) return true;
2053 outer1 = instanceKlassHandle(THREAD, next);
2054 }
2056 // Now do the same for class2.
2057 instanceKlassHandle outer2 = class2;
2058 for (;;) {
2059 bool ignore_inner_is_member;
2060 klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2061 CHECK_false);
2062 if (next == NULL) break;
2063 // Might as well check the new outer against all available values.
2064 if (next == class1()) return true;
2065 if (next == outer1()) return true;
2066 outer2 = instanceKlassHandle(THREAD, next);
2067 }
2069 // If by this point we have not found an equality between the
2070 // two classes, we know they are in separate package members.
2071 return false;
2072 }
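// Compute the modifier flags reported for this klass; for a member class the
// flags come from the matching InnerClasses entry, and the ACC_SUPER bit is
// always stripped.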
2075 jint instanceKlass::compute_modifier_flags(TRAPS) const {
2076 klassOop k = as_klassOop();
2077 jint access = access_flags().as_int();
2079 // But check if it happens to be a member class.
2080 typeArrayOop inner_class_list = inner_classes();
2081 int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
2082 assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
2083 if (length > 0) {
2084 typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
2085 instanceKlassHandle ik(THREAD, k);
2086 for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
2087 int ioff = inner_class_list_h->ushort_at(
2088 i + instanceKlass::inner_class_inner_class_info_offset);
2090 // Inner class attribute can be zero, skip it.
2091 // Strange but true: JVM spec. allows null inner class refs.
2092 if (ioff == 0) continue;
2094 // only look at classes that are already loaded
2095 // since we are looking for the flags of this class itself.
2096 symbolOop inner_name = ik->constants()->klass_name_at(ioff);
2097 if ((ik->name() == inner_name)) {
2098 // This is really a member class.
2099 access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
2100 break;
2101 }
2102 }
2103 }
2104 // Remember to strip ACC_SUPER bit
2105 return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
2106 }
2108 jint instanceKlass::jvmti_class_status() const {
2109 jint result = 0;
2111 if (is_linked()) {
2112 result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2113 }
2115 if (is_initialized()) {
2116 assert(is_linked(), "Class status is not consistent");
2117 result |= JVMTI_CLASS_STATUS_INITIALIZED;
2118 }
2119 if (is_in_error_state()) {
2120 result |= JVMTI_CLASS_STATUS_ERROR;
2121 }
2122 return result;
2123 }
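// Resolve the method at the given itable index for the interface 'holder':
// scan this klass's itable offset entries for 'holder', throwing
// IncompatibleClassChangeError if the interface is not implemented and
// AbstractMethodError if the selected itable slot is empty.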
2125 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
2126 itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
2127 int method_table_offset_in_words = ioe->offset()/wordSize;
2128 int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
2129 / itableOffsetEntry::size();
2131 for (int cnt = 0 ; ; cnt ++, ioe ++) {
2132 // If the interface isn't implemented by the receiver class,
2133 // the VM should throw IncompatibleClassChangeError.
2134 if (cnt >= nof_interfaces) {
2135 THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError());
2136 }
2138 klassOop ik = ioe->interface_klass();
2139 if (ik == holder) break;
2140 }
2142 itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
2143 methodOop m = ime[index].method();
2144 if (m == NULL) {
2145 THROW_OOP_0(vmSymbols::java_lang_AbstractMethodError());
2146 }
2147 return m;
2148 }
2150 // On-stack replacement stuff
2151 void instanceKlass::add_osr_nmethod(nmethod* n) {
2152 // only one compilation can be active
2153 NEEDS_CLEANUP
2154 // This is a short non-blocking critical region, so the no-safepoint check is ok.
2155 OsrList_lock->lock_without_safepoint_check();
2156 assert(n->is_osr_method(), "wrong kind of nmethod");
2157 n->set_osr_link(osr_nmethods_head());
2158 set_osr_nmethods_head(n);
2159 // Remember to unlock again
2160 OsrList_lock->unlock();
2161 }
2164 void instanceKlass::remove_osr_nmethod(nmethod* n) {
2165 // This is a short non-blocking critical region, so the no-safepoint check is ok.
2166 OsrList_lock->lock_without_safepoint_check();
2167 assert(n->is_osr_method(), "wrong kind of nmethod");
2168 nmethod* last = NULL;
2169 nmethod* cur = osr_nmethods_head();
2170 // Search for match
2171 while(cur != NULL && cur != n) {
2172 last = cur;
2173 cur = cur->osr_link();
2174 }
2175 if (cur == n) {
2176 if (last == NULL) {
2177 // Remove first element
2178 set_osr_nmethods_head(osr_nmethods_head()->osr_link());
2179 } else {
2180 last->set_osr_link(cur->osr_link());
2181 }
2182 }
2183 n->set_osr_link(NULL);
2184 // Remember to unlock again
2185 OsrList_lock->unlock();
2186 }
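// Find an OSR nmethod for 'm'; a bci of InvocationEntryBci matches any entry bci.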
2188 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
2189 // This is a short non-blocking critical region, so the no-safepoint check is ok.
2190 OsrList_lock->lock_without_safepoint_check();
2191 nmethod* osr = osr_nmethods_head();
2192 while (osr != NULL) {
2193 assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
2194 if (osr->method() == m &&
2195 (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
2196 // Found a match - return it.
2197 OsrList_lock->unlock();
2198 return osr;
2199 }
2200 osr = osr->osr_link();
2201 }
2202 OsrList_lock->unlock();
2203 return NULL;
2204 }
2206 // -----------------------------------------------------------------------------------------------------
2207 #ifndef PRODUCT
2209 // Printing
2211 #define BULLET " - "
2213 void FieldPrinter::do_field(fieldDescriptor* fd) {
2214 _st->print(BULLET);
2215 if (fd->is_static() || (_obj == NULL)) {
2216 fd->print_on(_st);
2217 _st->cr();
2218 } else {
2219 fd->print_on_for(_st, _obj);
2220 _st->cr();
2221 }
2222 }
2225 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
2226 Klass::oop_print_on(obj, st);
2228 if (as_klassOop() == SystemDictionary::String_klass()) {
2229 typeArrayOop value = java_lang_String::value(obj);
2230 juint offset = java_lang_String::offset(obj);
2231 juint length = java_lang_String::length(obj);
2232 if (value != NULL &&
2233 value->is_typeArray() &&
2234 offset <= (juint) value->length() &&
2235 offset + length <= (juint) value->length()) {
2236 st->print(BULLET"string: ");
2237 Handle h_obj(obj);
2238 java_lang_String::print(h_obj, st);
2239 st->cr();
2240 if (!WizardMode) return; // that is enough
2241 }
2242 }
2244 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
2245 FieldPrinter print_nonstatic_field(st, obj);
2246 do_nonstatic_fields(&print_nonstatic_field);
2248 if (as_klassOop() == SystemDictionary::Class_klass()) {
2249 st->print(BULLET"signature: ");
2250 java_lang_Class::print_signature(obj, st);
2251 st->cr();
2252 klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
2253 st->print(BULLET"fake entry for mirror: ");
2254 mirrored_klass->print_value_on(st);
2255 st->cr();
2256 st->print(BULLET"fake entry resolved_constructor: ");
2257 methodOop ctor = java_lang_Class::resolved_constructor(obj);
2258 ctor->print_value_on(st);
2259 klassOop array_klass = java_lang_Class::array_klass(obj);
2260 st->cr();
2261 st->print(BULLET"fake entry for array: ");
2262 array_klass->print_value_on(st);
2263 st->cr();
2264 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2265 st->print(BULLET"signature: ");
2266 java_dyn_MethodType::print_signature(obj, st);
2267 st->cr();
2268 }
2269 }
2271 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
2272 st->print("a ");
2273 name()->print_value_on(st);
2274 obj->print_address_on(st);
2275 if (as_klassOop() == SystemDictionary::String_klass()
2276 && java_lang_String::value(obj) != NULL) {
2277 ResourceMark rm;
2278 int len = java_lang_String::length(obj);
2279 int plen = (len < 24 ? len : 12);
2280 char* str = java_lang_String::as_utf8_string(obj, 0, plen);
2281 st->print(" = \"%s\"", str);
2282 if (len > plen)
2283 st->print("...[%d]", len);
2284 } else if (as_klassOop() == SystemDictionary::Class_klass()) {
2285 klassOop k = java_lang_Class::as_klassOop(obj);
2286 st->print(" = ");
2287 if (k != NULL) {
2288 k->print_value_on(st);
2289 } else {
2290 const char* tname = type2name(java_lang_Class::primitive_type(obj));
2291 st->print("%s", tname ? tname : "type?");
2292 }
2293 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2294 st->print(" = ");
2295 java_dyn_MethodType::print_signature(obj, st);
2296 } else if (java_lang_boxing_object::is_instance(obj)) {
2297 st->print(" = ");
2298 java_lang_boxing_object::print(obj, st);
2299 }
2300 }
2302 #endif // ndef PRODUCT
2304 const char* instanceKlass::internal_name() const {
2305 return external_name();
2306 }
2308 // Verification
2310 class VerifyFieldClosure: public OopClosure {
2311 protected:
2312 template <class T> void do_oop_work(T* p) {
2313 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2314 oop obj = oopDesc::load_decode_heap_oop(p);
2315 if (!obj->is_oop_or_null()) {
2316 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2317 Universe::print();
2318 guarantee(false, "boom");
2319 }
2320 }
2321 public:
2322 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
2323 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2324 };
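// Apply VerifyFieldClosure to every oop field of obj, in addition to the
// generic Klass::oop_verify_on checks.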
2326 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2327 Klass::oop_verify_on(obj, st);
2328 VerifyFieldClosure blk;
2329 oop_oop_iterate(obj, &blk);
2330 }
2332 #ifndef PRODUCT
2334 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
2335 // This verification code is disabled. JDK_Version::is_gte_jdk14x_version()
2336 // cannot be called since this function is called before the VM is
2337 // able to determine which JDK version it is running with.
2338 // The check below is always false since 1.4.
2339 return;
2341 // This verification code is temporarily disabled for the 1.4
2342 // reflection implementation since java.lang.Class now has
2343 // Java-level instance fields. Should rewrite this to handle this
2344 // case.
2345 if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
2346 // Verify that java.lang.Class instances have a fake oop field added.
2347 instanceKlass* ik = instanceKlass::cast(k);
2349 // Check that we have the right class
2350 static bool first_time = true;
2351 guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
2352 first_time = false;
2353 const int extra = java_lang_Class::number_of_fake_oop_fields;
2354 guarantee(ik->nonstatic_field_size() == extra, "just checking");
2355 guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
2356 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
2358 // Check that the map is (2,extra)
2359 int offset = java_lang_Class::klass_offset;
2361 OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
2362 guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
2363 "sanity");
2364 }
2365 }
2367 #endif // ndef PRODUCT
2369 // JNIid class for jfieldIDs only
2370 // Note to reviewers:
2371 // These JNI functions are just moved over to column 1 and not changed
2372 // in the compressed oops workspace.
2373 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2374 _holder = holder;
2375 _offset = offset;
2376 _next = next;
2377 debug_only(_is_static_field_id = false;)
2378 }
2381 JNIid* JNIid::find(int offset) {
2382 JNIid* current = this;
2383 while (current != NULL) {
2384 if (current->offset() == offset) return current;
2385 current = current->next();
2386 }
2387 return NULL;
2388 }
2390 void JNIid::oops_do(OopClosure* f) {
2391 for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2392 f->do_oop(cur->holder_addr());
2393 }
2394 }
2396 void JNIid::deallocate(JNIid* current) {
2397 while (current != NULL) {
2398 JNIid* next = current->next();
2399 delete current;
2400 current = next;
2401 }
2402 }
2405 void JNIid::verify(klassOop holder) {
2406 int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
2407 int end_field_offset;
2408 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2410 JNIid* current = this;
2411 while (current != NULL) {
2412 guarantee(current->holder() == holder, "Invalid klass in JNIid");
2413 #ifdef ASSERT
2414 int o = current->offset();
2415 if (current->is_static_field_id()) {
2416 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
2417 }
2418 #endif
2419 current = current->next();
2420 }
2421 }
2424 #ifdef ASSERT
2425 void instanceKlass::set_init_state(ClassState state) {
2426 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2427 : (_init_state < state);
2428 assert(good_state || state == allocated, "illegal state transition");
2429 _init_state = state;
2430 }
2431 #endif
2434 // RedefineClasses() support for previous versions:
2436 // Add an information node that contains weak references to the
2437 // interesting parts of the previous version of the_class.
2438 // This is also where we clean out any unused weak references.
2439 // Note that while we delete nodes from the _previous_versions
2440 // array, we never delete the array itself until the klass is
2441 // unloaded. The has_been_redefined() query depends on that fact.
2442 //
2443 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2444 BitMap* emcp_methods, int emcp_method_count) {
2445 assert(Thread::current()->is_VM_thread(),
2446 "only VMThread can add previous versions");
2448 if (_previous_versions == NULL) {
2449 // This is the first previous version so make some space.
2450 // Start with 2 elements under the assumption that the class
2451 // won't be redefined much.
2452 _previous_versions = new (ResourceObj::C_HEAP)
2453 GrowableArray<PreviousVersionNode *>(2, true);
2454 }
2456 // RC_TRACE macro has an embedded ResourceMark
2457 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2458 ikh->external_name(), _previous_versions->length(), emcp_method_count));
2459 constantPoolHandle cp_h(ikh->constants());
2460 jobject cp_ref;
2461 if (cp_h->is_shared()) {
2462 // a shared ConstantPool requires a regular reference; a weak
2463 // reference would be collectible
2464 cp_ref = JNIHandles::make_global(cp_h);
2465 } else {
2466 cp_ref = JNIHandles::make_weak_global(cp_h);
2467 }
2468 PreviousVersionNode * pv_node = NULL;
2469 objArrayOop old_methods = ikh->methods();
2471 if (emcp_method_count == 0) {
2472 // non-shared ConstantPool gets a weak reference
2473 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
2474 RC_TRACE(0x00000400,
2475 ("add: all methods are obsolete; flushing any EMCP weak refs"));
2476 } else {
2477 int local_count = 0;
2478 GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
2479 GrowableArray<jweak>(emcp_method_count, true);
2480 for (int i = 0; i < old_methods->length(); i++) {
2481 if (emcp_methods->at(i)) {
2482 // this old method is EMCP so save a weak ref
2483 methodOop old_method = (methodOop) old_methods->obj_at(i);
2484 methodHandle old_method_h(old_method);
2485 jweak method_ref = JNIHandles::make_weak_global(old_method_h);
2486 method_refs->append(method_ref);
2487 if (++local_count >= emcp_method_count) {
2488 // no more EMCP methods so bail out now
2489 break;
2490 }
2491 }
2492 }
2493 // non-shared ConstantPool gets a weak reference
2494 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
2495 }
2497 _previous_versions->append(pv_node);
2499 // Using weak references allows the interesting parts of previous
2500 // classes to be GC'ed when they are no longer needed. Since the
2501 // caller is the VMThread and we are at a safepoint, this is a good
2502 // time to clear out unused weak references.
2504 RC_TRACE(0x00000400, ("add: previous version length=%d",
2505 _previous_versions->length()));
2507 // skip the last entry since we just added it
2508 for (int i = _previous_versions->length() - 2; i >= 0; i--) {
2509 // check the previous versions array for GC'ed weak refs
2510 pv_node = _previous_versions->at(i);
2511 cp_ref = pv_node->prev_constant_pool();
2512 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2513 if (cp_ref == NULL) {
2514 delete pv_node;
2515 _previous_versions->remove_at(i);
2516 // Since we are traversing the array backwards, we don't have to
2517 // do anything special with the index.
2518 continue; // robustness
2519 }
2521 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2522 if (cp == NULL) {
2523 // this entry has been GC'ed so remove it
2524 delete pv_node;
2525 _previous_versions->remove_at(i);
2526 // Since we are traversing the array backwards, we don't have to
2527 // do anything special with the index.
2528 continue;
2529 } else {
2530 RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
2531 }
2533 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2534 if (method_refs != NULL) {
2535 RC_TRACE(0x00000400, ("add: previous methods length=%d",
2536 method_refs->length()));
2537 for (int j = method_refs->length() - 1; j >= 0; j--) {
2538 jweak method_ref = method_refs->at(j);
2539 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2540 if (method_ref == NULL) {
2541 method_refs->remove_at(j);
2542 // Since we are traversing the array backwards, we don't have to
2543 // do anything special with the index.
2544 continue; // robustness
2545 }
2547 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2548 if (method == NULL || emcp_method_count == 0) {
2549 // This method entry has been GC'ed or the current
2550 // RedefineClasses() call has made all methods obsolete
2551 // so remove it.
2552 JNIHandles::destroy_weak_global(method_ref);
2553 method_refs->remove_at(j);
2554 } else {
2555 // RC_TRACE macro has an embedded ResourceMark
2556 RC_TRACE(0x00000400,
2557 ("add: %s(%s): previous method @%d in version @%d is alive",
2558 method->name()->as_C_string(), method->signature()->as_C_string(),
2559 j, i));
2560 }
2561 }
2562 }
2563 }
2565 int obsolete_method_count = old_methods->length() - emcp_method_count;
2567 if (emcp_method_count != 0 && obsolete_method_count != 0 &&
2568 _previous_versions->length() > 1) {
2569 // We have a mix of obsolete and EMCP methods. If there are more
2570 // entries than just the previous version we added above, then we have to
2571 // clear out any matching EMCP method entries the hard way.
2572 int local_count = 0;
2573 for (int i = 0; i < old_methods->length(); i++) {
2574 if (!emcp_methods->at(i)) {
2575 // only obsolete methods are interesting
2576 methodOop old_method = (methodOop) old_methods->obj_at(i);
2577 symbolOop m_name = old_method->name();
2578 symbolOop m_signature = old_method->signature();
2580 // skip the last entry since we just added it
2581 for (int j = _previous_versions->length() - 2; j >= 0; j--) {
2583 // check the previous versions array for GC'ed weak refs
2583 pv_node = _previous_versions->at(j);
2584 cp_ref = pv_node->prev_constant_pool();
2585 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2586 if (cp_ref == NULL) {
2587 delete pv_node;
2588 _previous_versions->remove_at(j);
2589 // Since we are traversing the array backwards, we don't have to
2590 // do anything special with the index.
2591 continue; // robustness
2592 }
2594 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2595 if (cp == NULL) {
2596 // this entry has been GC'ed so remove it
2597 delete pv_node;
2598 _previous_versions->remove_at(j);
2599 // Since we are traversing the array backwards, we don't have to
2600 // do anything special with the index.
2601 continue;
2602 }
2604 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2605 if (method_refs == NULL) {
2606 // We have run into a PreviousVersion generation where
2607 // all methods were made obsolete during that generation's
2608 // RedefineClasses() operation. At the time of that
2609 // operation, all EMCP methods were flushed so we don't
2610 // have to go back any further.
2611 //
2612 // A NULL method_refs is different from an empty method_refs.
2613 // We cannot infer any optimizations about older generations
2614 // from an empty method_refs for the current generation.
2615 break;
2616 }
2618 for (int k = method_refs->length() - 1; k >= 0; k--) {
2619 jweak method_ref = method_refs->at(k);
2620 assert(method_ref != NULL,
2621 "weak method ref was unexpectedly cleared");
2622 if (method_ref == NULL) {
2623 method_refs->remove_at(k);
2624 // Since we are traversing the array backwards, we don't
2625 // have to do anything special with the index.
2626 continue; // robustness
2627 }
2629 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2630 if (method == NULL) {
2631 // this method entry has been GC'ed so skip it
2632 JNIHandles::destroy_weak_global(method_ref);
2633 method_refs->remove_at(k);
2634 continue;
2635 }
2637 if (method->name() == m_name &&
2638 method->signature() == m_signature) {
2639 // The current RedefineClasses() call has made all EMCP
2640 // versions of this method obsolete so mark it as obsolete
2641 // and remove the weak ref.
2642 RC_TRACE(0x00000400,
2643 ("add: %s(%s): flush obsolete method @%d in version @%d",
2644 m_name->as_C_string(), m_signature->as_C_string(), k, j));
2646 method->set_is_obsolete();
2647 JNIHandles::destroy_weak_global(method_ref);
2648 method_refs->remove_at(k);
2649 break;
2650 }
2651 }
2653 // The previous loop may not find a matching EMCP method, but
2654 // that doesn't mean that we can optimize and not go any
2655 // further back in the PreviousVersion generations. The EMCP
2656 // method for this generation could have already been GC'ed,
2657 // but there still may be an older EMCP method that has not
2658 // been GC'ed.
2659 }
2661 if (++local_count >= obsolete_method_count) {
2662 // no more obsolete methods so bail out now
2663 break;
2664 }
2665 }
2666 }
2667 }
2668 } // end add_previous_version()
2671 // Determine if instanceKlass has a previous version.
2672 bool instanceKlass::has_previous_version() const {
2673 if (_previous_versions == NULL) {
2674 // no previous versions array so answer is easy
2675 return false;
2676 }
2678 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
2679 // Check the previous versions array for an info node that hasn't
2680 // been GC'ed
2681 PreviousVersionNode * pv_node = _previous_versions->at(i);
2683 jobject cp_ref = pv_node->prev_constant_pool();
2684 assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
2685 if (cp_ref == NULL) {
2686 continue; // robustness
2687 }
2689 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2690 if (cp != NULL) {
2691 // we have at least one previous version
2692 return true;
2693 }
2695 // We don't have to check the method refs. If the constant pool has
2696 // been GC'ed then so have the methods.
2697 }
2699 // all of the underlying nodes' info has been GC'ed
2700 return false;
2701 } // end has_previous_version()
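// Look up a method by its idnum: try the direct index first (the common case),
// then fall back to a linear search if the method at that slot does not match.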
2703 methodOop instanceKlass::method_with_idnum(int idnum) {
2704 methodOop m = NULL;
2705 if (idnum < methods()->length()) {
2706 m = (methodOop) methods()->obj_at(idnum);
2707 }
2708 if (m == NULL || m->method_idnum() != idnum) {
2709 for (int index = 0; index < methods()->length(); ++index) {
2710 m = (methodOop) methods()->obj_at(index);
2711 if (m->method_idnum() == idnum) {
2712 return m;
2713 }
2714 }
2715 }
2716 return m;
2717 }
2720 // Set the annotation at 'idnum' to 'anno'.
2721 // We don't want to create or extend the array if 'anno' is NULL, since that is the
2722 // default value. However, if the array exists and is long enough, we must set NULL values.
2723 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
2724 objArrayOop md = *md_p;
2725 if (md != NULL && md->length() > idnum) {
2726 md->obj_at_put(idnum, anno);
2727 } else if (anno != NULL) {
2728 // create the array
2729 int length = MAX2(idnum+1, (int)_idnum_allocated_count);
2730 md = oopFactory::new_system_objArray(length, Thread::current());
2731 if (*md_p != NULL) {
2732 // copy the existing entries
2733 for (int index = 0; index < (*md_p)->length(); index++) {
2734 md->obj_at_put(index, (*md_p)->obj_at(index));
2735 }
2736 }
2737 set_annotations(md, md_p);
2738 md->obj_at_put(idnum, anno);
2739 } // if no array and idnum isn't included there is nothing to do
2740 }
2742 // Construct a PreviousVersionNode entry for the array hung off
2743 // the instanceKlass.
2744 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
2745 bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {
2747 _prev_constant_pool = prev_constant_pool;
2748 _prev_cp_is_weak = prev_cp_is_weak;
2749 _prev_EMCP_methods = prev_EMCP_methods;
2750 }
2753 // Destroy a PreviousVersionNode
2754 PreviousVersionNode::~PreviousVersionNode() {
2755 if (_prev_constant_pool != NULL) {
2756 if (_prev_cp_is_weak) {
2757 JNIHandles::destroy_weak_global(_prev_constant_pool);
2758 } else {
2759 JNIHandles::destroy_global(_prev_constant_pool);
2760 }
2761 }
2763 if (_prev_EMCP_methods != NULL) {
2764 for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
2765 jweak method_ref = _prev_EMCP_methods->at(i);
2766 if (method_ref != NULL) {
2767 JNIHandles::destroy_weak_global(method_ref);
2768 }
2769 }
2770 delete _prev_EMCP_methods;
2771 }
2772 }
2775 // Construct a PreviousVersionInfo entry
2776 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
2777 _prev_constant_pool_handle = constantPoolHandle(); // NULL handle
2778 _prev_EMCP_method_handles = NULL;
2780 jobject cp_ref = pv_node->prev_constant_pool();
2781 assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
2782 if (cp_ref == NULL) {
2783 return; // robustness
2784 }
2786 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2787 if (cp == NULL) {
2788 // Weak reference has been GC'ed. Since the constant pool has been
2789 // GC'ed, the methods have also been GC'ed.
2790 return;
2791 }
2793 // make the constantPoolOop safe to return
2794 _prev_constant_pool_handle = constantPoolHandle(cp);
2796 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2797 if (method_refs == NULL) {
2798 // the instanceKlass did not have any EMCP methods
2799 return;
2800 }
2802 _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
2804 int n_methods = method_refs->length();
2805 for (int i = 0; i < n_methods; i++) {
2806 jweak method_ref = method_refs->at(i);
2807 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2808 if (method_ref == NULL) {
2809 continue; // robustness
2810 }
2812 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2813 if (method == NULL) {
2814 // this entry has been GC'ed so skip it
2815 continue;
2816 }
2818 // make the methodOop safe to return
2819 _prev_EMCP_method_handles->append(methodHandle(method));
2820 }
2821 }
2824 // Destroy a PreviousVersionInfo
2825 PreviousVersionInfo::~PreviousVersionInfo() {
2826 // Since _prev_EMCP_method_handles is not C-heap allocated, we
2827 // don't have to delete it.
2828 }
2831 // Construct a helper for walking the previous versions array
2832 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
2833 _previous_versions = ik->previous_versions();
2834 _current_index = 0;
2835 // _hm needs no initialization
2836 _current_p = NULL;
2837 }
2840 // Destroy a PreviousVersionWalker
2841 PreviousVersionWalker::~PreviousVersionWalker() {
2842 // Delete the current info just in case the caller didn't walk to
2843 // the end of the previous versions list. No harm if _current_p is
2844 // already NULL.
2845 delete _current_p;
2847 // When _hm is destroyed, all the Handles returned in
2848 // PreviousVersionInfo objects will be destroyed.
2849 // Also, after this destructor is finished it will be
2850 // safe to delete the GrowableArray allocated in the
2851 // PreviousVersionInfo objects.
2852 }
2855 // Return the interesting information for the next previous version
2856 // of the klass. Returns NULL if there are no more previous versions.
2857 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
2858 if (_previous_versions == NULL) {
2859 // no previous versions so nothing to return
2860 return NULL;
2861 }
2863 delete _current_p; // cleanup the previous info for the caller
2864 _current_p = NULL; // reset to NULL so we don't delete same object twice
2866 int length = _previous_versions->length();
2868 while (_current_index < length) {
2869 PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
2870 PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
2871 PreviousVersionInfo(pv_node);
2873 constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
2874 if (cp_h.is_null()) {
2875 delete pv_info;
2877 // The underlying node's info has been GC'ed so try the next one.
2878 // We don't have to check the methods. If the constant pool has
2879 // been GC'ed then so have the methods.
2880 continue;
2881 }
2883 // Found a node with non GC'ed info so return it. The caller will
2884 // need to delete pv_info when they are done with it.
2885 _current_p = pv_info;
2886 return pv_info;
2887 }
2889 // all of the underlying nodes' info has been GC'ed
2890 return NULL;
2891 } // end next_previous_version()